From 22aadfa4994dc23ed99990ae27d5eff6344957ac Mon Sep 17 00:00:00 2001
From: Tristan Walter <twalter@orn.mpg.de>
Date: Thu, 8 Oct 2020 12:02:38 +0200
Subject: [PATCH] compiles on macOS

---
 Application/CMakeLists.txt     |   24 +-
 Application/cmake_conda_osx.sh |    1 +
 docs/parameters_tgrabs.rst     |  278 +++----
 docs/parameters_trex.rst       | 1369 +++++++++++++++++---------------
 docs/run.rst                   |   13 +-
 5 files changed, 894 insertions(+), 791 deletions(-)

diff --git a/Application/CMakeLists.txt b/Application/CMakeLists.txt
index 1c67f2a..eb7acf7 100644
--- a/Application/CMakeLists.txt
+++ b/Application/CMakeLists.txt
@@ -290,7 +290,7 @@ if(NOT TREX_BUILD_GLFW)
     find_package(GLFW)
     if(NOT GLFW_FOUND)
         message(STATUS "Cannot find GLFW. Trying to build it instead.")
-        set(TREX_BUILD_GLFW ON)
+        set(TREX_BUILD_GLFW ON CACHE BOOL "" FORCE)
     endif()
 endif()
 
@@ -495,7 +495,7 @@ if(NOT TREX_BUILD_PNG AND NOT ${TREX_BUILD_PNG})
     
     if(NOT PNG_FOUND)
         message(STATUS "Did not find PNG. Trying to build it.")
-        set(TREX_BUILD_PNG ON)
+        set(TREX_BUILD_PNG ON CACHE BOOL "" FORCE)
         
     else()
         include_directories(${PNG_INCLUDE_DIR})
@@ -520,8 +520,8 @@ if((NOT ${TREX_BUILD_ZLIB}) AND ZLIB_FOUND AND ZLIB_VERSION_STRING VERSION_GREAT
     
 else()
     message(STATUS "Building libZ and libZIP")
-	set(TREX_BUILD_ZLIB ON)
-    set(TREX_BUILD_ZIP ON)
+	set(TREX_BUILD_ZLIB ON CACHE BOOL "" FORCE)
+    set(TREX_BUILD_ZIP ON CACHE BOOL "" FORCE)
     set(ZLIB_LIBRARY "z")
     set(zlib_install_dir "${CMAKE_BINARY_DIR}/zlib")
     
@@ -565,7 +565,7 @@ if(NOT TREX_BUILD_ZIP)
     
     if(NOT Zip_FOUND)
         message(STATUS "Did not find ZIP. Trying to build it.")
-        set(TREX_BUILD_ZIP ON)
+        set(TREX_BUILD_ZIP ON CACHE BOOL "" FORCE)
         
     else()
         include_directories(${ZIP_INCLUDE_DIR})
@@ -649,7 +649,7 @@ if(${TREX_BUILD_PNG})
     if(WIN32)
         set(PNG_LIBRARY "libpng16_static$<IF:$<CONFIG:Debug>,d,>")
     else()
-        set(PNG_LIBRARY "png16$<IF:$<CONFIG:Debug>,d,>")
+        set(PNG_LIBRARY "${png_install_dir}/lib/libpng16$<IF:$<CONFIG:Debug>,d,>.a")
     endif()
 
     message(STATUS "PNG_LIBRARY = ${PNG_LIBRARY}")
@@ -703,13 +703,14 @@ if(WIN32)
     set(OpenCV_STATIC ON)
 endif()
 
-if(NOT TREX_BUILD_OPENCV)
+if(NOT ${TREX_BUILD_OPENCV})
+    message(STATUS "Trying to find OpenCV...")
     find_package( OpenCV )
     
     if(NOT OpenCV_FOUND)
-        set(TREX_BUILD_OPENCV ON)
+        set(TREX_BUILD_OPENCV ON CACHE BOOL "OpenCV Override" FORCE)
         message(STATUS "Cannot find OpenCV. Trying to build it.")
-    else()
+    else() 
         include_directories( ${OpenCV_INCLUDE_DIRS} )
         message(STATUS "Found OpenCV Version: ${OpenCV_VERSION} include: ${OpenCV_INCLUDE_DIRS} libs: ${OpenCV_LIBRARIES}")
         set(OpenCV_LIBS ${OpenCV_LIBRARIES})
@@ -725,9 +726,10 @@ if(NOT TREX_BUILD_OPENCV)
     endif()
 endif()
 
-if(TREX_BUILD_OPENCV)
+if(${TREX_BUILD_OPENCV})
     set(trigger_build_dir ${CMAKE_BINARY_DIR}/force_opencv)
     set(BUILD_SHARED_LIBS OFF)
+    message(STATUS "Building OpenCV...")
     
     if(UNIX)
         get_filename_component(PKG_CONFIG_PATH ${AVDEVICE_LIBRARY} DIRECTORY)
@@ -816,7 +818,7 @@ if(TREX_BUILD_OPENCV)
             -DBUILD_JPEG:BOOL=ON
             -DWITH_OPENJPEG:BOOL=OFF
             -DBUILD_JASPER:BOOL=OFF
-            -DBUILD_opencv_python3=OFF
+            -DBUILD_opencv_python3=ON
             -DBUILD_opencv_python2=OFF
             -DWITH_JASPER:BOOL=OFF
             -DBUILD_opencv_world:BOOL=OFF
diff --git a/Application/cmake_conda_osx.sh b/Application/cmake_conda_osx.sh
index b7f1cbc..337366c 100755
--- a/Application/cmake_conda_osx.sh
+++ b/Application/cmake_conda_osx.sh
@@ -79,6 +79,7 @@ else
             -DPYTHON3_PACKAGES_PATH=$(python3 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())") \
             -DTREX_BUILD_ZLIB=ON \
             -DTREX_BUILD_ZIP=ON \
+            -DTREX_BUILD_PNG=ON \
             -DCMAKE_PREFIX_PATH="$CONDA_PREFIX;$CONDA_PREFIX/lib/pkgconfig;$CONDA_PREFIX/lib"
     else
         echo "**************************************"
diff --git a/docs/parameters_tgrabs.rst b/docs/parameters_tgrabs.rst
index abc2aff..eaae708 100644
--- a/docs/parameters_tgrabs.rst
+++ b/docs/parameters_tgrabs.rst
@@ -7,55 +7,54 @@ TGrabs parameters
 
 .. NOTE::
 	|grabs| has a live-tracking feature, allowing users to extract positions and postures of individuals while recording/converting. For this process, all parameters relevant for tracking are available in |grabs| as well -- for a reference of those, please refer to :doc:`parameters_trex`.
-.. function:: adaptive_threshold_scale(float)
+.. function:: enable_closed_loop(bool)
 	:noindex:
 
-	**default value:** 2
+	**default value:** false
 
 
-	Threshold value to be used for adaptive thresholding, if enabled.
+	When enabled, live tracking will be executed for every frame received. Frames will be sent to the 'closed_loop.py' script - see this script for more information. Sets `enable_live_tracking` to true. Allows the tracker to skip frames by default, in order to catch up to the video.
 
+	.. seealso:: :func:`enable_live_tracking`, 
 
 
-.. function:: approximate_length_minutes(uint)
+.. function:: mask_path(path)
 	:noindex:
 
-	**default value:** 0
+	**default value:** ""
 
 
-	If available, please provide the approximate length of the video in minutes here, so that the encoding strategy can be chosen intelligently. If set to 0, infinity is assumed. This setting is overwritten by `stop_after_minutes`.
+	Path to a video file containing a mask to be applied to the video while recording. Only works for conversions.
 
-	.. seealso:: :func:`stop_after_minutes`, 
 
 
-.. function:: average_samples(int)
+.. function:: meta_build(string)
 	:noindex:
 
-	**default value:** 100
+	**default value:** ""
 
 
-	Number of samples taken to generate an average image. Usually has to be less if `average_method` is set to max.
+	The current commit hash. The video is branded with this information for later inspection of errors that might have occured.
 
-	.. seealso:: :func:`average_method`, 
 
 
-.. function:: averaging_method(string)
+.. function:: meta_misc(string)
 	:noindex:
 
-	**default value:** "mean"
+	**default value:** ""
 
 
-	This can be either 'mean', 'mode', 'min' or 'max'. All accumulated background images (to be used for generating an average background) will be combined using the max or mean function.
+	Other information.
 
 
 
-.. function:: blob_size_range(rangef)
+.. function:: meta_species(string)
 	:noindex:
 
-	**default value:** [0.01,500000]
+	**default value:** ""
 
 
-	Minimum or maximum size of the individuals on screen after thresholding. Anything smaller or bigger than these values will be disregarded as noise.
+	Name of the species used.
 
 
 
@@ -69,321 +68,332 @@ TGrabs parameters
 
 
 
-.. function:: cam_limit_exposure(int)
+.. function:: cam_resolution(size<int>)
 	:noindex:
 
-	**default value:** 5500
+	**default value:** [2048,2048]
 
 
-	[BASLER] Sets the cameras exposure time in micro seconds.
+	[BASLER] Defines the dimensions of the camera image.
 
 
 
-.. function:: cam_resolution(size<int>)
+.. function:: averaging_method(string)
 	:noindex:
 
-	**default value:** [2048,2048]
+	**default value:** "mean"
 
 
-	[BASLER] Defines the dimensions of the camera image.
+	This can be either 'mean', 'mode', 'min' or 'max'. All accumulated background images (to be used for generating an average background) will be combined using the max or mean function.
 
 
 
-.. function:: closing_size(int)
+.. function:: correct_luminance(bool)
 	:noindex:
 
-	**default value:** 3
+	**default value:** false
 
 
-	Size of the dilation/erosion filters for if `use_closing` is enabled.
+	Attempts to correct for badly lit backgrounds by evening out luminance across the background.
 
-	.. seealso:: :func:`use_closing`, 
 
 
-.. function:: color_channel(ulong)
+.. function:: enable_difference(bool)
 	:noindex:
 
-	**default value:** 1
+	**default value:** true
 
 
-	Index (0-2) of the color channel to be used during video conversion, if more than one channel is present in the video file.
+	Enables background subtraction. If disabled, `threshold` will be applied to the raw greyscale values instead of difference values.
 
+	.. seealso:: :func:`threshold`, 
 
 
-.. function:: correct_luminance(bool)
+.. function:: image_adjust(bool)
 	:noindex:
 
 	**default value:** false
 
 
-	Attempts to correct for badly lit backgrounds by evening out luminance across the background.
+	Converts the image to floating-point (temporarily) and performs f(x,y) * `image_contrast_increase` + `image_brightness_increase` plus, if enabled, squares the image (`image_square_brightness`).
 
+	.. seealso:: :func:`image_contrast_increase`, :func:`image_brightness_increase`, :func:`image_square_brightness`, 
 
 
-.. function:: crop_offsets(offsets)
+.. function:: grabber_force_settings(bool)
 	:noindex:
 
-	**default value:** [0,0,0,0]
+	**default value:** false
 
 
-	Percentage offsets [left, top, right, bottom] that will be cut off the input images (e.g. [0.1,0.1,0.5,0.5] will remove 10%% from the left and top and 50%% from the right and bottom and the video will be 60%% smaller in X and Y).
+	If set to true, live tracking will always overwrite a settings file with `filename`.settings in the output folder.
 
+	.. seealso:: :func:`filename`, 
 
 
-.. function:: crop_window(bool)
+.. function:: closing_size(int)
+	:noindex:
+
+	**default value:** 3
+
+
+	Size of the dilation/erosion filters for if `use_closing` is enabled.
+
+	.. seealso:: :func:`use_closing`, 
+
+
+.. function:: use_adaptive_threshold(bool)
 	:noindex:
 
 	**default value:** false
 
 
-	If set to true, the grabber will open a window before the analysis starts where the user can drag+drop points defining the crop_offsets.
+	Enables or disables adaptive thresholding (slower than normal threshold). Deals better with weird backgrounds.
 
 
 
-.. function:: dilation_size(int)
+.. function:: grabber_use_threads(bool)
 	:noindex:
 
-	**default value:** 0
+	**default value:** true
 
 
-	If set to a value greater than zero, detected shapes will be inflated (and potentially merged). When set to a value smaller than zero, detected shapes will be shrunk (and potentially split).
+	Use threads to process images (specifically the blob detection).
 
 
 
-.. function:: enable_closed_loop(bool)
+.. function:: terminate_error(bool)
 	:noindex:
 
 	**default value:** false
 
 
-	When enabled, live tracking will be executed for every frame received. Frames will be sent to the 'closed_loop.py' script - see this script for more information. Sets `enable_live_tracking` to true. Allows the tracker to skip frames by default, in order to catch up to the video.
+	Internal variable.
 
-	.. seealso:: :func:`enable_live_tracking`, 
 
 
-.. function:: enable_difference(bool)
+.. function:: recording(bool)
 	:noindex:
 
 	**default value:** true
 
 
-	Enables background subtraction. If disabled, `threshold` will be applied to the raw greyscale values instead of difference values.
+	If set to true, the program will record frames whenever individuals are found.
 
-	.. seealso:: :func:`threshold`, 
 
 
-.. function:: enable_live_tracking(bool)
+.. function:: video_source(string)
 	:noindex:
 
-	**default value:** false
+	**default value:** "basler"
 
 
-	When enabled, the program will save a .results file for the recorded video plus export the data (see `output_graphs` in the tracker documentation).
+	Where the video is recorded from. Can be the name of a file, or one of the keywords ['basler', 'webcam', 'test_image'].
 
-	.. seealso:: :func:`output_graphs`, 
 
 
-.. function:: equalize_histogram(bool)
+.. function:: video_conversion_range(pair<int,int>)
 	:noindex:
 
-	**default value:** false
+	**default value:** [-1,-1]
 
 
-	Equalizes the histogram of the image before thresholding and background subtraction.
+	If set to a valid value (!= -1), start and end values determine the range converted.
 
 
 
-.. function:: grabber_force_settings(bool)
+.. function:: save_raw_movie(bool)
 	:noindex:
 
 	**default value:** false
 
 
-	If set to true, live tracking will always overwrite a settings file with `filename`.settings in the output folder.
+	Saves a RAW movie (.mov) with a similar name in the same folder, while also recording to a PV file. This might reduce the maximum framerate slightly, but it gives you the best of both worlds.
 
-	.. seealso:: :func:`filename`, 
 
 
-.. function:: image_adjust(bool)
+.. function:: stop_after_minutes(uint)
 	:noindex:
 
-	**default value:** false
+	**default value:** 0
 
 
-	Converts the image to floating-point (temporarily) and performs f(x,y) * `image_contrast_increase` + `image_brightness_increase` plus, if enabled, squares the image (`image_square_brightness`).
+	If set to a value above 0, the video will stop recording after X minutes of recording time.
 
-	.. seealso:: :func:`image_contrast_increase`, :func:`image_brightness_increase`, :func:`image_square_brightness`, 
 
 
-.. function:: image_brightness_increase(float)
+.. function:: blob_size_range(rangef)
 	:noindex:
 
-	**default value:** 0
+	**default value:** [0.01,500000]
 
 
-	Value that is added to the preprocessed image before applying the threshold (see `image_adjust`). The neutral value is 0 here.
+	Minimum or maximum size of the individuals on screen after thresholding. Anything smaller or bigger than these values will be disregarded as noise.
 
-	.. seealso:: :func:`image_adjust`, 
 
 
-.. function:: image_contrast_increase(float)
+.. function:: image_brightness_increase(float)
 	:noindex:
 
-	**default value:** 3
+	**default value:** 0
 
 
-	Value that is multiplied to the preprocessed image before applying the threshold (see `image_adjust`). The neutral value is 1 here.
+	Value that is added to the preprocessed image before applying the threshold (see `image_adjust`). The neutral value is 0 here.
 
 	.. seealso:: :func:`image_adjust`, 
 
 
-.. function:: image_square_brightness(bool)
+.. function:: enable_live_tracking(bool)
 	:noindex:
 
 	**default value:** false
 
 
-	Squares the floating point input image after background subtraction. This brightens brighter parts of the image, and darkens darker regions.
+	When enabled, the program will save a .results file for the recorded video plus export the data (see `output_graphs` in the tracker documentation).
 
+	.. seealso:: :func:`output_graphs`, 
 
 
-.. function:: mask_path(path)
+.. function:: dilation_size(int)
 	:noindex:
 
-	**default value:** ""
+	**default value:** 0
 
 
-	Path to a video file containing a mask to be applied to the video while recording. Only works for conversions.
+	If set to a value greater than zero, detected shapes will be inflated (and potentially merged). When set to a value smaller than zero, detected shapes will be shrunk (and potentially split).
 
 
 
-.. function:: meta_age_days(int)
+.. function:: meta_write_these(array<string>)
 	:noindex:
 
-	**default value:** -1
+	**default value:** ["meta_species","meta_age_days","meta_conditions","meta_misc","cam_limit_exposure","meta_real_width","meta_source_path","meta_cmd","meta_build","meta_conversion_time","frame_rate","cam_undistort_vector","cam_matrix"]
 
 
-	Age of the individuals used in days.
+	The given settings values will be written to the video file.
 
 
 
-.. function:: meta_build(string)
+.. function:: image_contrast_increase(float)
 	:noindex:
 
-	**default value:** ""
+	**default value:** 3
 
 
-	The current commit hash. The video is branded with this information for later inspection of errors that might have occured.
+	Value that is multiplied to the preprocessed image before applying the threshold (see `image_adjust`). The neutral value is 1 here.
 
+	.. seealso:: :func:`image_adjust`, 
 
 
-.. function:: meta_cmd(string)
+.. function:: meta_conversion_time(string)
 	:noindex:
 
 	**default value:** ""
 
 
-	Command-line of the framegrabber when conversion was started.
+	This contains the time of when this video was converted / recorded as a string.
 
 
 
-.. function:: meta_conditions(string)
+.. function:: color_channel(ulong)
 	:noindex:
 
-	**default value:** ""
+	**default value:** 1
 
 
-	Treatment name.
+	Index (0-2) of the color channel to be used during video conversion, if more than one channel is present in the video file.
 
 
 
-.. function:: meta_conversion_time(string)
+.. function:: image_square_brightness(bool)
 	:noindex:
 
-	**default value:** ""
+	**default value:** false
 
 
-	This contains the time of when this video was converted / recorded as a string.
+	Squares the floating point input image after background subtraction. This brightens brighter parts of the image, and darkens darker regions.
 
 
 
-.. function:: meta_misc(string)
+.. function:: meta_cmd(string)
 	:noindex:
 
 	**default value:** ""
 
 
-	Other information.
+	Command-line of the framegrabber when conversion was started.
 
 
 
-.. function:: meta_species(string)
+.. function:: test_image(string)
 	:noindex:
 
-	**default value:** ""
+	**default value:** "checkerboard"
 
 
-	Name of the species used.
+	Defines, which test image will be used if `video_source` is set to 'test_image'.
 
+	.. seealso:: :func:`video_source`, 
 
 
-.. function:: meta_write_these(array<string>)
+.. function:: average_samples(int)
 	:noindex:
 
-	**default value:** ["meta_species","meta_age_days","meta_conditions","meta_misc","cam_limit_exposure","meta_real_width","meta_source_path","meta_cmd","meta_build","meta_conversion_time","frame_rate","cam_undistort_vector","cam_matrix"]
+	**default value:** 100
 
 
-	The given settings values will be written to the video file.
+	Number of samples taken to generate an average image. Usually has to be less if `average_method` is set to max.
 
+	.. seealso:: :func:`average_method`, 
 
 
-.. function:: quit_after_average(bool)
+.. function:: reset_average(bool)
 	:noindex:
 
 	**default value:** false
 
 
-	If set to true, this will terminate the program directly after generating (or loading) a background average image.
+	If set to true, the average will be regenerated using the live stream of images (video or camera).
 
 
 
-.. function:: recording(bool)
+.. function:: meta_age_days(int)
 	:noindex:
 
-	**default value:** true
+	**default value:** -1
 
 
-	If set to true, the program will record frames whenever individuals are found.
+	Age of the individuals used in days.
 
 
 
-.. function:: reset_average(bool)
+.. function:: threshold_maximum(int)
 	:noindex:
 
-	**default value:** false
+	**default value:** 255
 
 
-	If set to true, the average will be regenerated using the live stream of images (video or camera).
+	
 
 
 
-.. function:: save_raw_movie(bool)
+.. function:: meta_conditions(string)
 	:noindex:
 
-	**default value:** false
+	**default value:** ""
 
 
-	Saves a RAW movie (.mov) with a similar name in the same folder, while also recording to a PV file. This might reduce the maximum framerate slightly, but it gives you the best of both worlds.
+	Treatment name.
 
 
 
-.. function:: stop_after_minutes(uint)
+.. function:: threshold(int)
 	:noindex:
 
-	**default value:** 0
+	**default value:** 9
 
 
-	If set to a value above 0, the video will stop recording after X minutes of recording time.
+	Threshold to be applied to the input image to find blobs.
 
 
 
@@ -398,64 +408,64 @@ TGrabs parameters
 	.. seealso:: :func:`approximate_length_minutes`, :func:`stop_after_minutes`, 
 
 
-.. function:: terminate(bool)
+.. function:: approximate_length_minutes(uint)
 	:noindex:
 
-	**default value:** false
+	**default value:** 0
 
 
-	Terminates the program gracefully.
+	If available, please provide the approximate length of the video in minutes here, so that the encoding strategy can be chosen intelligently. If set to 0, infinity is assumed. This setting is overwritten by `stop_after_minutes`.
 
+	.. seealso:: :func:`stop_after_minutes`, 
 
 
-.. function:: terminate_error(bool)
+.. function:: quit_after_average(bool)
 	:noindex:
 
 	**default value:** false
 
 
-	Internal variable.
+	If set to true, this will terminate the program directly after generating (or loading) a background average image.
 
 
 
-.. function:: test_image(string)
+.. function:: crop_offsets(offsets)
 	:noindex:
 
-	**default value:** "checkerboard"
+	**default value:** [0,0,0,0]
 
 
-	Defines, which test image will be used if `video_source` is set to 'test_image'.
+	Percentage offsets [left, top, right, bottom] that will be cut off the input images (e.g. [0.1,0.1,0.5,0.5] will remove 10%% from the left and top and 50%% from the right and bottom and the video will be 60%% smaller in X and Y).
 
-	.. seealso:: :func:`video_source`, 
 
 
-.. function:: threshold(int)
+.. function:: equalize_histogram(bool)
 	:noindex:
 
-	**default value:** 9
+	**default value:** false
 
 
-	Threshold to be applied to the input image to find blobs.
+	Equalizes the histogram of the image before thresholding and background subtraction.
 
 
 
-.. function:: threshold_maximum(int)
+.. function:: crop_window(bool)
 	:noindex:
 
-	**default value:** 255
+	**default value:** false
 
 
-	
+	If set to true, the grabber will open a window before the analysis starts where the user can drag+drop points defining the crop_offsets.
 
 
 
-.. function:: use_adaptive_threshold(bool)
+.. function:: adaptive_threshold_scale(float)
 	:noindex:
 
-	**default value:** false
+	**default value:** 2
 
 
-	Enables or disables adaptive thresholding (slower than normal threshold). Deals better with weird backgrounds.
+	Threshold value to be used for adaptive thresholding, if enabled.
 
 
 
@@ -470,23 +480,13 @@ TGrabs parameters
 	.. seealso:: :func:`closing_size`, 
 
 
-.. function:: video_conversion_range(pair<int,int>)
-	:noindex:
-
-	**default value:** [-1,-1]
-
-
-	If set to a valid value (!= -1), start and end values determine the range converted.
-
-
-
-.. function:: video_source(string)
+.. function:: cam_limit_exposure(int)
 	:noindex:
 
-	**default value:** "basler"
+	**default value:** 5500
 
 
-	Where the video is recorded from. Can be the name of a file, or one of the keywords ['basler', 'webcam', 'test_image'].
+	[BASLER] Sets the camera's exposure time in microseconds.
 
 
 
diff --git a/docs/parameters_trex.rst b/docs/parameters_trex.rst
index b3bab90..c49ed5e 100644
--- a/docs/parameters_trex.rst
+++ b/docs/parameters_trex.rst
@@ -3,1212 +3,1202 @@
 
 TRex parameters
 ###############
-.. function:: analysis_paused(bool)
+.. function:: image_invert(bool)
 
 	**default value:** false
 
 
-	Halts the analysis.
-
-
-
-.. function:: analysis_range(pair<int,int>)
-
-	**default value:** [-1,-1]
-
+	Inverts the image greyscale values before thresholding.
 
-	Sets start and end of the analysed frames.
 
 
+.. function:: cam_scale(float)
 
-.. function:: app_name(string)
+	**default value:** 1
 
-	**default value:** "TRex"
 
+	Scales the image down or up by the given factor.
 
-	Name of the application.
 
 
+.. function:: cam_matrix(array<float>)
 
-.. function:: auto_apply(bool)
+	**default value:** [2945.0896,0,617.255432,0,2942.825195,682.473633,0,0,1]
 
-	**default value:** false
 
+	
 
-	If set to true, the application will automatically apply the network with existing weights once the analysis is done. It will then automatically correct and reanalyse the video.
 
 
+.. function:: gui_connectivity_matrix_file(path)
 
-.. function:: auto_minmax_size(bool)
+	**default value:** ""
 
-	**default value:** false
 
+	Path to connectivity table. Expected structure is a csv table with columns [frame | #(track_max_individuals^2) values] and frames in y-direction.
 
-	Program will try to find minimum / maximum size of the individuals automatically for the current `cm_per_pixel` setting. Can only be passed as an argument upon startup. The calculation is based on the median blob size in the video and assumes a relatively low level of noise.
 
-	.. seealso:: :func:`cm_per_pixel`, 
 
+.. function:: gui_foi_types(array<string>)
 
-.. function:: auto_no_results(bool)
+	**default value:** []
 
-	**default value:** false
 
+	A list of all the foi types registered.
 
-	If set to true, the auto_quit option will NOT save a .results file along with the NPZ (or CSV) files. This saves time and space, but also means that the tracked portion cannot be loaded via -load afterwards. Useful, if you only want to analyse the resulting data and never look at the tracked video again.
 
 
+.. function:: gui_foi_name(string)
 
-.. function:: auto_no_tracking_data(bool)
+	**default value:** "correcting"
 
-	**default value:** false
 
+	If not empty, the gui will display the given FOI type in the timeline and allow to navigate between them via M/N.
 
-	If set to true, the auto_quit option will NOT save any `output_graphs` tracking data - just the posture data (if enabled) and the results file (if not disabled). This saves time and space if that is a need.
 
-	.. seealso:: :func:`output_graphs`, 
 
+.. function:: huge_timestamp_ends_segment(bool)
 
-.. function:: auto_number_individuals(bool)
+	**default value:** true
 
-	**default value:** false
 
+	
 
-	Program will automatically try to find the number of individuals (with sizes given in `blob_size_ranges`) and set `track_max_individuals` to that value.
 
-	.. seealso:: :func:`blob_size_ranges`, :func:`track_max_individuals`, 
 
+.. function:: track_blacklist(array<array<vec>>)
 
-.. function:: auto_quit(bool)
+	**default value:** []
 
-	**default value:** false
 
+	If this is not empty, objects within the given rectangles or polygons (>= 3 points) [[x0,y0],[x1,y1](, ...)], ...] will be ignored during tracking.
 
-	If set to true, the application will automatically save all results and export CSV files and quit, after the analysis is complete.
 
 
+.. function:: gpu_accumulation_max_segments(uint)
 
-.. function:: auto_train(bool)
+	**default value:** 15
 
-	**default value:** false
 
+	If there are more than `gpu_accumulation_max_segments` global segments to be trained on, they will be filtered according to their quality until said limit is reached.
 
-	If set to true (and `recognition_enable` is also set to true), the application will automatically train the recognition network with the best track segment and apply it to the video.
 
-	.. seealso:: :func:`recognition_enable`, 
 
+.. function:: gpu_max_cache(float)
 
-.. function:: auto_train_dont_apply(bool)
+	**default value:** 2
 
-	**default value:** false
 
+	Size of the image cache (transferring to GPU) in GigaBytes when applying the network.
 
-	If set to true, setting `auto_train` will only train and not apply the trained network.
 
-	.. seealso:: :func:`auto_train`, 
 
+.. function:: gui_auto_scale_focus_one(bool)
 
-.. function:: auto_train_on_startup(bool)
+	**default value:** true
 
-	**default value:** false
 
+	If set to true (and `gui_auto_scale` set to true, too), the tracker will zoom in on the selected individual, if one is selected.
 
-	This is a parameter that is used by the system to determine whether `auto_train` was set on startup, and thus also whether a failure of `auto_train` should result in a crash (return code != 0).
+	.. seealso:: :func:`gui_auto_scale`, 
 
-	.. seealso:: :func:`auto_train`, :func:`auto_train`, 
 
+.. function:: gpu_learning_rate(float)
 
-.. function:: blob_size_ranges(BlobSizeRange)
+	**default value:** 0.0005
 
-	**default value:** [[0.1,3]]
 
+	Learning rate for training a recognition network.
 
-	Blobs below the lower bound are recognized as noise instead of individuals. Blobs bigger than the upper bound are considered to potentially contain more than one individual. The unit is #pixels * (`meta_real_width` / video_width).
 
-	.. seealso:: :func:`meta_real_width`, 
 
+.. function:: gpu_accumulation_enable_final_step(bool)
 
-.. function:: blob_split_global_shrink_limit(float)
+	**default value:** true
 
-	**default value:** 0.2
 
+	If enabled, the network will be trained on all the validation + training data accumulated, as a last step of the accumulation protocol cascade. This is intentional overfitting.
 
-	The minimum percentage of the minimum in `blob_size_ranges`, that a blob is allowed to be reduced to during splitting. If this value is set too low, the program might start recognizing parts of individual as other individual too quickly.
 
-	.. seealso:: :func:`blob_size_ranges`, 
 
+.. function:: auto_train_dont_apply(bool)
 
-.. function:: blob_split_max_shrink(float)
+	**default value:** false
 
-	**default value:** 0.2
 
+	If set to true, setting `auto_train` will only train and not apply the trained network.
 
-	The minimum percentage of the starting blob size (after thresholding), that a blob is allowed to be reduced to during splitting. If this value is set too low, the program might start recognizing parts of individual as other individual too quickly.
+	.. seealso:: :func:`auto_train`, 
 
 
+.. function:: gpu_accepted_uniqueness(float)
 
-.. function:: blobs_per_thread(float)
+	**default value:** 0
 
-	**default value:** 150
 
+	If changed (from 0), the ratio given here will be the acceptable uniqueness for the video - which will stop accumulation if reached.
 
-	Number of blobs for which properties will be calculated per thread.
 
 
+.. function:: matching_probability_threshold(float)
 
-.. function:: build(string)
+	**default value:** 0.1
 
-	**default value:** ""
 
+	The probability below which a possible connection between blob and identity is considered too low. The probability depends largely upon settings like `track_max_speed`.
 
-	Current build version
+	.. seealso:: :func:`track_max_speed`, 
 
 
+.. function:: gpu_max_epochs(ulong)
 
-.. function:: calculate_posture(bool)
+	**default value:** 150
 
-	**default value:** true
 
+	Maximum number of epochs for training a recognition network.
 
-	Enables or disables posture calculation. Can only be set before the video is analysed (e.g. in a settings file or as a startup parameter).
 
 
+.. function:: individuals_per_thread(float)
 
-.. function:: cam_circle_mask(bool)
+	**default value:** 1
 
-	**default value:** false
 
+	Number of individuals for which positions will be estimated per thread.
 
-	If set to true, a circle with a diameter of the width of the video image will mask the video. Anything outside that circle will be disregarded as background.
 
 
+.. function:: midline_samples(uint64)
 
-.. function:: cam_matrix(array<float>)
+	**default value:** 0
 
-	**default value:** [2945.0896,0,617.255432,0,2942.825195,682.473633,0,0,1]
 
+	The maximum number of samples taken for generating a ``median midline length``. Setting this to 0 removes the limit all together. A limit may be set for very long videos, or videos with lots of individuals, for memory reasons.
 
-	
+	.. seealso:: :func:``, :func:``, 
 
 
+.. function:: recognition_save_training_images(bool)
 
-.. function:: cam_scale(float)
+	**default value:** false
 
-	**default value:** 1
 
+	If set to true, the program will save the images used for a successful training of the recognition network to the output path.
 
-	Scales the image down or up by the given factor.
 
 
+.. function:: peak_mode(peak_mode_t)
 
-.. function:: cam_undistort(bool)
+	**default value:** pointy
 
-	**default value:** false
+	**possible values:**
+		- `pointy`: The head is broader than the tail.
+		- `broad`: The tail is broader than the head.
 
+	This determines whether the tail of an individual should be expected to be pointy or broad.
 
-	If set to true, the recorded video image will be undistorted using `cam_undistort_vector` (1x5) and `cam_matrix` (3x3).
 
-	.. seealso:: :func:`cam_undistort_vector`, :func:`cam_matrix`, 
 
 
-.. function:: cam_undistort_vector(array<float>)
+.. function:: recognition_smooth_amount(ulong)
 
-	**default value:** [-0.257663,-0.192336,0.002455,0.003988,0.35924]
+	**default value:** 200
 
 
 	
 
 
 
-.. function:: cm_per_pixel(float)
+.. function:: smooth_window(uint)
 
-	**default value:** 0
+	**default value:** 2
 
 
-	The ratio of `meta_real_width` / `video_size`.width that is used to convert pixels to centimeters. Will be automatically calculated based on the video and the meta-parameters contained within.
+	Smoothing window used for exported data with the #smooth tag.
 
-	.. seealso:: :func:`meta_real_width`, :func:`video_size`, 
 
 
-.. function:: cmd_line(string)
+.. function:: recognition_border(recognition_border_t)
 
-	**default value:** ""
+	**default value:** none
+
+	**possible values:**
+		- `none`: No border at all. All points are inside the recognition boundary. (default)
+		- `heatmap`: Looks at a subset of frames from the video, trying to find out where individuals go and masking all the places they do not.
+		- `outline`: Similar to heatmap, but tries to build a convex border around the arena (without holes in it).
+		- `shapes`: An array of convex shapes. Set coordinates by changing `recognition_shapes`.
+		- `grid`: The points defined in `grid_points` are turned into N different circles inside the arena (with points in `grid_points` being the circle centers), which define in/out if inside/outside any of the circles.
+		- `circle`: The video-file provides a binary mask (e.g. when `cam_circle_mask` was set to true during recording), which is then used to determine in/out.
 
+	This defines the type of border that is used in all automatic recognition routines. Depending on the type set here, you might need to set other parameters as well (e.g. `recognition_shapes`). In general, this defines whether an image of an individual is usable for automatic recognition. If it is inside the defined border, then it will be passed on to the recognition network - if not, then it won't.
 
-	An approximation of the command-line arguments passed to the program.
 
+	.. seealso:: :func:`recognition_shapes`, 
 
 
-.. function:: correct_illegal_lines(bool)
+.. function:: grid_points(array<vec>)
 
-	**default value:** false
+	**default value:** []
 
 
-	In older versions of the software, blobs can be constructed in 'illegal' ways, meaning the lines might be overlapping. If the software is printing warnings about it, this should probably be enabled (makes it slower).
+	Whenever there is an identification network loaded and this array contains more than one point [[x0,y0],[x1,y1],...], then the network will only be applied to blobs within circles around these points. The size of these circles is half of the average distance between the points.
 
 
 
-.. function:: debug(bool)
+.. function:: cam_circle_mask(bool)
 
 	**default value:** false
 
 
-	Enables some verbose debug print-outs.
+	If set to true, a circle with a diameter of the width of the video image will mask the video. Anything outside that circle will be disregarded as background.
 
 
 
-.. function:: debug_recognition_output_all_methods(bool)
+.. function:: manually_approved(map<int,int>)
 
-	**default value:** false
+	**default value:** {}
 
 
-	If set to true, a complete training will attempt to output all images for each identity with all available normalization methods.
+	A list of ranges of manually approved frames that may be used for generating training datasets {232:232,5555:5560}.
 
 
 
-.. function:: enable_absolute_difference(bool)
+.. function:: tags_path(path)
 
-	**default value:** true
+	**default value:** ""
 
 
-	If set to true, the threshold values will be applied to abs(image - background). Otherwise max(0, image - background).
+	If this path is set, the program will try to find tags and save them at the specified location.
 
 
 
-.. function:: error_terminate(bool)
+.. function:: output_centered(bool)
 
 	**default value:** false
 
 
-	
+	If set to true, the origin of all X and Y coordinates is going to be set to the center of the video.
 
 
 
-.. function:: event_min_peak_offset(float)
+.. function:: output_recognition_data(bool)
 
-	**default value:** 0.15
+	**default value:** false
 
 
-	
+	Save recognition / probability data npz file along with the usual NPZ/CSV files containing positions and such. If set to true, a file called '`output_dir`/`fish_data_dir`/`filename`\_recognition_fishXXX.npz' will be created for each individual XXX.
 
+	.. seealso:: :func:`output_dir`, :func:`fish_data_dir`, :func:`filename`, 
 
 
-.. function:: exec(path)
+.. function:: output_npz(bool)
 
-	**default value:** ""
+	**default value:** true
 
 
-	This can be set to the path of an additional settings file that is executed after the normal settings file.
+	When pressing the S(ave) button or using auto_quit, this setting allows to switch between CSV and NPZ output. If set to true, all output will be NPZ files (recommended). If set to false, some output (`output_graphs`) will be CSV files, while others (posture data, etc.) will remain in NPZ format due to technical constraints.
 
+	.. seealso:: :func:`output_graphs`, 
 
 
-.. function:: ffmpeg_path(path)
+.. function:: tracklet_max_images(ulong)
 
-	**default value:** ""
+	**default value:** 0
 
 
-	Path to an ffmpeg executable file. This is used for converting videos after recording them (from the GUI). It is not a critical component of the software, but mostly for convenience.
+	Maximum number of images that are being output per tracklet given that `output_image_per_tracklet` is true. If the number is 0, then every image will be exported that has been recognized as an individual.
 
+	.. seealso:: :func:`output_image_per_tracklet`, 
 
 
-.. function:: filename(path)
+.. function:: cm_per_pixel(float)
 
-	**default value:** ""
+	**default value:** 0
 
 
-	Opened filename (without .pv).
+	The ratio of `meta_real_width` / `video_size`.width that is used to convert pixels to centimeters. Will be automatically calculated based on the video and the meta-parameters contained within.
 
+	.. seealso:: :func:`meta_real_width`, :func:`video_size`, 
 
 
-.. function:: fishdata_dir(path)
+.. function:: output_graphs(array<pair<string,array<string>>>)
 
-	**default value:** "data"
+	**default value:** [["X",["RAW","WCENTROID"]],["Y",["RAW","WCENTROID"]],["X",["RAW","HEAD"]],["Y",["RAW","HEAD"]],["VX",["RAW","HEAD"]],["VY",["RAW","HEAD"]],["AX",["RAW","HEAD"]],["AY",["RAW","HEAD"]],["ANGLE",["RAW"]],["ANGULAR_V",["RAW"]],["ANGULAR_A",["RAW"]],["MIDLINE_OFFSET",["RAW"]],["normalized_midline",["RAW"]],["midline_length",["RAW"]],["midline_x",["RAW"]],["midline_y",["RAW"]],["segment_length",["RAW"]],["SPEED",["RAW","WCENTROID"]],["SPEED",["SMOOTH","WCENTROID"]],["SPEED",["RAW","PCENTROID"]],["SPEED",["RAW","HEAD"]],["BORDER_DISTANCE",["PCENTROID"]],["time",[]],["timestamp",[]],["frame",[]],["missing",[]],["num_pixels",[]],["ACCELERATION",["RAW","PCENTROID"]],["ACCELERATION",["RAW","WCENTROID"]]]
 
 
-	Subfolder (below `output_dir`) where the exported NPZ or CSV files will be saved (see `output_graphs`).
+	The functions that will be exported when saving to CSV, or shown in the graph. [['X',[option], ...]]
 
-	.. seealso:: :func:`output_dir`, :func:`output_graphs`, 
 
 
-.. function:: frame_rate(int)
+.. function:: cam_undistort_vector(array<float>)
 
-	**default value:** 0
+	**default value:** [-0.257663,-0.192336,0.002455,0.003988,0.35924]
 
 
-	Specifies the frame rate of the video. It is used e.g. for playback speed and certain parts of the matching algorithm. Will be set by the .settings of a video (or by the video itself).
+	
 
 
 
-.. function:: gpu_accepted_uniqueness(float)
+.. function:: output_min_frames(ulong)
 
-	**default value:** 0
+	**default value:** 1
 
 
-	If changed (from 0), the ratio given here will be the acceptable uniqueness for the video - which will stop accumulation if reached.
+	Filters out all individuals with fewer than N frames when exporting. Individuals with fewer than N frames will also be hidden in the GUI unless `gui_show_inactive_individuals` is enabled (default).
 
+	.. seealso:: :func:`gui_show_inactive_individuals`, 
 
 
-.. function:: gpu_accumulation_enable_final_step(bool)
+.. function:: gui_show_dataset(bool)
 
-	**default value:** true
+	**default value:** false
 
 
-	If enabled, the network will be trained on all the validation + training data accumulated, as a last step of the accumulation protocol cascade. This is intentional overfitting.
+	Show/hide detailed dataset information on-screen.
 
 
 
-.. function:: gpu_accumulation_max_segments(uint)
+.. function:: track_posture_threshold(int)
 
 	**default value:** 15
 
 
-	If there are more than `gpu_accumulation_max_segments` global segments to be trained on, they will be filtered according to their quality until said limit is reached.
+	Same as `track_threshold`, but for posture estimation.
 
+	.. seealso:: :func:`track_threshold`, 
 
 
-.. function:: gpu_enable_accumulation(bool)
+.. function:: output_default_options(map<string,array<string>>)
 
-	**default value:** true
+	**default value:** {"event_acceleration":["/10"],"ACCELERATION":["/15","SMOOTH","CENTROID"],"L_V":["/10"],"v_direction":["/10"],"DOT_V":["/10"],"ANGULAR_V":["/10","SMOOTH","CENTROID"],"ANGULAR_A":["/1000","SMOOTH","CENTROID"],"NEIGHBOR_VECTOR_T":["/1"],"SPEED":["/10","SMOOTH"],"NEIGHBOR_DISTANCE":["/10"],"X":["/100"],"Y":["/100"],"tailbeat_threshold":["pm"],"tailbeat_peak":["pm"],"threshold_reached":["POINTS"],"midline_length":["/15"],"amplitude":["/100"],"outline_size":["/100"],"global":["/10"]}
 
 
-	Enables or disables the idtrackerai-esque accumulation protocol cascade. It is usually a good thing to enable this (especially in more complicated videos), but can be disabled as a fallback (e.g. if computation time is a major constraint).
+	Default scaling and smoothing options for output functions, which are applied to functions in `output_graphs` during export.
 
+	.. seealso:: :func:`output_graphs`, 
 
 
-.. function:: gpu_learning_rate(float)
+.. function:: output_annotations(map<string,string>)
 
-	**default value:** 0.0005
+	**default value:** {"ACCELERATION":"cm/s2","ACCELERATION_SMOOTH":"cm/s2","BORDER_DISTANCE":"cm","NEIGHBOR_DISTANCE":"cm","ORIENTATION":"rad","SPEED":"cm/s","SPEED_OLD":"cm/s","SPEED_SMOOTH":"cm/s","VX":"cm/s","VY":"cm/s","X":"cm","Y":"cm","global":"px"}
 
 
-	Learning rate for training a recognition network.
+	Units (as a string) of output functions to be annotated in various places like graphs.
 
 
 
-.. function:: gpu_max_cache(float)
+.. function:: analysis_range(pair<int,int>)
 
-	**default value:** 2
+	**default value:** [-1,-1]
 
 
-	Size of the image cache (transferring to GPU) in GigaBytes when applying the network.
+	Sets start and end of the analysed frames.
 
 
 
-.. function:: gpu_max_epochs(ulong)
+.. function:: auto_train_on_startup(bool)
 
-	**default value:** 150
+	**default value:** false
 
 
-	Maximum number of epochs for training a recognition network.
+	This is a parameter that is used by the system to determine whether `auto_train` was set on startup, and thus also whether a failure of `auto_train` should result in a crash (return code != 0).
 
+	.. seealso:: :func:`auto_train`, 
 
 
-.. function:: gpu_max_sample_gb(float)
+.. function:: auto_train(bool)
 
-	**default value:** 2
+	**default value:** false
 
 
-	Maximum size of per-individual sample images in GigaBytes. If the collected images are too many, they will be sub-sampled in regular intervals.
+	If set to true (and `recognition_enable` is also set to true), the application will automatically train the recognition network with the best track segment and apply it to the video.
 
+	.. seealso:: :func:`recognition_enable`, 
 
 
-.. function:: gpu_min_elements(ulong)
+.. function:: auto_no_results(bool)
 
-	**default value:** 25000
+	**default value:** false
 
 
-	Minimum number of images being collected, before sending them to the GPU.
+	If set to true, the auto_quit option will NOT save a .results file along with the NPZ (or CSV) files. This saves time and space, but also means that the tracked portion cannot be loaded via -load afterwards. Useful, if you only want to analyse the resulting data and never look at the tracked video again.
 
 
 
-.. function:: gpu_min_iterations(ulong)
+.. function:: track_trusted_probability(float)
 
-	**default value:** 100
+	**default value:** 0.5
 
 
-	Minimum number of iterations per epoch for training a recognition network.
+	If the probability, that is used to assign an individual to an object, is smaller than this value, the current segment will be ended (thus this will also not be a consecutive segment anymore for this individual).
 
 
 
-.. function:: grid_points(array<vec>)
+.. function:: correct_illegal_lines(bool)
 
-	**default value:** []
+	**default value:** false
 
 
-	Whenever there is an identification network loaded and this array contains more than one point [[x0,y0],[x1,y1],...], then the network will only be applied to blobs within circles around these points. The size of these circles is half of the average distance between the points.
+	In older versions of the software, blobs can be constructed in 'illegal' ways, meaning the lines might be overlapping. If the software is printing warnings about it, this should probably be enabled (makes it slower).
 
 
 
-.. function:: grid_points_scaling(float)
+.. function:: gui_focus_group(array<int>)
 
-	**default value:** 0.8
+	**default value:** []
 
 
-	Scaling applied to the average distance between the points in order to shrink or increase the size of the circles for recognition (see `grid_points`).
+	Focus on this group of individuals.
 
-	.. seealso:: :func:`grid_points`, 
 
 
-.. function:: gui_auto_scale(bool)
+.. function:: track_threshold(int)
 
-	**default value:** false
+	**default value:** 15
 
 
-	If set to true, the tracker will always try to zoom in on the whole group. This is useful for some individuals in a huge video (because if they are too tiny, you cant see them and their posture anymore).
+	Constant used in background subtraction. Pixels with grey values above this threshold will be interpreted as potential individuals, while pixels below this threshold will be ignored.
 
 
 
-.. function:: gui_auto_scale_focus_one(bool)
+.. function:: web_quality(int)
 
-	**default value:** true
+	**default value:** 75
 
 
-	If set to true (and `gui_auto_scale` set to true, too), the tracker will zoom in on the selected individual, if one is selected.
+	JPEG quality of images transferred over the web interface.
 
-	.. seealso:: :func:`gui_auto_scale`, 
 
 
-.. function:: gui_background_color(color)
+.. function:: track_time_probability_enabled(bool)
 
-	**default value:** [0,0,0,150]
+	**default value:** true
 
 
-	Values < 255 will make the background more transparent in standard view. This might be useful with very bright backgrounds.
+	
 
 
 
-.. function:: gui_connectivity_matrix(map<int,array<float>>)
+.. function:: gui_outline_thickness(ulong)
 
-	**default value:** {}
+	**default value:** 1
 
 
-	Internally used to store the connectivity matrix.
+	The thickness of outline / midlines in the GUI.
 
 
 
-.. function:: gui_connectivity_matrix_file(path)
+.. function:: recognition_image_scale(float)
 
-	**default value:** ""
+	**default value:** 1
 
 
-	Path to connectivity table. Expected structure is a csv table with columns [frame | #(track_max_individuals^2) values] and frames in y-direction.
+	Scaling applied to the images before passing them to the network.
 
 
 
-.. function:: gui_draw_only_filtered_out(bool)
+.. function:: threshold_ratio_range(rangef)
 
-	**default value:** false
+	**default value:** [0.5,1]
 
 
-	Only show filtered out blob texts.
+	If `track_threshold_2` is not equal to zero, this ratio will be multiplied by the number of pixels present before the second threshold. If the resulting size falls within the given range, the blob is deemed okay.
 
+	.. seealso:: :func:`track_threshold_2`, 
 
 
-.. function:: gui_equalize_blob_histograms(bool)
+.. function:: recognition_enable(bool)
 
 	**default value:** true
 
 
-	Equalize histograms of blobs wihtin videos (makes them more visible).
+	This enables internal training. Requires Python3 and Keras to be available.
 
 
 
-.. function:: gui_faded_brightness(uchar)
+.. function:: track_intensity_range(rangel)
 
-	**default value:** 255
+	**default value:** [-1,-1]
 
 
-	The alpha value of tracking-related elements when timeline is hidden (0-255).
+	When set to valid values, objects will be filtered to have an average pixel intensity within the given range.
 
 
 
-.. function:: gui_fish_color(string)
+.. function:: speed_extrapolation(float)
 
-	**default value:** "identity"
+	**default value:** 3
 
 
-	
+	Used for matching when estimating the next position of an individual. Smaller values are appropriate for lower frame rates. The higher this value is, the more previous frames will have significant weight in estimating the next position (with an exponential decay).
 
 
 
-.. function:: gui_focus_group(array<int>)
+.. function:: posture_direction_smoothing(ulong)
 
-	**default value:** []
+	**default value:** 0
 
 
-	Focus on this group of individuals.
+	Enables or disables smoothing of the posture orientation based on previous frames (not good for fast turns).
 
 
 
-.. function:: gui_foi_name(string)
+.. function:: gui_show_visualfield(bool)
 
-	**default value:** "correcting"
+	**default value:** false
 
 
-	If not empty, the gui will display the given FOI type in the timeline and allow to navigate between them via M/N.
+	Show/hide the visual field rays.
 
 
 
-.. function:: gui_foi_types(array<string>)
+.. function:: track_max_speed(float)
 
-	**default value:** []
+	**default value:** 50
 
 
-	A list of all the foi types registered.
+	The maximum speed an individual can have (=> the maximum distance an individual can travel within one second) in cm/s. Uses `meta_real_width`.
 
+	.. seealso:: :func:`meta_real_width`, 
 
 
-.. function:: gui_frame(int)
+.. function:: cam_undistort(bool)
 
-	**default value:** 0
+	**default value:** false
 
 
-	The currently visible frame.
+	If set to true, the recorded video image will be undistorted using `cam_undistort_vector` (1x5) and `cam_matrix` (3x3).
 
+	.. seealso:: :func:`cam_undistort_vector`, :func:`cam_matrix`, 
 
 
-.. function:: gui_heatmap_dynamic(bool)
+.. function:: gui_transparent_background(bool)
 
 	**default value:** false
 
 
-	If enabled the heatmap will only show frames before the frame currently displayed in the graphical user interface.
+	If enabled, fonts might look weird but you can record movies (and images) with transparent background (if gui_background_color.alpha is < 255).
 
 
 
-.. function:: gui_heatmap_resolution(uint)
+.. function:: recognition_image_size(size)
 
-	**default value:** 75
+	**default value:** [80,80]
 
 
-	Square resolution of individual heatmaps displayed with `gui_show_heatmap`. Will generate a square grid, each cell with dimensions (video_width / N, video_height / N), and sort all positions of each identity into it.
+	Size of each image generated for network training.
 
-	.. seealso:: :func:`gui_show_heatmap`, 
 
 
-.. function:: gui_heatmap_source(string)
+.. function:: track_threshold_2(int)
 
-	**default value:** ""
+	**default value:** 0
 
 
-	If empty, the source will simply be an individuals identity. Otherwise, information from export data sources will be used.
+	If not zero, a second threshold will be applied to all objects after they have been deemed to be theoretically large enough. Then they are compared to #before_pixels * `threshold_ratio_range` to see how much they have been shrunk.
 
+	.. seealso:: :func:`threshold_ratio_range`, 
 
 
-.. function:: gui_interface_scale(float)
+.. function:: gpu_max_sample_gb(float)
 
-	**default value:** 0.75
+	**default value:** 2
 
 
-	Scales the whole interface. A value greater than 1 will make it smaller.
+	Maximum size of per-individual sample images in GigaBytes. If the collected images are too many, they will be sub-sampled in regular intervals.
 
 
 
-.. function:: gui_max_path_time(float)
+.. function:: track_speed_decay(float)
 
-	**default value:** 3
+	**default value:** 0.7
 
 
-	Length (in time) of the trails shown in GUI.
+	The amount the expected speed is reduced over time when an individual is lost. When individuals collide, depending on the expected behavior for the given species, one should choose different values for this variable. If the individuals usually stop when they collide, this should be set to a value > 0.8. If the individuals are expected to move over one another, the value should be set to a small value > 0.
 
 
 
-.. function:: gui_outline_thickness(ulong)
+.. function:: blob_split_max_shrink(float)
 
-	**default value:** 1
+	**default value:** 0.2
 
 
-	The thickness of outline / midlines in the GUI.
+	The minimum percentage of the starting blob size (after thresholding) that a blob is allowed to be reduced to during splitting. If this value is set too low, the program might start recognizing parts of individuals as other individuals too quickly.
 
 
 
-.. function:: gui_playback_speed(float)
+.. function:: terminate_training(bool)
 
-	**default value:** 1
+	**default value:** false
 
 
-	Playback speed when pressing SPACE.
+	Setting this to true aborts the training in progress.
 
 
 
-.. function:: gui_recording_format(gui_recording_format_t)
+.. function:: gui_draw_only_filtered_out(bool)
 
-	**default value:** avi
+	**default value:** false
 
-	**possible values:**
-		- `avi`: AVI / video format (codec FFV1 is used in unix systems)
-		- `jpg`: individual images in JPEG format
-		- `png`: individual images in PNG format
 
-	Sets the format for recording mode (when R is pressed in the GUI). Supported formats are 'avi', 'jpg' and 'png'. JPEGs have 75%% compression, AVI is using MJPEG compression.
+	Only show filtered out blob texts.
 
 
 
+.. function:: auto_number_individuals(bool)
 
-.. function:: gui_show_blobs(bool)
+	**default value:** false
 
-	**default value:** true
 
+	Program will automatically try to find the number of individuals (with sizes given in `blob_size_ranges`) and set `track_max_individuals` to that value.
 
-	Showing or hiding individual raw blobs in tracking view (are always shown in RAW mode).
+	.. seealso:: :func:`blob_size_ranges`, :func:`track_max_individuals`, 
 
 
+.. function:: meta_real_width(float)
 
-.. function:: gui_show_boundary_crossings(bool)
+	**default value:** 0
 
-	**default value:** true
 
+	The actual width of what is seen on a video image in centimeters. So for example if the image is cropped exactly to the tank, it would be the width of the tank. Used to convert from pixels to centimeters.
 
-	If set to true (and the number of individuals is set to a number > 0), the tracker will show whenever an individual enters the recognition boundary. Indicated by an expanding cyan circle around it.
 
 
+.. function:: visual_field_eye_offset(float)
 
-.. function:: gui_show_dataset(bool)
+	**default value:** 0.15
 
-	**default value:** false
 
+	A percentage telling the program how much the eye positions are offset from the start of the midline.
 
-	Show/hide detailed dataset information on-screen.
 
 
+.. function:: track_end_segment_for_speed(bool)
 
-.. function:: gui_show_detailed_probabilities(bool)
+	**default value:** true
 
-	**default value:** false
 
+	Sometimes individuals might be assigned to blobs that are far away from the previous position. This could indicate wrong assignments, but not necessarily. If this variable is set to true, consecutive frame segments will end whenever high speeds are reached, just to be on the safe side. For scenarios with lots of individuals (and no recognition) this might spam yellow bars in the timeline and may be disabled.
 
-	Show/hide detailed probability stats when an individual is selected.
 
 
+.. function:: recognition_coeff(ulong)
 
-.. function:: gui_show_export_options(bool)
+	**default value:** 50
 
-	**default value:** false
 
+	
 
-	Show/hide the export options widget.
 
 
+.. function:: recognition_shapes(array<array<vec>>)
 
-.. function:: gui_show_fish(pair<int64,int>)
+	**default value:** []
 
-	**default value:** [-1,-1]
 
+	If `recognition_border` is set to 'shapes', then the identification network will only be applied to blobs within the convex shapes specified here.
 
-	Show debug output for {blob_id, fish_id}.
+	.. seealso:: :func:`recognition_border`, 
 
 
+.. function:: manual_splits(map<int,set<int64>>)
 
-.. function:: gui_show_graph(bool)
+	**default value:** {}
 
-	**default value:** false
 
+	This map contains {frame: [blobid1,blobid2,...]} where frame and blobid are integers. When this is read during tracking for a frame, the tracker will attempt to force-split the given blob ids.
 
-	Show/hide the data time-series graph.
 
 
+.. function:: fishdata_dir(path)
 
-.. function:: gui_show_heatmap(bool)
+	**default value:** "data"
 
-	**default value:** false
 
+	Subfolder (below `output_dir`) where the exported NPZ or CSV files will be saved (see `output_graphs`).
 
-	Showing a heatmap per identity, normalized by maximum samples per grid-cell.
+	.. seealso:: :func:`output_dir`, :func:`output_graphs`, 
 
 
+.. function:: manual_matches(map<int,map<int,int64>>)
 
-.. function:: gui_show_histograms(bool)
+	**default value:** {}
 
-	**default value:** false
 
+	A map of manually defined matches (also updated by GUI menu for assigning manual identities). {{frame: {fish0: blob2, fish1: blob0}}, ...}
 
-	Equivalent to the checkbox visible in GUI on the bottom-left.
 
 
+.. function:: gui_run(bool)
 
-.. function:: gui_show_inactive_individuals(bool)
+	**default value:** false
 
-	**default value:** true
 
+	When set to true, the GUI starts playing back the video and stops once it reaches the end, or is set to false.
 
-	Show/hide individuals that have not been seen for longer than `track_max_reassign_time`.
 
-	.. seealso:: :func:`track_max_reassign_time`, 
 
+.. function:: output_normalize_midline_data(bool)
 
-.. function:: gui_show_manual_matches(bool)
+	**default value:** false
 
-	**default value:** true
 
+	If enabled: save a normalized version of the midline data saved whenever `output_posture_data` is set to true. Normalized means that the position of the midline points is normalized across frames (or the distance between head and point n in the midline array).
 
-	Show/hide manual matches in path.
-
+	.. seealso:: :func:`output_posture_data`, 
 
 
-.. function:: gui_show_memory_stats(bool)
+.. function:: video_size(size)
 
-	**default value:** false
+	**default value:** [-1,-1]
 
 
-	Showing or hiding memory statistics.
+	The dimensions of the currently loaded video.
 
 
 
-.. function:: gui_show_midline(bool)
+.. function:: version(string)
 
-	**default value:** true
+	**default value:** "RC6"
 
 
-	Showing or hiding individual midlines in tracking view.
+	Current application version.
 
 
 
-.. function:: gui_show_midline_histogram(bool)
+.. function:: midline_start_with_head(bool)
 
 	**default value:** false
 
 
-	Displays a histogram for midline lengths.
+	If enabled, the midline is going to be estimated starting at the head instead of the tail.
 
 
 
-.. function:: gui_show_number_individuals(bool)
+.. function:: grid_points_scaling(float)
 
-	**default value:** false
+	**default value:** 0.8
 
 
-	Show/hide the #individuals time-series graph.
+	Scaling applied to the average distance between the points in order to shrink or increase the size of the circles for recognition (see `grid_points`).
 
+	.. seealso:: :func:`grid_points`, 
 
 
-.. function:: gui_show_outline(bool)
+.. function:: outline_resample(float)
 
-	**default value:** true
+	**default value:** 1
 
 
-	Showing or hiding individual outlines in tracking view.
+	Spacing between outline points in pixels, after resampling (normalizing) the outline. A lower value here can drastically increase the number of outline points generated (and decrease speed).
 
 
 
-.. function:: gui_show_paths(bool)
+.. function:: posture_closing_steps(uchar)
 
-	**default value:** true
+	**default value:** 0
 
 
-	Equivalent to the checkbox visible in GUI on the bottom-left.
+	When enabled (> 0), posture will be processed using a combination of erode / dilate in order to close holes in the shape and get rid of extremities. An increased number of steps will shrink the shape, but will also be more time intensive.
 
 
 
-.. function:: gui_show_pixel_grid(bool)
+.. function:: gui_show_detailed_probabilities(bool)
 
 	**default value:** false
 
 
-	Shows the proximity grid generated for all blobs, which is used for history splitting.
+	Show/hide detailed probability stats when an individual is selected.
 
 
 
-.. function:: gui_show_posture(bool)
+.. function:: posture_head_percentage(float)
 
-	**default value:** true
+	**default value:** 0.1
 
 
-	Show/hide the posture window on the top-right.
+	The percentage of the midline-length that the head is moved away from the front of the body.
 
 
 
-.. function:: gui_show_probabilities(bool)
+.. function:: output_image_per_tracklet(bool)
 
 	**default value:** false
 
 
-	Show/hide probability visualisation when an individual is selected.
+	If set to true, the program will output one median image per tracklet (time-series segment) and save it alongside the npz/csv files.
 
 
 
-.. function:: gui_show_recognition_bounds(bool)
+.. function:: gui_show_texts(bool)
 
 	**default value:** true
 
 
-	Shows what is contained within tht recognition boundary as a cyan background. (See `recognition_border` for details.)
+	Showing or hiding individual identity (and related) texts in tracking view.
 
-	.. seealso:: :func:`recognition_border`, 
 
 
-.. function:: gui_show_recognition_summary(bool)
+.. function:: postures_per_thread(float)
 
-	**default value:** false
+	**default value:** 1
 
 
-	Show/hide confusion matrix (if network is loaded).
+	Number of individuals for which postures will be estimated per thread.
 
 
 
-.. function:: gui_show_selections(bool)
+.. function:: outline_curvature_range_ratio(float)
 
-	**default value:** true
+	**default value:** 0.03
 
 
-	Show/hide circles around selected individual.
+	Determines the ratio between number of outline points and distance used to calculate its curvature. Program will look at index +- `ratio * size()` and calculate the distance between these points (see posture window red/green color).
 
+
 
 
-.. function:: gui_show_shadows(bool)
+.. function:: event_min_peak_offset(float)
 
-	**default value:** true
+	**default value:** 0.15
 
 
-	Showing or hiding individual shadows in tracking view.
+	
 
 
 
-.. function:: gui_show_texts(bool)
+.. function:: output_dir(path)
 
-	**default value:** true
+	**default value:** "/Users/tristan/Videos"
 
 
-	Showing or hiding individual identity (and related) texts in tracking view.
+	Default output-/input-directory. Change this in order to omit paths in front of filenames for open and save.
 
 
 
-.. function:: gui_show_uniqueness(bool)
+.. function:: outline_approximate(uchar)
 
-	**default value:** false
+	**default value:** 3
 
 
-	Show/hide uniqueness overview after training.
+	If this is a number > 0, the outline detected from the image will be passed through an elliptical fourier transform with `outline_approximate` number of coefficients. When the given number is sufficiently low, the outline will be smoothed significantly (and more so for lower numbers of coefficients).
 
 
 
-.. function:: gui_show_visualfield(bool)
+.. function:: manual_identities(set<int>)
 
-	**default value:** false
+	**default value:** []
 
 
-	Show/hide the visual field rays.
+	
 
 
 
-.. function:: gui_show_visualfield_ts(bool)
+.. function:: gui_heatmap_value_range(range<double>)
 
-	**default value:** false
+	**default value:** [-1,-1]
 
 
-	Show/hide the visual field time series.
+	Give a custom value range that is used to normalize heatmap cell values.
 
 
 
-.. function:: gui_single_identity_color(color)
+.. function:: posture_closing_size(uchar)
 
-	**default value:** [0,0,0,0]
+	**default value:** 2
 
 
-	If set to something else than transparent, all individuals will be displayed with this color.
+	The kernel size for erosion / dilation of the posture algorithm. Only has an effect with  `posture_closing_steps` > 0.
 
+	.. seealso:: :func:`posture_closing_steps`, 
 
 
-.. function:: gui_transparent_background(bool)
+.. function:: python_path(path)
 
-	**default value:** false
+	**default value:** "/Users/tristan/opt/anaconda3/envs/only_ffmpeg/bin/python3"
 
 
-	If enabled, fonts might look weird but you can record movies (and images) with transparent background (if gui_background_color.alpha is < 255).
+	Path to the python home folder. If left empty, the user is required to make sure that all necessary libraries are in-scope the PATH environment variable.
 
 
 
-.. function:: gui_zoom_limit(size)
+.. function:: individual_prefix(string)
 
-	**default value:** [300,300]
+	**default value:** "fish"
 
 
-	
+	The prefix that is added to all the files containing certain IDs. So individual 0 will turn into '[prefix]0' for all the npz files and within the program.
 
 
 
-.. function:: history_matching_log(path)
+.. function:: gui_show_paths(bool)
 
-	**default value:** ""
+	**default value:** true
 
 
-	If this is set to a valid html file path, a detailed matching history log will be written to the given file for each frame.
+	Equivalent to the checkbox visible in GUI on the bottom-left.
 
 
 
-.. function:: httpd_port(int)
+.. function:: gui_zoom_limit(size)
 
-	**default value:** 8080
+	**default value:** [300,300]
 
 
-	This is where the webserver tries to establish a socket. If it fails, this will be set to the port that was chosen.
+	
 
 
 
-.. function:: huge_timestamp_ends_segment(bool)
+.. function:: output_posture_data(bool)
 
-	**default value:** true
+	**default value:** false
 
 
-	
+	Save posture data npz file along with the usual NPZ/CSV files containing positions and such. If set to true, a file called '`output_dir`/`fish_data_dir`/`filename`\_posture_fishXXX.npz' will be created for each individual XXX.
 
+	.. seealso:: :func:`output_dir`, :func:`fish_data_dir`, :func:`filename`, 
 
 
-.. function:: huge_timestamp_seconds(double)
+.. function:: gui_fish_color(string)
 
-	**default value:** 0.2
+	**default value:** "identity"
 
 
-	Defaults to 0.5s (500ms), can be set to any value that should be recognized as being huge.
+	
 
 
 
-.. function:: image_invert(bool)
+.. function:: gui_show_posture(bool)
 
-	**default value:** false
+	**default value:** true
 
 
-	Inverts the image greyscale values before thresholding.
+	Show/hide the posture window on the top-right.
 
 
 
-.. function:: individual_names(map<int,string>)
+.. function:: gui_faded_brightness(uchar)
 
-	**default value:** {}
+	**default value:** 255
 
 
-	A map of {individual-id: "individual-name", ...} that names individuals in the GUI and exported data.
+	The alpha value of tracking-related elements when timeline is hidden (0-255).
 
 
 
-.. function:: individual_prefix(string)
+.. function:: gui_auto_scale(bool)
 
-	**default value:** "fish"
+	**default value:** false
 
 
-	The prefix that is added to all the files containing certain IDs. So individual 0 will turn into '[prefix]0' for all the npz files and within the program.
+	If set to true, the tracker will always try to zoom in on the whole group. This is useful for some individuals in a huge video (because if they are too tiny, you can't see them and their posture anymore).
 
 
 
-.. function:: individuals_per_thread(float)
+.. function:: gui_show_midline_histogram(bool)
 
-	**default value:** 1
+	**default value:** false
 
 
-	Number of individuals for which positions will be estimated per thread.
+	Displays a histogram for midline lengths.
 
 
 
-.. function:: limit(float)
+.. function:: gui_show_fish(pair<int64,int>)
 
-	**default value:** 0.09
+	**default value:** [-1,-1]
 
 
-	Limit for tailbeat event detection.
+	Show debug output for {blob_id, fish_id}.
 
 
 
-.. function:: log_file(path)
+.. function:: frame_rate(int)
 
-	**default value:** ""
+	**default value:** 0
 
 
-	Set this to a path you want to save the log file to.
+	Specifies the frame rate of the video. It is used e.g. for playback speed and certain parts of the matching algorithm. Will be set by the .settings of a video (or by the video itself).
 
 
 
-.. function:: manual_identities(set<int>)
+.. function:: gui_show_heatmap(bool)
 
-	**default value:** []
+	**default value:** false
 
 
-	
+	Showing a heatmap per identity, normalized by maximum samples per grid-cell.
 
 
 
-.. function:: manual_matches(map<int,map<int,int64>>)
+.. function:: gui_show_boundary_crossings(bool)
 
-	**default value:** {}
+	**default value:** true
 
 
-	A map of manually defined matches (also updated by GUI menu for assigning manual identities). {{frame: {fish0: blob2, fish1: blob0}}, ...}
+	If set to true (and the number of individuals is set to a number > 0), the tracker will show whenever an individual enters the recognition boundary. Indicated by an expanding cyan circle around it.
 
 
 
-.. function:: manual_splits(map<int,set<int64>>)
+.. function:: midline_invert(bool)
 
-	**default value:** {}
+	**default value:** false
 
 
-	This map contains {frame: [blobid1,blobid2,...]} where frame and blobid are integers. When this is read during tracking for a frame, the tracker will attempt to force-split the given blob ids.
+	If enabled, all midlines will be inverted (tail/head swapped).
 
 
 
-.. function:: manually_approved(map<int,int>)
+.. function:: individual_names(map<int,string>)
 
 	**default value:** {}
 
 
-	A list of ranges of manually approved frames that may be used for generating training datasets {232:232,5555:5560}.
-
+	A map of {individual-id: "individual-name", ...} that names individuals in the GUI and exported data.
 
 
-.. function:: match_mode(matching_mode_t)
 
-	**default value:** accurate
+.. function:: gui_interface_scale(float)
 
-	**possible values:**
-		- `accurate`: Maximizes the probability sum by assigning (or potentially not assigning) individuals to objects in the frame. This returns the correct solution, but might take long for high quantities of individuals.
-		- `approximate`: Simply assigns the highest probability edges (blob to individual) to all individuals - first come, first serve. Parameters have to be set very strictly (especially speed) in order to have as few objects to choose from as possible and limit the error.
-		- `hungarian`: The hungarian algorithm (as implemented in O(n^3) by Mattias Andrée `https://github.com/maandree/hungarian-algorithm-n3`).
-		- `benchmark`: Runs all algorithms and pits them against each other, outputting statistics every few frames.
+	**default value:** 0.75
 
-	Changes the default algorithm to be used for matching blobs in one frame to blobs in the next frame. The accurate algorithm performs best, but also scales less well for more individuals than the approximate one. However, if it is too slow (temporarily) in a few frames, the program falls back to using the approximate one that doesnt slow down.
 
+	Scales the whole interface. A value greater than 1 will make it smaller.
 
 
 
-.. function:: matching_probability_threshold(float)
+.. function:: pixel_grid_cells(ulong)
 
-	**default value:** 0.1
+	**default value:** 25
 
 
-	The probability below which a possible connection between blob and identity is considered too low. The probability depends largely upon settings like `track_max_speed`.
+	
 
-	.. seealso:: :func:`track_max_speed`, 
 
 
-.. function:: meta_mass_mg(float)
+.. function:: analysis_paused(bool)
 
-	**default value:** 200
+	**default value:** false
 
 
-	Used for exporting event-energy levels.
+	Halts the analysis.
 
 
 
-.. function:: meta_real_width(float)
+.. function:: use_differences(bool)
 
-	**default value:** 0
+	**default value:** false
 
 
-	The actual width of what is seen on a video image in centimeters. So for example if the image is cropped exactly to the tank, it would be the width of the tank. Used to convert from pixels to centimeters.
+	This should be set to false unless when using really old files.
 
 
 
-.. function:: meta_source_path(path)
+.. function:: gui_show_pixel_grid(bool)
 
-	**default value:** ""
+	**default value:** false
 
 
-	Path of the original video file for conversions (saved as debug info).
+	Shows the proximity grid generated for all blobs, which is used for history splitting.
 
 
 
-.. function:: midline_invert(bool)
+.. function:: gpu_enable_accumulation(bool)
 
-	**default value:** false
+	**default value:** true
 
 
-	If enabled, all midlines will be inverted (tail/head swapped).
+	Enables or disables the idtrackerai-esque accumulation protocol cascade. It is usually a good thing to enable this (especially in more complicated videos), but can be disabled as a fallback (e.g. if computation time is a major constraint).
 
 
 
-.. function:: midline_resolution(int)
+.. function:: gui_show_selections(bool)
 
-	**default value:** 12
+	**default value:** true
 
 
-	Number of midline points that are saved. Higher number increases detail.
+	Show/hide circles around selected individual.
 
 
 
-.. function:: midline_samples(uint64)
+.. function:: gui_show_graph(bool)
 
-	**default value:** 0
+	**default value:** false
 
 
-	The maximum number of samples taken for generating a ``median midline length``. Setting this to 0 removes the limit all together. A limit may be set for very long videos, or videos with lots of individual, for memory reasons.
+	Show/hide the data time-series graph.
 
-	.. seealso:: :func:``, :func:``, 
 
 
-.. function:: midline_start_with_head(bool)
+.. function:: gui_show_probabilities(bool)
 
 	**default value:** false
 
 
-	If enabled, the midline is going to be estimated starting at the head instead of the tail.
+	Show/hide probability visualisation when an individual is selected.
 
 
 
-.. function:: midline_stiff_percentage(float)
+.. function:: gui_show_recognition_summary(bool)
 
-	**default value:** 0.15
+	**default value:** false
 
 
-	Percentage of the midline that can be assumed to be stiff. If the head position seems poorly approximated (straighened out too much), then decrease this value.
+	Show/hide confusion matrix (if network is loaded).
 
 
 
-.. function:: midline_walk_offset(float)
+.. function:: enable_absolute_difference(bool)
 
-	**default value:** 0.025
+	**default value:** true
 
 
-	This percentage of the number of outline points is the amount of points that the midline-algorithm is allowed to move left and right upon each step. Higher numbers will make midlines more straight, especially when extremities are present (that need to be skipped over), but higher numbers will also potentially decrease accuracy for less detailed objects.
+	If set to true, the threshold values will be applied to abs(image - background). Otherwise max(0, image - background).
 
 
 
-.. function:: nowindow(bool)
+.. function:: gui_show_visualfield_ts(bool)
 
 	**default value:** false
 
 
-	If set to true, no GUI will be created on startup (e.g. when starting from SSH).
-
-
-
-.. function:: outline_approximate(uchar)
-
-	**default value:** 3
-
-
-	If this is a number > 0, the outline detected from the image will be passed through an elliptical fourier transform with `outline_approximate` number of coefficients. When the given number is sufficiently low, the outline will be smoothed significantly (and more so for lower numbers of coefficients).
+	Show/hide the visual field time series.
 
 
 
-.. function:: outline_curvature_range_ratio(float)
+.. function:: gui_show_uniqueness(bool)
 
-	**default value:** 0.03
+	**default value:** false
 
 
-	Determines the ratio between number of outline points and distance used to calculate its curvature. Program will look at index +- `ratio * size()` and calculate the distance between these points (see posture window red/green color).
+	Show/hide uniqueness overview after training.
 
-	.. seealso:: :func:`ratio * size()`, 
 
 
-.. function:: outline_resample(float)
+.. function:: web_time_threshold(float)
 
-	**default value:** 1
+	**default value:** 0.05
 
 
-	Spacing between outline points in pixels, after resampling (normalizing) the outline. A lower value here can drastically increase the number of outline points generated (and decrease speed).
+	Maximum refresh rate in seconds for the web interface.
 
 
 
-.. function:: outline_smooth_samples(uchar)
+.. function:: midline_walk_offset(float)
 
-	**default value:** 4
+	**default value:** 0.025
 
 
-	Use N samples for smoothing the outline. More samples will generate a smoother (less detailed) outline.
+	This percentage of the number of outline points is the amount of points that the midline-algorithm is allowed to move left and right upon each step. Higher numbers will make midlines more straight, especially when extremities are present (that need to be skipped over), but higher numbers will also potentially decrease accuracy for less detailed objects.
 
 
 
-.. function:: outline_smooth_step(uchar)
+.. function:: gui_show_histograms(bool)
 
-	**default value:** 1
+	**default value:** false
 
 
-	Jump over N outline points when smoothing (reducing accuracy).
+	Equivalent to the checkbox visible in GUI on the bottom-left.
 
 
 
@@ -1221,403 +1211,431 @@ TRex parameters
 
 
 
-.. function:: output_annotations(map<string,string>)
+.. function:: gui_show_inactive_individuals(bool)
 
-	**default value:** {"ACCELERATION":"cm/s2","ACCELERATION_SMOOTH":"cm/s2","BORDER_DISTANCE":"cm","NEIGHBOR_DISTANCE":"cm","ORIENTATION":"rad","SPEED":"cm/s","SPEED_OLD":"cm/s","SPEED_SMOOTH":"cm/s","VX":"cm/s","VY":"cm/s","X":"cm","Y":"cm","global":"px"}
+	**default value:** true
 
 
-	Units (as a string) of output functions to be annotated in various places like graphs.
+	Show/hide individuals that have not been seen for longer than `track_max_reassign_time`.
 
+	.. seealso:: :func:`track_max_reassign_time`, 
 
 
-.. function:: output_centered(bool)
+.. function:: output_statistics(bool)
 
-	**default value:** false
+	**default value:** true
 
 
-	If set to true, the origin of all X and Y coordinates is going to be set to the center of the video.
+	Save an NPZ file containing an array with shape Nx5 and contents [[adding_frame_seconds, combined_posture_seconds, track_max_individuals, loading_seconds, posture_seconds],...] and an 1D-array containing all frame numbers. If set to true, a file called '`output_dir`/`fish_data_dir`/`filename`\_statistics.npz' will be created. This will not output anything interesting, if the data was loaded instead of analysed.
 
+	.. seealso:: :func:`output_dir`, :func:`fish_data_dir`, :func:`filename`, 
 
 
-.. function:: output_default_options(map<string,array<string>>)
+.. function:: gui_show_shadows(bool)
 
-	**default value:** {"ACCELERATION":["/15","SMOOTH","CENTROID"],"ANGULAR_A":["/1000","SMOOTH","CENTROID"],"ANGULAR_V":["/10","SMOOTH","CENTROID"],"DOT_V":["/10"],"L_V":["/10"],"NEIGHBOR_DISTANCE":["/10"],"NEIGHBOR_VECTOR_T":["/1"],"SPEED":["/10","SMOOTH"],"X":["/100"],"Y":["/100"],"amplitude":["/100"],"event_acceleration":["/10"],"global":["/10"],"midline_length":["/15"],"outline_size":["/100"],"tailbeat_peak":["pm"],"tailbeat_threshold":["pm"],"threshold_reached":["POINTS"],"v_direction":["/10"]}
+	**default value:** true
 
 
-	Default scaling and smoothing options for output functions, which are applied to functions in `output_graphs` during export.
+	Showing or hiding individual shadows in tracking view.
 
-	.. seealso:: :func:`output_graphs`, 
 
 
-.. function:: output_dir(path)
+.. function:: gpu_min_elements(ulong)
 
-	**default value:** "/Users/tristan/Videos"
+	**default value:** 25000
 
 
-	Default output-/input-directory. Change this in order to omit paths in front of filenames for open and save.
+	Minimum number of images being collected, before sending them to the GPU.
 
 
 
-.. function:: output_frame_window(int)
+.. function:: gui_show_outline(bool)
 
-	**default value:** 100
+	**default value:** true
 
 
-	If an individual is selected during CSV output, use these number of frames around it (or -1 for all frames).
+	Showing or hiding individual outlines in tracking view.
 
 
 
-.. function:: output_graphs(array<pair<string,array<string>>>)
+.. function:: gpu_min_iterations(ulong)
 
-	**default value:** [["X",["RAW","WCENTROID"]],["Y",["RAW","WCENTROID"]],["X",["RAW","HEAD"]],["Y",["RAW","HEAD"]],["VX",["RAW","HEAD"]],["VY",["RAW","HEAD"]],["AX",["RAW","HEAD"]],["AY",["RAW","HEAD"]],["ANGLE",["RAW"]],["ANGULAR_V",["RAW"]],["ANGULAR_A",["RAW"]],["MIDLINE_OFFSET",["RAW"]],["normalized_midline",["RAW"]],["midline_length",["RAW"]],["midline_x",["RAW"]],["midline_y",["RAW"]],["segment_length",["RAW"]],["SPEED",["RAW","WCENTROID"]],["SPEED",["SMOOTH","WCENTROID"]],["SPEED",["RAW","PCENTROID"]],["SPEED",["RAW","HEAD"]],["BORDER_DISTANCE",["PCENTROID"]],["time",[]],["timestamp",[]],["frame",[]],["missing",[]],["num_pixels",[]],["ACCELERATION",["RAW","PCENTROID"]],["ACCELERATION",["RAW","WCENTROID"]]]
+	**default value:** 100
 
 
-	The functions that will be exported when saving to CSV, or shown in the graph. [['X',[option], ...]]
+	Minimum number of iterations per epoch for training a recognition network.
 
 
 
-.. function:: output_image_per_tracklet(bool)
+.. function:: gui_show_memory_stats(bool)
 
 	**default value:** false
 
 
-	If set to true, the program will output one median image per tracklet (time-series segment) and save it alongside the npz/csv files.
-
+	Showing or hiding memory statistics.
 
 
-.. function:: output_min_frames(ulong)
 
-	**default value:** 1
+.. function:: panic_button(int)
 
+	**default value:** 0
 
-	Filters all individual with less than N frames when exporting. Individuals with fewer than N frames will also be hidden in the GUI unless `gui_show_inactive_individuals` is enabled (default).
 
-	.. seealso:: :func:`gui_show_inactive_individuals`, 
+	42
 
 
-.. function:: output_normalize_midline_data(bool)
 
-	**default value:** false
+.. function:: gui_show_manual_matches(bool)
 
+	**default value:** true
 
-	If enabled: save a normalized version of the midline data saved whenever `output_posture_data` is set to true. Normalized means that the position of the midline points is normalized across frames (or the distance between head and point n in the midline array).
 
-	.. seealso:: :func:`output_posture_data`, 
+	Show/hide manual matches in path.
 
 
-.. function:: output_npz(bool)
 
-	**default value:** true
+.. function:: gui_mode(mode_t)
 
+	**default value:** tracking
 
-	When pressing the S(ave) button or using auto_quit, this setting allows to switch between CSV and NPZ output. If set to true, all output will be NPZ files (recommended). If set to false, some output (`output_graphs`) will be CSV files, while others (posture data, etc.) will remain in NPZ format due to technical constraints.
 
-	.. seealso:: :func:`output_graphs`, 
+	The currently used display mode for the GUI.
 
 
-.. function:: output_posture_data(bool)
 
-	**default value:** false
+.. function:: video_info(string)
 
+	**default value:** ""
 
-	Save posture data npz file along with the usual NPZ/CSV files containing positions and such. If set to true, a file called '`output_dir`/`fish_data_dir`/`filename`\_posture_fishXXX.npz' will be created for each individual XXX.
 
-	.. seealso:: :func:`output_dir`, :func:`fish_data_dir`, :func:`filename`, 
+	Information on the current video as provided by PV.
 
 
-.. function:: output_prefix(string)
 
-	**default value:** ""
+.. function:: gui_connectivity_matrix(map<int,array<float>>)
 
+	**default value:** {}
 
-	A prefix that is prepended to all output files (csv/npz).
 
+	Internally used to store the connectivity matrix.
 
 
-.. function:: output_recognition_data(bool)
 
-	**default value:** false
+.. function:: gui_single_identity_color(color)
 
+	**default value:** [0,0,0,0]
 
-	Save recognition / probability data npz file along with the usual NPZ/CSV files containing positions and such. If set to true, a file called '`output_dir`/`fish_data_dir`/`filename`\_recognition_fishXXX.npz' will be created for each individual XXX.
 
-	.. seealso:: :func:`output_dir`, :func:`fish_data_dir`, :func:`filename`, 
+	If set to something else than transparent, all individuals will be displayed with this color.
 
 
-.. function:: output_statistics(bool)
 
-	**default value:** true
+.. function:: gui_heatmap_source(string)
 
+	**default value:** ""
 
-	Save an NPZ file containing an array with shape Nx5 and contents [[adding_frame_seconds, combined_posture_seconds, track_max_individuals, loading_seconds, posture_seconds],...] and an 1D-array containing all frame numbers. If set to true, a file called '`output_dir`/`fish_data_dir`/`filename`\_statistics.npz' will be created. This will not output anything interesting, if the data was loaded instead of analysed.
 
-	.. seealso:: :func:`output_dir`, :func:`fish_data_dir`, :func:`filename`, 
+	If empty, the source will simply be an individuals identity. Otherwise, information from export data sources will be used.
 
 
-.. function:: peak_mode(peak_mode_t)
 
-	**default value:** pointy
+.. function:: gui_heatmap_dynamic(bool)
 
-	**possible values:**
-		- `pointy`: The head is broader than the tail.
-		- `broad`: The tail is broader than the head.
+	**default value:** false
 
-	This determines whether the tail of an individual should be expected to be pointy or broad.
 
+	If enabled, the heatmap will only show frames before the frame currently displayed in the graphical user interface.
 
 
 
-.. function:: pixel_grid_cells(ulong)
+.. function:: error_terminate(bool)
 
-	**default value:** 25
+	**default value:** false
 
 
 	
 
 
 
-.. function:: posture_closing_size(uchar)
+.. function:: exec(path)
 
-	**default value:** 2
+	**default value:** ""
 
 
-	The kernel size for erosion / dilation of the posture algorithm. Only has an effect with  `posture_closing_steps` > 0.
+	This can be set to the path of an additional settings file that is executed after the normal settings file.
 
-	.. seealso:: :func:`posture_closing_steps`, 
 
 
-.. function:: posture_closing_steps(uchar)
+.. function:: gui_heatmap_frames(uint)
 
 	**default value:** 0
 
 
-	When enabled (> 0), posture will be processed using a combination of erode / dilate in order to close holes in the shape and get rid of extremities. An increased number of steps will shrink the shape, but will also be more time intensive.
-
+	If `gui_heatmap_dynamic` is enabled, this variable determines the range of frames that are considered. If set to 0, all frames up to the current frame are considered. Otherwise, this number determines the number of frames previous to the current frame that are considered.
 
+	.. seealso:: :func:`gui_heatmap_dynamic`, 
 
-.. function:: posture_direction_smoothing(ulong)
 
-	**default value:** 0
+.. function:: recognition_border_size_rescale(float)
 
+	**default value:** 0.5
 
-	Enables or disables smoothing of the posture orientation based on previous frames (not good for fast turns).
 
+	The amount that blob sizes for calculating the heatmap are allowed to go below or above blob_size_ranges (e.g. 0.5 means that the sizes can range between blob_size_ranges.min * (1 - 0.5) and blob_size_ranges.max * (1 + 0.5)).
 
 
-.. function:: posture_head_percentage(float)
 
-	**default value:** 0.1
+.. function:: gui_heatmap_normalization(heatmap_normalization_t)
 
+	**default value:** cell
 
-	The percentage of the midline-length that the head is moved away from the front of the body.
+	**possible values:**
+		- `none`: No normalization at all. Values will only be averaged per cell.
+		- `value`: Normalization based in value-space. The average of each cell will be divided by the maximum value encountered.
+		- `cell`: The cell sum will be divided by the maximum cell value encountered.
 
+	Normalization used for the heatmaps. If `value` is selected, then the maximum of all values encountered will be used to normalize the average of each cell. If `cell` is selected, the sum of each cell will be divided by the maximum cell value encountered.
 
 
-.. function:: postures_per_thread(float)
+	.. seealso:: :func:`value`, :func:`cell`, 
 
-	**default value:** 1
 
+.. function:: midline_stiff_percentage(float)
 
-	Number of individuals for which postures will be estimated per thread.
+	**default value:** 0.15
 
 
+	Percentage of the midline that can be assumed to be stiff. If the head position seems poorly approximated (straightened out too much), then decrease this value.
 
-.. function:: python_path(path)
 
-	**default value:** "/usr/local/bin/python3"
 
+.. function:: visual_field_eye_separation(float)
 
-	Path to the python home folder. If left empty, the user is required to make sure that all necessary libraries are in-scope the PATH environment variable.
+	**default value:** 60
 
 
+	Degrees of separation between the eye and looking straight ahead. Results in the eye looking towards head.angle +- `visual_field_eye_separation`.
 
-.. function:: recognition_border(recognition_border_t)
 
-	**default value:** none
 
-	**possible values:**
-		- `none`: No border at all. All points are inside the recognition boundary. (default)
-		- `heatmap`: Looks at a subset of frames from the video, trying to find out where individuals go and masking all the places they do not.
-		- `outline`: Similar to heatmap, but tries to build a convex border around the around (without holes in it).
-		- `shapes`: Any array of convex shapes. Set coordinates by changing `recognition_shapes`.
-		- `grid`: The points defined in `grid_points` are turned into N different circles inside the arena (with points in `grid_points` being the circle centers), which define in/out if inside/outside any of the circles.
-		- `circle`: The video-file provides a binary mask (e.g. when `cam_circle_mask` was set to true during recording), which is then used to determine in/out.
+.. function:: limit(float)
 
-	This defines the type of border that is used in all automatic recognition routines. Depending on the type set here, you might need to set other parameters as well (e.g. `recognition_shapes`). In general, this defines whether an image of an individual is usable for automatic recognition. If it is inside the defined border, then it will be passed on to the recognition network - if not, then it wont.
+	**default value:** 0.09
 
 
-	.. seealso:: :func:`recognition_shapes`, 
+	Limit for tailbeat event detection.
 
 
-.. function:: recognition_border_shrink_percent(float)
 
-	**default value:** 0.3
+.. function:: gui_equalize_blob_histograms(bool)
 
+	**default value:** true
 
-	The amount by which the recognition border is shrunk after generating it (roughly and depends on the method).
 
+	Equalize histograms of blobs within videos (makes them more visible).
 
 
-.. function:: recognition_border_size_rescale(float)
 
-	**default value:** 0.5
+.. function:: recognition_save_progress_images(bool)
 
+	**default value:** false
 
-	The amount that blob sizes for calculating the heatmap are allowed to go below or above blob_size_ranges (e.g. 0.5 means that the sizes can range between blob_size_ranges.min * (1 - 0.5) and blob_size_ranges.max * (1 + 0.5)).
 
+	If set to true, an image will be saved for all training epochs, documenting the uniqueness in each step.
 
 
-.. function:: recognition_coeff(ulong)
 
-	**default value:** 50
+.. function:: gui_show_export_options(bool)
 
+	**default value:** false
 
-	
 
+	Show/hide the export options widget.
 
 
-.. function:: recognition_enable(bool)
 
-	**default value:** true
+.. function:: gui_max_path_time(float)
 
+	**default value:** 3
 
-	This enables internal training. Requires Python3 and Keras to be available.
 
+	Length (in time) of the trails shown in GUI.
 
 
-.. function:: recognition_image_scale(float)
 
-	**default value:** 1
+.. function:: gui_show_blobs(bool)
 
+	**default value:** true
 
-	Scaling applied to the images before passing them to the network.
 
+	Showing or hiding individual raw blobs in tracking view (are always shown in RAW mode).
 
 
-.. function:: recognition_image_size(size)
 
-	**default value:** [80,80]
+.. function:: settings_file(path)
 
+	**default value:** ""
 
-	Size of each image generated for network training.
 
+	Name of the settings file. By default, this will be set to `filename`.settings in the same folder as `filename`.
 
+	.. seealso:: :func:`filename`, :func:`filename`, 
 
-.. function:: recognition_normalization(recognition_normalization_t)
 
-	**default value:** posture
+.. function:: filename(path)
 
-	**possible values:**
-		- `none`: No normalization. Images will only be cropped out and used as-is.
-		- `moments`: Images will be cropped out and aligned as in idtracker.ai using the main axis calculated using `image moments`.
-		- `posture`: Images will be cropped out and rotated so that the head will be fixed in one position and only the tail moves.
-		- `legacy`: Images will be aligned parallel to the x axis.
+	**default value:** ""
 
-	This enables or disable normalizing the images before training. If set to `none`, the images will be sent to the GPU raw - they will only be cropped out. Otherwise they will be normalized based on head orientation (posture) or the main axis calculated using `image moments`.
 
+	Opened filename (without .pv).
 
-	.. seealso:: :func:`none`, :func:`image moments`, 
 
 
-.. function:: recognition_save_progress_images(bool)
+.. function:: terminate(bool)
 
 	**default value:** false
 
 
-	If set to true, an image will be saved for all training epochs, documenting the uniqueness in each step.
+	If set to true, the application terminates.
 
 
 
-.. function:: recognition_save_training_images(bool)
+.. function:: huge_timestamp_seconds(double)
 
-	**default value:** false
+	**default value:** 0.2
 
 
-	If set to true, the program will save the images used for a successful training of the recognition network to the output path.
+	Defaults to 0.5s (500ms), can be set to any value that should be recognized as being huge.
 
 
 
-.. function:: recognition_shapes(array<array<vec>>)
+.. function:: auto_no_tracking_data(bool)
 
-	**default value:** []
+	**default value:** false
 
 
-	If `recognition_border` is set to 'shapes', then the identification network will only be applied to blobs within the convex shapes specified here.
+	If set to true, the auto_quit option will NOT save any `output_graphs` tracking data - just the posture data (if enabled) and the results file (if not disabled). This saves time and space if that is a need.
 
-	.. seealso:: :func:`recognition_border`, 
+	.. seealso:: :func:`output_graphs`, 
 
 
-.. function:: recognition_smooth_amount(ulong)
+.. function:: blob_size_ranges(BlobSizeRange)
 
-	**default value:** 200
+	**default value:** [[0.1,3]]
 
 
-	
+	Blobs below the lower bound are recognized as noise instead of individuals. Blobs bigger than the upper bound are considered to potentially contain more than one individual. The unit is #pixels * (`meta_real_width` / video_width).
 
+	.. seealso:: :func:`meta_real_width`, 
 
 
-.. function:: settings_file(path)
+.. function:: history_matching_log(path)
 
 	**default value:** ""
 
 
-	Name of the settings file. By default, this will be set to `filename`.settings in the same folder as `filename`.
+	If this is set to a valid html file path, a detailed matching history log will be written to the given file for each frame.
 
-	.. seealso:: :func:`filename`, :func:`filename`, 
 
 
-.. function:: smooth_window(int)
+.. function:: meta_source_path(path)
 
-	**default value:** 2
+	**default value:** ""
 
 
-	Smoothing window used for exported data with the #smooth tag.
+	Path of the original video file for conversions (saved as debug info).
 
 
 
-.. function:: speed_extrapolation(float)
+.. function:: httpd_port(int)
 
-	**default value:** 3
+	**default value:** 8080
 
 
-	Used for matching when estimating the next position of an individual. Smaller values are appropriate for lower frame rates. The higher this value is, the more previous frames will have significant weight in estimating the next position (with an exponential decay).
+	This is where the webserver tries to establish a socket. If it fails, this will be set to the port that was chosen.
 
 
 
-.. function:: tags_path(path)
+.. function:: track_max_individuals(int)
 
-	**default value:** ""
+	**default value:** 0
 
 
-	If this path is set, the program will try to find tags and save them at the specified location.
+	The maximal number of individuals that are assigned at the same time (infinite if set to zero). If the given number is below the actual number of individuals, then only a (random) subset of individuals are assigned and a warning is shown.
 
 
 
-.. function:: terminate_training(bool)
+.. function:: auto_quit(bool)
 
 	**default value:** false
 
 
-	Setting this to true aborts the training in progress.
+	If set to true, the application will automatically save all results and export CSV files and quit, after the analysis is complete.
 
 
 
-.. function:: threshold_ratio_range(rangef)
+.. function:: debug(bool)
 
-	**default value:** [0.5,1]
+	**default value:** false
 
 
-	If `track_threshold_2` is not equal to zero, this ratio will be multiplied by the number of pixels present before the second threshold. If the resulting size falls within the given range, the blob is deemed okay.
+	Enables some verbose debug print-outs.
 
-	.. seealso:: :func:`track_threshold_2`, 
 
 
-.. function:: track_blacklist(array<array<vec>>)
+.. function:: blob_split_global_shrink_limit(float)
 
-	**default value:** []
+	**default value:** 0.2
 
 
-	If this is not empty, objects within the given rectangles or polygons (>= 3 points) [[x0,y0],[x1,y1](, ...)], ...] will be ignored during tracking.
+	The minimum percentage of the minimum in `blob_size_ranges`, that a blob is allowed to be reduced to during splitting. If this value is set too low, the program might start recognizing parts of individuals as other individuals too quickly.
+
+	.. seealso:: :func:`blob_size_ranges`, 
+
+
+.. function:: blobs_per_thread(float)
+
+	**default value:** 150
+
+
+	Number of blobs for which properties will be calculated per thread.
+
+
+
+.. function:: gui_heatmap_smooth(double)
+
+	**default value:** 0
+
+
+	Value between 0 and 1, think of it as `gui_heatmap_smooth` times video width, indicating the maximum upscaled size of the heatmaps shown in the tracker. Makes them prettier.
+
+
+
+.. function:: auto_minmax_size(bool)
+
+	**default value:** false
+
+
+	Program will try to find minimum / maximum size of the individuals automatically for the current `cm_per_pixel` setting. Can only be passed as an argument upon startup. The calculation is based on the median blob size in the video and assumes a relatively low level of noise.
+
+	.. seealso:: :func:`cm_per_pixel`, 
+
+
+.. function:: meta_mass_mg(float)
+
+	**default value:** 200
+
+
+	Used for exporting event-energy levels.
+
+
+
+.. function:: nowindow(bool)
+
+	**default value:** false
+
+
+	If set to true, no GUI will be created on startup (e.g. when starting from SSH).
 
 
 
@@ -1630,161 +1648,170 @@ TRex parameters
 
 
 
-.. function:: track_end_segment_for_speed(bool)
+.. function:: outline_smooth_samples(uchar)
 
-	**default value:** true
+	**default value:** 4
 
 
-	Sometimes individuals might be assigned to blobs that are far away from the previous position. This could indicate wrong assignments, but not necessarily. If this variable is set to true, consecutive frame segments will end whenever high speeds are reached, just to be on the safe side. For scenarios with lots of individuals (and no recognition) this might spam yellow bars in the timeline and may be disabled.
+	Use N samples for smoothing the outline. More samples will generate a smoother (less detailed) outline.
 
 
 
-.. function:: track_intensity_range(rangel)
+.. function:: gui_show_recognition_bounds(bool)
 
-	**default value:** [-1,-1]
+	**default value:** true
 
 
-	When set to valid values, objects will be filtered to have an average pixel intensity within the given range.
+	Shows what is contained within the recognition boundary as a cyan background. (See `recognition_border` for details.)
 
+	.. seealso:: :func:`recognition_border`, 
 
 
-.. function:: track_max_individuals(int)
+.. function:: debug_recognition_output_all_methods(bool)
 
-	**default value:** 0
+	**default value:** false
 
 
-	The maximal number of individual that are assigned at the same time (infinite if set to zero). If the given number is below the actual number of individual, then only a (random) subset of individual are assigned and a warning is shown.
+	If set to true, a complete training will attempt to output all images for each identity with all available normalization methods.
 
 
 
-.. function:: track_max_reassign_time(float)
+.. function:: output_frame_window(int)
 
-	**default value:** 0.5
+	**default value:** 100
 
 
-	Distance in time (seconds) where the matcher will stop trying to reassign an individual based on previous position. After this time runs out, depending on the settings, the tracker will try to find it based on other criteria, or generate a new individual.
+	If an individual is selected during CSV output, use these number of frames around it (or -1 for all frames).
 
 
 
-.. function:: track_max_speed(float)
+.. function:: gui_background_color(color)
 
-	**default value:** 50
+	**default value:** [0,0,0,150]
 
 
-	The maximum speed an individual can have (=> the maximum distance an individual can travel within one second) in cm/s. Uses `meta_real_width`.
+	Values < 255 will make the background more transparent in standard view. This might be useful with very bright backgrounds.
 
-	.. seealso:: :func:`meta_real_width`, 
 
 
-.. function:: track_posture_threshold(int)
+.. function:: recognition_border_shrink_percent(float)
 
-	**default value:** 15
+	**default value:** 0.3
 
 
-	Same as `track_threshold`, but for posture estimation.
+	The amount by which the recognition border is shrunk after generating it (roughly and depends on the method).
 
-	.. seealso:: :func:`track_threshold`, 
 
 
-.. function:: track_speed_decay(float)
+.. function:: match_mode(matching_mode_t)
 
-	**default value:** 0.7
+	**default value:** accurate
 
+	**possible values:**
+		- `accurate`: Maximizes the probability sum by assigning (or potentially not assigning) individuals to objects in the frame. This returns the correct solution, but might take long for high quantities of individuals.
+		- `approximate`: Simply assigns the highest probability edges (blob to individual) to all individuals - first come, first serve. Parameters have to be set very strictly (especially speed) in order to have as few objects to choose from as possible and limit the error.
+		- `hungarian`: The hungarian algorithm (as implemented in O(n^3) by Mattias Andrée `https://github.com/maandree/hungarian-algorithm-n3`).
+		- `benchmark`: Runs all algorithms and pits them against each other, outputting statistics every few frames.
 
-	The amount the expected speed is reduced over time when an individual is lost. When individuals collide, depending on the expected behavior for the given species, one should choose different values for this variable. If the individuals usually stop when they collide, this should be set to a value > 0.8. If the individuals are expected to move over one another, the value should be set to a small value > 0.
+	Changes the default algorithm to be used for matching blobs in one frame to blobs in the next frame. The accurate algorithm performs best, but also scales less well for more individuals than the approximate one. However, if it is too slow (temporarily) in a few frames, the program falls back to using the approximate one that doesn't slow down.
 
 
 
-.. function:: track_threshold(int)
 
-	**default value:** 15
+.. function:: app_name(string)
 
+	**default value:** "TRex"
 
-	Constant used in background subtraction. Pixels with grey values above this threshold will be interpreted as potential individuals, while pixels below this threshold will be ignored.
 
+	Name of the application.
 
 
-.. function:: track_threshold_2(int)
 
-	**default value:** 0
+.. function:: log_file(path)
 
+	**default value:** ""
 
-	If not zero, a second threshold will be applied to all objects after they have been deemed do be theoretically large enough. Then they are compared to #before_pixels * `threshold_ratio_range` to see how much they have been shrunk).
 
-	.. seealso:: :func:`threshold_ratio_range`, 
+	Set this to a path you want to save the log file to.
 
 
-.. function:: track_time_probability_enabled(bool)
 
-	**default value:** true
+.. function:: auto_apply(bool)
 
+	**default value:** false
 
-	
 
+	If set to true, the application will automatically apply the network with existing weights once the analysis is done. It will then automatically correct and reanalyse the video.
 
 
-.. function:: track_whitelist(array<array<vec>>)
 
-	**default value:** []
+.. function:: build(string)
 
+	**default value:** ""
 
-	If this is not empty, objects within the given rectangles or polygons (>= 3 points) [[x0,y0],[x1,y1](, ...)], ...] will be the only objects being tracked. (overwrites `track_blacklist`)
 
-	.. seealso:: :func:`track_blacklist`, 
+	Current build version.
 
 
-.. function:: tracklet_max_images(ulong)
 
-	**default value:** 0
+.. function:: auto_no_memory_stats(bool)
 
+	**default value:** true
 
-	Maximum number of images that are being output per tracklet given that `output_image_per_tracklet` is true. If the number is 0, then every image will be exported that has been recognized as an individual.
 
-	.. seealso:: :func:`output_image_per_tracklet`, 
+	If set to true, no memory statistics will be saved on auto_quit.
 
 
-.. function:: tracklet_normalize_orientation(bool)
 
-	**default value:** true
+.. function:: track_max_reassign_time(float)
 
+	**default value:** 0.5
 
-	If enabled, all exported tracklet images are normalized according to the calculated posture orientation, so that all heads are looking to the left and only the body moves.
 
+	Distance in time (seconds) where the matcher will stop trying to reassign an individual based on previous position. After this time runs out, depending on the settings, the tracker will try to find it based on other criteria, or generate a new individual.
 
 
-.. function:: tracklet_restore_split_blobs(bool)
 
-	**default value:** true
+.. function:: ffmpeg_path(path)
 
+	**default value:** ""
 
-	If enabled, all exported tracklet images are checked for missing pixels. When a blob is too close to another blob, parts of the other blob might be erased so the individuals can be told apart. If enabled, another mask will be saved, that contains only the blob in focus, without the rest-pixels.
 
+	Path to an ffmpeg executable file. This is used for converting videos after recording them (from the GUI). It is not a critical component of the software, but mostly for convenience.
 
 
-.. function:: use_differences(bool)
 
-	**default value:** false
+.. function:: recognition_normalization(recognition_normalization_t)
 
+	**default value:** posture
 
-	This should be set to false unless when using really old files.
+	**possible values:**
+		- `none`: No normalization. Images will only be cropped out and used as-is.
+		- `moments`: Images will be cropped out and aligned as in idtracker.ai using the main axis calculated using `image moments`.
+		- `posture`: Images will be cropped out and rotated so that the head will be fixed in one position and only the tail moves.
+		- `legacy`: Images will be aligned parallel to the x axis.
 
+	This enables or disables normalizing the images before training. If set to `none`, the images will be sent to the GPU raw - they will only be cropped out. Otherwise they will be normalized based on head orientation (posture) or the main axis calculated using `image moments`.
 
 
-.. function:: version(string)
+	.. seealso:: :func:`none`, :func:`image moments`, 
 
-	**default value:** "RC6"
 
+.. function:: tracklet_restore_split_blobs(bool)
+
+	**default value:** true
 
-	Current application version.
 
+	If enabled, all exported tracklet images are checked for missing pixels. When a blob is too close to another blob, parts of the other blob might be erased so the individuals can be told apart. If enabled, another mask will be saved, that contains only the blob in focus, without the rest-pixels.
 
 
-.. function:: video_info(string)
+
+.. function:: output_prefix(string)
 
 	**default value:** ""
 
 
-	Information on the current video as provided by PV.
+	A prefix that is prepended to all output files (csv/npz).
 
 
 
@@ -1797,48 +1824,118 @@ TRex parameters
 
 
 
-.. function:: video_size(size)
+.. function:: gui_show_midline(bool)
 
-	**default value:** [-1,-1]
+	**default value:** true
 
 
-	The dimensions of the currently loaded video.
+	Showing or hiding individual midlines in tracking view.
 
 
 
-.. function:: visual_field_eye_offset(float)
+.. function:: outline_smooth_step(uchar)
 
-	**default value:** 0.15
+	**default value:** 1
 
 
-	A percentage telling the program how much the eye positions are offset from the start of the midline.
+	Jump over N outline points when smoothing (reducing accuracy).
 
 
 
-.. function:: visual_field_eye_separation(float)
+.. function:: gui_playback_speed(float)
 
-	**default value:** 60
+	**default value:** 1
 
 
-	Degrees of separation between the eye and looking straight ahead. Results in the eye looking towards head.angle +- `visual_field_eye_separation`.
+	Playback speed when pressing SPACE.
 
 
 
-.. function:: web_quality(int)
+.. function:: gui_recording_format(gui_recording_format_t)
+
+	**default value:** avi
+
+	**possible values:**
+		- `avi`: AVI / video format (codec FFV1 is used in unix systems)
+		- `jpg`: individual images in JPEG format
+		- `png`: individual images in PNG format
+
+	Sets the format for recording mode (when R is pressed in the GUI). Supported formats are 'avi', 'jpg' and 'png'. JPEGs have 75%% compression, AVI is using MJPEG compression.
+
+
+
+
+.. function:: gui_heatmap_resolution(uint)
 
 	**default value:** 75
 
 
-	JPEG quality of images transferred over the web interface.
+	Square resolution of individual heatmaps displayed with `gui_show_heatmap`. Will generate a square grid, each cell with dimensions (video_width / N, video_height / N), and sort all positions of each identity into it.
 
+	.. seealso:: :func:`gui_show_heatmap`, 
 
 
-.. function:: web_time_threshold(float)
+.. function:: tracklet_normalize_orientation(bool)
 
-	**default value:** 0.05
+	**default value:** true
 
 
-	Maximum refresh rate in seconds for the web interface.
+	If enabled, all exported tracklet images are normalized according to the calculated posture orientation, so that all heads are looking to the left and only the body moves.
+
+
+
+.. function:: midline_resolution(uint)
+
+	**default value:** 12
+
+
+	Number of midline points that are saved. Higher number increases detail.
+
+
+
+.. function:: gui_show_number_individuals(bool)
+
+	**default value:** false
+
+
+	Show/hide the #individuals time-series graph.
+
+
+
+.. function:: cmd_line(string)
+
+	**default value:** ""
+
+
+	An approximation of the command-line arguments passed to the program.
+
+
+
+.. function:: track_whitelist(array<array<vec>>)
+
+	**default value:** []
+
+
+	If this is not empty, objects within the given rectangles or polygons (>= 3 points) [[x0,y0],[x1,y1](, ...)], ...] will be the only objects being tracked. (overwrites `track_blacklist`)
+
+	.. seealso:: :func:`track_blacklist`, 
+
+
+.. function:: calculate_posture(bool)
+
+	**default value:** true
+
+
+	Enables or disables posture calculation. Can only be set before the video is analysed (e.g. in a settings file or as a startup parameter).
+
+
+
+.. function:: gui_frame(int)
+
+	**default value:** 0
+
+
+	The currently visible frame.
 
 
 
diff --git a/docs/run.rst b/docs/run.rst
index 01c3cd3..9289d42 100644
--- a/docs/run.rst
+++ b/docs/run.rst
@@ -30,7 +30,7 @@ Running TGrabs
 
 Running |grabs| usually involves the following parameters::
 
-	./framegrabber -i [SOURCE] -o [DESTINATION] [ADDITIONAL]
+	./tgrabs -i [SOURCE] -o [DESTINATION] [ADDITIONAL]
 
 **Source** can be any of the following:
 
@@ -49,9 +49,9 @@ Running TRex
 
 The tracker only expects an input file::
 
-	./tracker -i [VIDEONAME]
+	./trex -i [VIDEONAME]
 
-``VIDEONAME`` is either a full path to the video file, or the name of a video file in the default output folder (``~/Videos`` by default). This will open the tracker with all settings set to default, except if there is a ``[VIDEONAME].settings`` file present next to the video file or in the default output folder.
+``VIDEONAME`` is either a full path to the video file, or the name of a video file in the default output folder (``~/Videos`` by default). This will open |trex| with all settings set to default, except if there is a ``[VIDEONAME].settings`` file present next to the video file or in the default output folder.
 
 Just like with |grabs|, you can attach any number of additional parameters to the command-line, simply using ``-PARAMETER VALUE`` (see :doc:`parameters_trex`).
 
@@ -68,14 +68,17 @@ Segmentation/objects barely visible or too large!
 		
 Trajectories jump around a lot for no particular reason!
 	Changing :func:`track_max_speed` might help to mitigate this problem. Generally this can be the symptom of many different problems that lead to individuals being lost: size, speed, visibility issues, etc.: Sometimes individuals are lost because they are moving too fast (faster than the maximally allowed speed), or because they are expected to move much faster. Try lowering or increasing that limit. To get a hint at which speed to set, open up |trex|, track a few frames and select an individual - if there are consecutive frames for that individual, it will display a cm/s speed in the top-left overlay.
-	
+
+I set :func:`track_max_individuals` to zero, but it still does not track all individuals!
+	Probably what's happening is that you have not created a ``.settings`` file for the given video yet (or not in the right folder). Try that, and then attach the command-line option again.
+
 |trex| is really **laggy** and frequently complains about **too many combinations**!
 	Pause the analysis (``,`` key, this may take a few seconds). The matching algorithm has diffculty separating individuals into distinct cliques of neighboring individuals, or there are simply too many of them. This could be because your video contains too many trackable objects and no limit on the number of individuals has been set (:func:`track_max_individuals`), or there are significant time-jumps in the video. If the number of individuals should be much lower than detected, check your :func:`track_threshold`/:func:`blob_size_ranges` settings. It is advisable to start |trex| with likely parameters, or pausing analysis to change parameters. Otherwise, if that does not fix anything, check your settings for :func:`track_max_speed`, which controls the size of the neighbourhood to be considered during matching, and reduce it until there are no further warnings.
 	
 |grabs| does not quit and only shows "[...] not properly closed [...]"!
 	You may have to forcibly quit the application, either using a task manager, or by finding and manually ending its process::
 		
-		ps aux | grep framegrabber
+		ps aux | grep tgrabs
 
 I have attached my Basler camera, and now |grabs| is stuck initializing the camera! 
 	Most likely the camera driver crashed. Try restarting your computer to fix it.
\ No newline at end of file
-- 
GitLab