diff --git a/Application/closed_loop.py b/Application/closed_loop.py index d59c07b02e2e4575ade92be49c4434ce75e4b2f5..2b27ed456f30cd5d90a437b6731eaf4f38b7edb9 100644 --- a/Application/closed_loop.py +++ b/Application/closed_loop.py @@ -1,5 +1,4 @@ import numpy as np -import cv2 as cv import TRex import subprocess from queue import Queue, Empty @@ -68,11 +67,11 @@ def update_tracking(): if len(midlines) > i and not np.isinf(midline[0]).any(): print(midlines, i, midline[0].min(), midline[0].max()) - cv.circle(image, tuple(midline[0]), 5, (255, 0, 0), -1) - for j in range(1, len(midline)): - cv.line(image, tuple(midline[j-1, :]), tuple(midline[j]), (255, 255, 255)) + #cv.circle(image, tuple(midline[0]), 5, (255, 0, 0), -1) + #for j in range(1, len(midline)): + # cv.line(image, tuple(midline[j-1, :]), tuple(midline[j]), (255, 255, 255)) - cv.circle(image, pos, 5, color, -1) + #cv.circle(image, pos, 5, color, -1) if key != 1 or i >= len(visual_field): continue @@ -81,9 +80,9 @@ def update_tracking(): continue j = np.where(ids == id)[0] other = tuple(((positions[j] + centers[j]) * scale_factor)[0].astype(np.int)) - cv.line(image, pos, other, (255, 255, 255)) + #cv.line(image, pos, other, (255, 255, 255)) #print("tracking", frame, key, positions[key][3]) #cv.imwrite("image.png", image) - cv.putText(image, str(frame), (10, 10), cv.FONT_HERSHEY_PLAIN, 0.5, (255, 255, 255)) - image = cv.cvtColor(image, cv.COLOR_BGR2RGB) - TRex.imshow("image", image) + #cv.putText(image, str(frame), (10, 10), cv.FONT_HERSHEY_PLAIN, 0.5, (255, 255, 255)) + #image = cv.cvtColor(image, cv.COLOR_BGR2RGB) + #TRex.imshow("image", image) diff --git a/Application/learn_static.py b/Application/learn_static.py index 3fc5a8e6271592fb9377561443a38d6609525e7e..6ffa4ce644695ba74ffcc25265586824416da5c2 100644 --- a/Application/learn_static.py +++ b/Application/learn_static.py @@ -12,7 +12,6 @@ from keras.models import Sequential import keras import keras.backend as K import numpy as np -import cv2 as cv from 
keras.preprocessing.image import ImageDataGenerator from keras.utils import np_utils import TRex diff --git a/Application/utils.py b/Application/utils.py index 4680f47b7d00b2b9a24ed9a37a4070bc0a3b4fe0..f2cc3dd982a9e32e6f16c11c3f718e91221b1903 100644 --- a/Application/utils.py +++ b/Application/utils.py @@ -23,7 +23,6 @@ def figure_as_image(): from PIL import Image import matplotlib.pyplot as plt import numpy as np - import cv2 as cv import os plt.gcf().set_tight_layout(True) @@ -43,7 +42,6 @@ def show_figure(title="plot", output_path="", im = None): import TRex from PIL import Image import numpy as np - import cv2 as cv import os if type(im) == type(None): diff --git a/conda/meta.yaml b/conda/meta.yaml index d46547cf92c665d9a74d8547571c47a2a544987e..11b3102b2a82778d854eb73933f12d590311e67c 100644 --- a/conda/meta.yaml +++ b/conda/meta.yaml @@ -4,7 +4,7 @@ package: source: git_url: https://github.com/mooch443/trex.git - git_rev: 55c25dd862ceb5931193cf0d91f783562203d193 + git_rev: 24f2483b311d0944374f0b817b1cab91a294f94c git_depth: 1 build: @@ -73,8 +73,6 @@ requirements: - python [win] - ffmpeg ==4.0 [not win] - python =3.6 [not win] - - py-opencv - - opencv - glfw [linux] about: diff --git a/docs/install.rst b/docs/install.rst index 3c5384791329f8275a7df00f876412e3ad9dc694..2cfc80bebb8c46f2750ceedf55557476f963e915 100644 --- a/docs/install.rst +++ b/docs/install.rst @@ -12,11 +12,11 @@ Installation The easy way ************ -|trex| supports all major platforms. There is an easy way to install |trex| using Anaconda, by creating a new virtual environment (here named ``tracking``, which you can replace):: +|trex| supports all major platforms. 
There is an easy way to install |trex| using Anaconda, by creating a new virtual environment (here named ``tracking``, which you can replace) -- the downside is that it needs to be compiled with fewer optimizations and features than a manually compiled one due to compatibility and licensing issues:: conda create -n tracking -c main -c conda-forge -c trexing trex -The conda version does not offer support for Basler cameras. If you need to use |grabs| with machine vision cameras, please consider compiling the software yourself -- it has other advantages, too (such as enabling some Metal features on macOS and getting a squeaky clean, new version)! +The conda version does not offer support for Basler cameras. If you need to use |grabs| with machine vision cameras, please consider compiling the software yourself -- it has other advantages, too (such as enabling some Metal features on macOS, architecture-specific optimizations and just getting a squeaky clean, new version)! Compile it yourself *******************