summaryrefslogtreecommitdiff
path: root/test-cli/test/helpers/detect.py
diff options
context:
space:
mode:
authorHector Fernandez <hector@iatec.biz>2020-09-29 13:27:32 +0200
committerHector Fernandez <hector@iatec.biz>2020-09-30 14:30:05 +0200
commitca73fcc336edc23db5750e93b6e1014d32a54ea5 (patch)
tree890e4786f1c241a79b44a99dd8447062572cbc62 /test-cli/test/helpers/detect.py
parentd8d4684c24a7c34334bb0b1d74dead69282e8c46 (diff)
downloadboard-ca73fcc336edc23db5750e93b6e1014d32a54ea5.zip
board-ca73fcc336edc23db5750e93b6e1014d32a54ea5.tar.gz
board-ca73fcc336edc23db5750e93b6e1014d32a54ea5.tar.bz2
Added new test to validate video output with a webcam.
Diffstat (limited to 'test-cli/test/helpers/detect.py')
-rw-r--r--test-cli/test/helpers/detect.py61
1 files changed, 61 insertions, 0 deletions
diff --git a/test-cli/test/helpers/detect.py b/test-cli/test/helpers/detect.py
new file mode 100644
index 0000000..193dabf
--- /dev/null
+++ b/test-cli/test/helpers/detect.py
@@ -0,0 +1,61 @@
+import cv2
+import numpy as np
+
+
class Detect_Color(object):
    """Detect red, blue and green regions in a BGR frame via HSV thresholding.

    The frame is converted to HSV once at construction; each getXxx() call
    thresholds that cached conversion and returns the tuple
    (mask, cropped, mean, count) where:
      mask    -- single-channel binary mask of in-range pixels
      cropped -- original frame with out-of-range pixels zeroed
      mean    -- dict {'R','G','B'} of mean BGR values under the mask
      count   -- number of non-zero mask pixels
    """

    oFrame = None
    img_hsv = None
    # HSV thresholds (OpenCV hue range is 0-180). Red hue wraps around 0,
    # so it needs two ranges that are OR-ed together.
    __red_lower1 = [0, 50, 20]
    __red_upper1 = [5, 255, 255]
    __red_lower2 = [175, 50, 20]
    __red_upper2 = [180, 255, 255]
    __blue_lower = [110, 50, 50]
    __blue_upper = [130, 255, 255]
    __green_lower = [36, 25, 25]
    __green_upper = [86, 255, 255]

    def __init__(self, frame):
        # frame: BGR image (e.g. from cv2.VideoCapture.read()).
        self.oFrame = frame
        self.__hist()

    def __hist(self):
        # Pre-compute the HSV conversion once; every detection reuses it.
        self.img_hsv = cv2.cvtColor(self.oFrame, cv2.COLOR_BGR2HSV)

    def getRed(self):
        """Return (mask, cropped, mean, count) for red pixels."""
        # Fix: use the class-level thresholds instead of re-hardcoding the
        # same literals here (previously the constants were defined but unused).
        return self.__detect_two_areas(self.__red_lower1, self.__red_upper1,
                                       self.__red_lower2, self.__red_upper2)

    def getBlue(self):
        """Return (mask, cropped, mean, count) for blue pixels."""
        return self.__detect_one_area(self.__blue_lower, self.__blue_upper)

    def getGreen(self):
        """Return (mask, cropped, mean, count) for green pixels."""
        return self.__detect_one_area(self.__green_lower, self.__green_upper)

    def __build_result(self, c_mask):
        # Shared tail of both detectors: crop, per-channel mean, pixel count.
        cropped = cv2.bitwise_and(self.oFrame, self.oFrame, mask=c_mask)
        mean_v = cv2.mean(self.oFrame, mask=c_mask)
        # cv2.mean returns channels in BGR order; expose them by name.
        mean = {'R': mean_v[2], 'G': mean_v[1], 'B': mean_v[0]}
        count = cv2.countNonZero(c_mask)
        return c_mask, cropped, mean, count

    def __detect_one_area(self, lower, upper):
        # Threshold a single contiguous HSV range.
        c_mask = cv2.inRange(self.img_hsv, np.array(lower), np.array(upper))
        return self.__build_result(c_mask)

    def __detect_two_areas(self, lower1, upper1, lower2, upper2):
        # Threshold two HSV ranges and merge them (needed for hue wrap-around).
        c_mask1 = cv2.inRange(self.img_hsv, np.array(lower1), np.array(upper1))
        c_mask2 = cv2.inRange(self.img_hsv, np.array(lower2), np.array(upper2))
        return self.__build_result(cv2.bitwise_or(c_mask1, c_mask2))