summaryrefslogtreecommitdiff
path: root/test-cli/test/helpers/detect.py
diff options
context:
space:
mode:
authorManel Caro <mcaro@iatec.biz>2021-11-06 16:28:38 +0100
committerManel Caro <mcaro@iatec.biz>2021-11-06 16:28:38 +0100
commitcf19bfe18cbd283b188a858ee1629f9909c924f4 (patch)
tree1efb23519727130058401df090ab1b5f4cc8ba99 /test-cli/test/helpers/detect.py
parentb6932fbaf898724ae87c29f8965621610f377084 (diff)
parentd5b273a3b58a250742049df4ca0ef0ba54f53d33 (diff)
downloadboard-rel.0.1.zip
board-rel.0.1.tar.gz
board-rel.0.1.tar.bz2
Merge branch 'sopa-test'rel.0.1sopa-test
Diffstat (limited to 'test-cli/test/helpers/detect.py')
-rw-r--r--test-cli/test/helpers/detect.py61
1 files changed, 61 insertions, 0 deletions
diff --git a/test-cli/test/helpers/detect.py b/test-cli/test/helpers/detect.py
new file mode 100644
index 0000000..193dabf
--- /dev/null
+++ b/test-cli/test/helpers/detect.py
@@ -0,0 +1,61 @@
+import cv2
+import numpy as np
+
+
class Detect_Color(object):
    """Detect red, blue and green regions of a BGR frame via HSV thresholding.

    Each get*() method returns a 4-tuple ``(mask, cropped, mean, count)``:
      - mask:    binary mask of in-range pixels (from ``cv2.inRange``),
      - cropped: the original frame with only the masked pixels kept,
      - mean:    dict with mean 'R'/'G'/'B' of the frame under the mask,
      - count:   number of non-zero mask pixels.
    """

    oFrame = None    # original BGR frame passed to the constructor
    img_hsv = None   # HSV conversion of oFrame, computed once in __init__

    # HSV thresholds.  Red wraps around the 0/180 hue boundary in OpenCV's
    # H range, so it needs two bands; blue and green each need one.
    __red_lower1 = [0, 50, 20]
    __red_upper1 = [5, 255, 255]
    __red_lower2 = [175, 50, 20]
    __red_upper2 = [180, 255, 255]
    __blue_lower = [110, 50, 50]
    __blue_upper = [130, 255, 255]
    __green_lower = [36, 25, 25]
    __green_upper = [86, 255, 255]

    def __init__(self, frame):
        """Store the BGR frame and pre-compute its HSV conversion.

        frame -- BGR image (e.g. from cv2.imread / a cv2 capture).
        """
        self.oFrame = frame
        self.__hist()

    def __hist(self):
        # Convert once; every detection thresholds against this HSV image.
        self.img_hsv = cv2.cvtColor(self.oFrame, cv2.COLOR_BGR2HSV)

    def getRed(self):
        """Detect red pixels (two hue bands, since red wraps the hue circle)."""
        # Use the class constants instead of re-hard-coding the thresholds,
        # so the declared ranges and the detection logic cannot drift apart.
        return self.__detect_two_areas(self.__red_lower1, self.__red_upper1,
                                       self.__red_lower2, self.__red_upper2)

    def getBlue(self):
        """Detect blue pixels."""
        return self.__detect_one_area(self.__blue_lower, self.__blue_upper)

    def getGreen(self):
        """Detect green pixels."""
        return self.__detect_one_area(self.__green_lower, self.__green_upper)

    def __mask_stats(self, c_mask):
        # Shared post-processing for both detectors: crop the frame to the
        # mask, compute the mean B/G/R under the mask, count selected pixels.
        croped = cv2.bitwise_and(self.oFrame, self.oFrame, mask=c_mask)
        mean_v = cv2.mean(self.oFrame, mask=c_mask)
        # cv2.mean returns channels in BGR order; expose them as R/G/B keys.
        mean = {'R': mean_v[2], 'G': mean_v[1], 'B': mean_v[0]}
        count = cv2.countNonZero(c_mask)
        return c_mask, croped, mean, count

    def __detect_one_area(self, lower, upper):
        """Threshold the HSV frame against a single [lower, upper] band."""
        c_mask = cv2.inRange(self.img_hsv, np.array(lower), np.array(upper))
        return self.__mask_stats(c_mask)

    def __detect_two_areas(self, lower1, upper1, lower2, upper2):
        """Threshold against two bands and OR the masks (wrap-around hues)."""
        c_mask1 = cv2.inRange(self.img_hsv, np.array(lower1), np.array(upper1))
        c_mask2 = cv2.inRange(self.img_hsv, np.array(lower2), np.array(upper2))
        return self.__mask_stats(cv2.bitwise_or(c_mask1, c_mask2))