Commit 36eb9db9 authored by Reko Vuorinen

Tried to fix the ARTag detection

parent 13ce8130
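
For reviewers who want to exercise the detection change outside the node, the commit boils down to: detect the DICT_4X4_50 ArUco tags with tuned detector parameters and steer toward the mean of their centres. Below is a minimal standalone sketch of that idea, assuming the legacy cv2.aruco API (opencv-contrib-python before 4.7, matching the calls in the diff); the test image path and the __main__ harness are hypothetical, not part of the node.

import cv2
import numpy as np
from cv2 import aruco

def detect_tag_cluster_center(bgr_image):
    """Return the mean (cx, cy) of all detected DICT_4X4_50 tag centres, or None."""
    gray = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
    parameters = aruco.DetectorParameters_create()
    # Same direction of tuning as the node: smaller threshold windows and a lower
    # perimeter bound help with small/distant tags; sub-pixel corner refinement.
    parameters.adaptiveThreshWinSizeMin = 5
    parameters.adaptiveThreshWinSizeMax = 25
    parameters.adaptiveThreshWinSizeStep = 5
    parameters.minMarkerPerimeterRate = 0.02
    parameters.cornerRefinementMethod = aruco.CORNER_REFINE_SUBPIX
    corners, ids, _ = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
    if ids is None:
        return None
    centers = [corner_set[0].mean(axis=0) for corner_set in corners]  # each (4, 2) -> (2,)
    cx, cy = np.mean(centers, axis=0)
    return int(cx), int(cy)

if __name__ == "__main__":
    frame = cv2.imread("gate_frame.png")  # hypothetical test frame
    if frame is not None:
        print(detect_tag_cluster_center(frame))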
@@ -92,7 +92,7 @@ class GateNavigator(Node):
self.get_logger().info(">>> Running in FLIGHT MODE <<<")
self.get_logger().info(f"Subscribed to {image_topic}, publishing to {cmd_vel_topic}.")
self.get_logger().info(f"Green HSV range: {self.hsv_lower_green} to {self.hsv_upper_green}")
self.get_logger().info(f"Wood HSV range: {self.hsv_lower_wood} to {self.hsv_upper_wood}")
# Initial Takeoff
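
The HSV ranges logged above are the inputs to the colour masks used later in the callback (a green mask for the gate frame and a wood mask). A brief sketch of how such masks are commonly built with cv2.inRange; the numeric ranges below are illustrative placeholders, not the node's actual parameter values.

import cv2
import numpy as np

def colour_masks(bgr_image):
    """Binary masks for the gate colours (range values below are illustrative)."""
    hsv = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)
    lower_green = np.array([40, 80, 80], dtype=np.uint8)    # illustrative, not the node's values
    upper_green = np.array([80, 255, 255], dtype=np.uint8)
    lower_wood = np.array([10, 60, 60], dtype=np.uint8)     # illustrative
    upper_wood = np.array([25, 255, 255], dtype=np.uint8)
    green_mask = cv2.inRange(hsv, lower_green, upper_green)
    wood_mask = cv2.inRange(hsv, lower_wood, upper_wood)
    return green_mask, wood_mask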
@@ -177,41 +177,82 @@ class GateNavigator(Node):
if self.check_and_land_on_red(red_mask):
return # Land triggered, skip rest
contours, _ = cv2.findContours(green_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
ar_tag_active = False
gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
parameters = aruco.DetectorParameters_create()
# (same fine-tuning you already had)
parameters.adaptiveThreshWinSizeMin = 5
parameters.adaptiveThreshWinSizeMax = 25
parameters.adaptiveThreshWinSizeStep = 5
parameters.adaptiveThreshConstant = 7
parameters.minMarkerPerimeterRate = 0.02
parameters.maxMarkerPerimeterRate = 4.0
parameters.polygonalApproxAccuracyRate = 0.04
parameters.minCornerDistanceRate = 0.05
parameters.minMarkerDistanceRate = 0.05
parameters.cornerRefinementMethod = aruco.CORNER_REFINE_SUBPIX
parameters.errorCorrectionRate = 0.7
corners, ids, _ = aruco.detectMarkers(gray, aruco_dict,
parameters=parameters)
self.gate_detected = False
best_cx, best_cy = self.image_center_x, self.image_center_y
# -------------------------------------------------------------------------
# NEW LOGIC – average the centres of all detected AR tags; otherwise fall
# back to 3+ separate green blobs and average their centres
# -------------------------------------------------------------------------
if ids is not None:
ar_tag_active = True
tag_centers = []
detected_ids_list = ids.flatten().tolist()
for i, corner_set in enumerate(corners):
c = corner_set[0] # shape (4,2)
cx, cy = int(np.mean(c[:, 0])), int(np.mean(c[:, 1]))
tag_centers.append((cx, cy))
cv2.circle(cv_image, (cx, cy), 5, (0, 255, 255), -1)
cv2.putText(cv_image, str(detected_ids_list[i]),
(cx + 10, cy + 10), cv2.FONT_HERSHEY_SIMPLEX,
0.5, (255, 0, 255), 2)
best_cx = int(np.mean([p[0] for p in tag_centers]))
best_cy = int(np.mean([p[1] for p in tag_centers]))
cv2.circle(cv_image, (best_cx, best_cy), 7, (255, 0, 255), -1)
aruco.drawDetectedMarkers(cv_image, corners, ids)
self.gate_detected = True
self.passing_gate = True
self.last_gate_time = self.get_clock().now()
if not ar_tag_active:
contours, _ = cv2.findContours(green_mask, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
if contours:
# only keep contours whose area is significant for a gate candidate
sig_contours = [c for c in contours if cv2.contourArea(c) > self.min_contour_area] # << mod
if len(sig_contours) >= 3: # << mod
centers = [] # << mod
for c in sig_contours: # << mod
M = cv2.moments(c) # << mod
if M["m00"] == 0: # << mod
continue # << mod
cx = int(M["m10"] / M["m00"]) # << mod
cy = int(M["m01"] / M["m00"]) # << mod
centers.append((cx, cy)) # << mod
cv2.drawContours(cv_image, [c], -1, (0, 255, 0), 2) # << mod
if centers: # << mod
best_cx = int(np.mean([p[0] for p in centers])) # << mod
best_cy = int(np.mean([p[1] for p in centers])) # << mod
self.gate_detected = True # << mod
self.passing_gate = True # << mod
self.last_gate_time = self.get_clock().now() # << mod
cv2.circle(cv_image, (best_cx, best_cy), 7, (0, 0, 255), -1) # << mod
cv2.putText(cv_image, f"Gate ({best_cx},{best_cy})", # << mod
(best_cx - 40, best_cy - 20), cv2.FONT_HERSHEY_SIMPLEX, # << mod
0.5, (255, 255, 255), 2) # << mod
else: # << mod
# ---------- original single-contour logic (unchanged) ----------
sig_contours = [c for c in contours if cv2.contourArea(c) > self.min_contour_area]
if len(sig_contours) >= 3:
centers = []
for c in sig_contours:
M = cv2.moments(c)
if M["m00"] == 0:
continue
cx = int(M["m10"] / M["m00"])
cy = int(M["m01"] / M["m00"])
centers.append((cx, cy))
cv2.drawContours(cv_image, [c], -1, (0, 255, 0), 2)
if centers:
best_cx = int(np.mean([p[0] for p in centers]))
best_cy = int(np.mean([p[1] for p in centers]))
self.gate_detected = True
self.passing_gate = True
self.last_gate_time = self.get_clock().now()
cv2.circle(cv_image, (best_cx, best_cy), 7, (0, 0, 255), -1)
cv2.putText(cv_image, f"Gate ({best_cx},{best_cy})",
(best_cx - 40, best_cy - 20), cv2.FONT_HERSHEY_SIMPLEX,
0.5, (255, 255, 255), 2)
else:
largest_contour = max(sig_contours, key=cv2.contourArea) if sig_contours else None
if largest_contour is not None:
M = cv2.moments(largest_contour)
@@ -228,9 +269,7 @@ class GateNavigator(Node):
cv2.circle(cv_image, (cx, cy), 7, (0, 0, 255), -1)
cv2.putText(cv_image, f"Gate ({cx},{cy})", (cx - 40, cy - 20),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
# -------------------- end of new / modified section ----------------------
# ---------------- remainder of function is UNCHANGED --------------------
# Control Logic Calculation
twist_msg = Twist()
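
The control-logic step starting here (and visible in full in the removed block further down) is a proportional mapping: horizontal pixel error drives yaw, vertical pixel error drives climb, and forward speed is held constant while a gate or tag is tracked. A minimal sketch of that mapping follows; the gain values and their signs are illustrative, since the node's kp_yaw and kp_alt are configured elsewhere.

from geometry_msgs.msg import Twist

def pixel_error_to_twist(cx, cy, image_center_x, image_center_y,
                         kp_yaw=-0.002, kp_alt=-0.002, forward_speed=0.3):
    """Proportional pixel-error -> velocity command (gains here are illustrative)."""
    error_x = cx - image_center_x  # positive: target is right of the image centre
    error_y = cy - image_center_y  # positive: target is below centre (image y grows downward)
    twist = Twist()
    twist.angular.z = kp_yaw * float(error_x)  # sign and magnitude depend on camera/body frames
    twist.linear.z = kp_alt * float(error_y)
    twist.linear.x = float(forward_speed)
    return twist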
@@ -265,92 +304,6 @@ class GateNavigator(Node):
else:
self.passing_gate = False
else:
# ARTag detection
gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50) # VERIFY THIS!
# Detector Parameters
parameters = aruco.DetectorParameters_create()
#--------------------------------------------------------------------------------------------
# Thresholding Parameters
# Lowering min window size can help detect smaller tags
parameters.adaptiveThreshWinSizeMin = 5 # Default: 3
# Increasing max window size can help with varying lighting
parameters.adaptiveThreshWinSizeMax = 25 # Default: 23
# Step size for adaptive thresholding windows
parameters.adaptiveThreshWinSizeStep = 5 # Default: 10
# Constant subtracted from mean during adaptive thresholding
parameters.adaptiveThreshConstant = 7 # Default: 7
# Contour Filtering Parameters
# Minimum perimeter relative to image dimensions (decrease if tags are very small)
parameters.minMarkerPerimeterRate = 0.02 # Default: 0.03 (Value depends heavily on expected tag size in image)
# Maximum perimeter relative to image dimensions (increase if tags can be very large/close)
parameters.maxMarkerPerimeterRate = 4.0 # Default: 4.0
# Accuracy for polygonal approximation (small adjustments might help)
parameters.polygonalApproxAccuracyRate = 0.04 # Default: 0.03
# Minimum distance between corners (increase slightly if getting false positives from noise)
parameters.minCornerDistanceRate = 0.05 # Default: 0.05
# Minimum distance between different markers (increase if markers are close and misidentified)
parameters.minMarkerDistanceRate = 0.05 # Default: 0.05
# Corner Refinement
# Use sub-pixel refinement for potentially better accuracy (but slightly slower)
parameters.cornerRefinementMethod = aruco.CORNER_REFINE_SUBPIX # Default: CORNER_REFINE_NONE
# Other options: aruco.CORNER_REFINE_CONTOUR, aruco.CORNER_REFINE_APRILTAG (if using AprilTags)
# Error Correction
# Allow slightly more error correction (might help with damaged/occluded tags, but increases false positive risk)
parameters.errorCorrectionRate = 0.7 # Default: 0.6
#-------------------------------------------------------------------------------------------------------------
corners, ids, rejected = aruco.detectMarkers(gray, aruco_dict, parameters=parameters) # Rejected candidates for debugging
# Handle 1 or more tags
if ids is not None:
tag_centers = []
detected_ids_list = ids.flatten().tolist() # Get a flat list of detected IDs
for i, corner_set in enumerate(corners):
c = corner_set[0] # shape (4,2)
cx = int(np.mean(c[:, 0]))
cy = int(np.mean(c[:, 1]))
tag_centers.append((cx, cy))
# Draw tag center and ID
cv2.circle(cv_image, (cx, cy), 5, (0, 255, 255), -1)
cv2.putText(cv_image, str(detected_ids_list[i]), (cx + 10, cy + 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 255), 2)
# Calculate the average center of ALL detected tags
avg_cx = int(np.mean([p[0] for p in tag_centers]))
avg_cy = int(np.mean([p[1] for p in tag_centers]))
error_x = avg_cx - self.image_center_x
error_y = avg_cy - self.image_center_y
calculated_angular_z = self.kp_yaw * float(error_x)
calculated_linear_z = self.kp_alt * float(error_y)
# Maintain forward speed when tags are detected
calculated_linear_x = self.target_forward_speed
twist_msg.angular.z = calculated_angular_z
twist_msg.linear.z = calculated_linear_z
twist_msg.linear.x = calculated_linear_x
num_tags = len(ids)
self.get_logger().info(f"{num_tags} AR Tag(s) Found (IDs: {detected_ids_list})! Center:({avg_cx},{avg_cy}), Err:({error_x},{error_y}), Cmd: lin.x={twist_msg.linear.x:.2f}, lin.z={twist_msg.linear.z:.2f}, ang.z={twist_msg.angular.z:.2f}", throttle_duration_sec=1.0)
# Set passing_gate mode active (even if only one tag is seen)
self.passing_gate = True
self.last_gate_time = self.get_clock().now()
# Visualize overall target center and detected markers
cv2.circle(cv_image, (avg_cx, avg_cy), 7, (255, 0, 255), -1) # Magenta for overall center
aruco.drawDetectedMarkers(cv_image, corners, ids) # Draw borders
else:
# No Gate (Color) or AR Tags Found -> Search Rotation
search_twist = Twist()
......
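
As a final review aid, the green-blob fallback added in the large hunk above reduces to one step: take the centroids of all sufficiently large contours in the green mask and, if at least three are present, average them into a single gate centre. A hedged standalone sketch of that step follows; min_area and min_blobs are illustrative defaults, not the node's configured min_contour_area.

import cv2
import numpy as np

def blob_cluster_center(green_mask, min_area=500, min_blobs=3):
    """Mean centroid of at least min_blobs contours above min_area, else None.
    Threshold values here are illustrative, not the node's actual parameters."""
    contours, _ = cv2.findContours(green_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    centers = []
    for c in contours:
        if cv2.contourArea(c) <= min_area:
            continue
        M = cv2.moments(c)
        if M["m00"] == 0:
            continue
        centers.append((M["m10"] / M["m00"], M["m01"] / M["m00"]))
    if len(centers) < min_blobs:
        return None
    cx, cy = np.mean(centers, axis=0)
    return int(cx), int(cy)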