+
+
REST Endpoints
+ Observers
+
+
+
+
+ Lifecycle
+
+
+
+
+ Operations Primitives
+
+
+ Others
+
+
+
+
+
+
Params
+
+ p1:
+
+
+
+ p2:
+
+
+
+
+
Table:
+
+
+
+
+
+
Tray 1:
+
+
+
+
Tray 2:
+
+
+
+
+
Tray 3:
+
+
+
+
Grip:
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/sorting_demo/src/__init__.py b/src/sorting_demo/src/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/sorting_demo/src/sorting_demo/__init__.py b/src/sorting_demo/src/sorting_demo/__init__.py
new file mode 100755
index 0000000..e69de29
diff --git a/src/sorting_demo/src/sorting_demo/concepts/__init__.py b/src/sorting_demo/src/sorting_demo/concepts/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/sorting_demo/src/sorting_demo/concepts/block.py b/src/sorting_demo/src/sorting_demo/concepts/block.py
new file mode 100644
index 0000000..b835047
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/concepts/block.py
@@ -0,0 +1,63 @@
+import re
+from rospy_message_converter import message_converter
+import demo_constants
+
+class BlockState:
+ regex = re.compile(r'block(\d+)\.*')
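+ # simulated gazebo ids look like "block3..."; the regex extracts the numeric block index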
+
+ def __init__(self, id, pose):
+ self.perception_id = None
+
+ self.gazebo_id = id
+ self.pose = pose
+ self.color = None
+ self.homogeneous_transform = None
+
+ if not demo_constants.is_real_robot():
+ search = BlockState.regex.search(id)
+ self.num = int(search.group(1))
+ else:
+ self.num = int(id)
+
+ self.gazebo_pose = None
+ self.grasp_pose = None
+ self.place_pose = None
+
+
+ self.table_grasp_pose = None
+ self.tray_place_pose = None
+ self.table_place_pose = None
+
+ # the 2d blob point estimation based on the head image processing
+ self.headview_proj_estimation = None
+
+ # the color (hue) estimation based on the head image processing
+ self.hue_estimation = None
+
+ # the 3d pose estimation from the head image processing
+ self.headview_pose_estimation = None
+
+ self.tabletop_arm_view_estimated_pose = None
+ self.traytop_arm_view_estimated_pose = None
+
+ self.tray = None
+
+
+
+ def __str__(self):
+ return "[Block estpos = %s]" % str(self.headview_pose_estimation)
+
+ def get_state(self):
+ return {"id": self.perception_id , "table_pose": message_converter.convert_ros_message_to_dictionary(self.gazebo_pose),
+ "color": self.color}
+
+ @staticmethod
+ def is_block(id):
+ num = BlockState.regex.search(id)
+ return num is not None
+
+ def get_color(self):
+ return self.color
+
+ def is_color(self, color):
+ return color.upper() in self.color.upper()
diff --git a/src/sorting_demo/src/sorting_demo/concepts/gripper_state.py b/src/sorting_demo/src/sorting_demo/concepts/gripper_state.py
new file mode 100644
index 0000000..30243e6
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/concepts/gripper_state.py
@@ -0,0 +1,5 @@
+class GripperState:
+ def __init__(self):
+ self.holding_block = None
+ def get_state(self):
+ return {"holding_block": None if self.holding_block is None else self.holding_block.get_state()}
\ No newline at end of file
diff --git a/src/sorting_demo/src/sorting_demo/concepts/table.py b/src/sorting_demo/src/sorting_demo/concepts/table.py
new file mode 100644
index 0000000..89a2c8b
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/concepts/table.py
@@ -0,0 +1,17 @@
+class Table:
+ def __init__(self):
+ self.blocks = []
+
+ def get_blocks(self):
+ return self.blocks
+
+ def get_state(self):
+ return {"blocks": [b.get_state() for b in self.blocks]}
+
+ def notify_gripper_pick(self, b, gripper):
+ self.blocks.remove(b)
+ gripper.holding_block = b
+
+ def notify_gripper_place(self, b, gripper):
+ gripper.holding_block = None
+ self.blocks.append(b)
\ No newline at end of file
diff --git a/src/sorting_demo/src/sorting_demo/concepts/tray.py b/src/sorting_demo/src/sorting_demo/concepts/tray.py
new file mode 100644
index 0000000..854358f
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/concepts/tray.py
@@ -0,0 +1,82 @@
+import copy
+import re
+
+import math
+import rospy
+from rospy_message_converter import message_converter
+import tf
+import tf.transformations
+from geometry_msgs.msg import Quaternion
+
+class TrayState:
+ regex = re.compile(r'tray(\d+)\.*')
+
+ def __init__(self, gazebo_id, pose, TRAY_SURFACE_THICKNESS=0.04):
+ self.gazebo_id = gazebo_id
+ self.pose = pose
+ self.gazebo_pose = None
+ self.TRAY_SURFACE_THICKNESS = TRAY_SURFACE_THICKNESS
+ search = TrayState.regex.search(gazebo_id)
+ self.num = int(search.group(1))
+ self.blocks = []
+
+ @staticmethod
+ def is_tray(id):
+ num = TrayState.regex.search(id)
+ return num is not None
+
+ def notify_place_block(self, block, gripper_state):
+ block.tray = self
+ self.blocks.append(block)
+ gripper_state.holding_block = None
+
+
+ def notify_pick_block(self, block, gripper_state):
+ block.tray = None
+ self.blocks.remove(block)
+ gripper_state.holding_block = block
+
+ def get_tray_pick_location(self):
+ """
+ provides the grasping pose for the tray
+
+ :return: geometry_msg/Pose
+ """
+ copyfinalpose = copy.deepcopy(self.gazebo_pose)
+ copyfinalpose.position.y -= 0.15
+ copyfinalpose.position.z -= 0.02
+ return copyfinalpose
+
+ def get_tray_place_block_pose(self):
+ #return self.gazebo_pose
+ copygazebopose = copy.deepcopy(self.gazebo_pose)
+
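+ # blocks are laid out in a row along the y axis of the tray pose: the first block goes
+ # 8 cm towards +y from the tray centre and every block already in the tray shifts the
+ # next slot 7.5 cm towards -y (0 blocks -> y + 0.08, 1 -> y + 0.005, 2 -> y - 0.07)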
+ yoffset = -0.08 + 0.075 * len(self.blocks)
+ copygazebopose.position.y -= yoffset
+ copygazebopose.position.z += self.TRAY_SURFACE_THICKNESS
+
+ zrot = tf.transformations.quaternion_from_euler(0, 0, -math.pi/2.0)
+
+ tray_orientation = [copygazebopose.orientation.x, copygazebopose.orientation.y, copygazebopose.orientation.z, copygazebopose.orientation.w]
+ # oorient = [overhead_orientation.x,overhead_orientation.y,overhead_orientation.z,overhead_orientation.w]
+
+ # resultingorient = tf.transformations.quaternion_multiply(cubeorientation, tf.transformations.quaternion_conjugate(oorient))
+ resultingorient = tf.transformations.quaternion_multiply(tray_orientation, zrot)
+
+ # resultingorient = cubeorientation
+
+
+ copygazebopose.orientation = Quaternion(x=resultingorient[0], y=resultingorient[1], z=resultingorient[2],
+ w=resultingorient[3])
+
+
+ rospy.logwarn("Tray Place location (objects %d, %lf): " % (len(self.blocks), yoffset) + str(copygazebopose))
+
+ return copygazebopose
+
+
+ def get_state(self):
+ return {"pose": message_converter.convert_ros_message_to_dictionary(self.gazebo_pose), "blocks": [b.get_state() for b in self.blocks]}
+
+ def __str__(self):
+ return "Tray: "+ str(self.gazebo_id) + ",num: " + str(self.num) + " -> " + str(len(self.blocks)) + " items"
diff --git a/src/sorting_demo/src/sorting_demo/cv_detection_camera_helper.py b/src/sorting_demo/src/sorting_demo/cv_detection_camera_helper.py
new file mode 100644
index 0000000..42c204b
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/cv_detection_camera_helper.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python
+import Queue
+import numpy
+
+import rospy
+import tf
+
+from sensor_msgs.msg import CameraInfo
+from image_geometry import PinholeCameraModel
+
+from visualization_msgs.msg import *
+
+import intera_interface
+
+class CameraHelper:
+ """
+ A helper class to take pictures with the Sawyer camera and unproject points from the images
+ """
+ def __init__(self, camera_name, base_frame, table_height):
+ """
+ Initialize the instance
+
+ :param camera_name: The camera name. One of (head_camera, right_hand_camera)
+ :param base_frame: The frame for the robot base
+ :param table_height: The table height with respect to base_frame
+ """
+ self.camera_name = camera_name
+ self.base_frame = base_frame
+ self.table_height = table_height
+
+ self.image_queue = Queue.Queue()
+ self.pinhole_camera_model = PinholeCameraModel()
+ self.tf_listener = tf.TransformListener()
+
+ camera_info_topic = "/io/internal_camera/{}/camera_info".format(camera_name)
+ camera_info = rospy.wait_for_message(camera_info_topic, CameraInfo)
+
+ self.pinhole_camera_model.fromCameraInfo(camera_info)
+
+ cameras = intera_interface.Cameras()
+ cameras.set_callback(camera_name, self.__show_image_callback, rectify_image=True)
+
+ def __show_image_callback(self, img_data):
+ """
+ Image callback for the camera
+
+ :param img_data: The image received from the camera
+ """
+ if self.image_queue.empty():
+ self.image_queue.put(img_data)
+
+ def take_single_picture(self):
+ """
+ Take one picture from the specified camera
+
+ :returns: The image
+ """
+ with self.image_queue.mutex:
+ self.image_queue.queue.clear()
+
+ cameras = intera_interface.Cameras()
+
+ cameras.start_streaming(self.camera_name)
+
+ image_data = self.image_queue.get(block=True, timeout=None)
+
+ cameras.stop_streaming(self.camera_name)
+
+ return image_data
+
+ def project_point_on_table(self, point):
+ """
+ Projects the 2D point from the camera image on the table
+
+ :param point: The 2D point in the form (x, y)
+ :return: The 3D point in the coordinate space of the frame that was specified when the object was initialized
+ """
+
+ # Get camera frame name
+ camera_frame = self.pinhole_camera_model.tfFrame()
+
+ # Check that both frames exist
+ if self.tf_listener.frameExists(self.base_frame) and self.tf_listener.frameExists(camera_frame):
+ # Get transformation
+ time = self.tf_listener.getLatestCommonTime(self.base_frame, camera_frame)
+ camera_translation_base, camera_orientation_base = self.tf_listener.lookupTransform(self.base_frame, camera_frame, time)
+
+ # Unproject point
+ unprojected_ray_camera = self.pinhole_camera_model.projectPixelTo3dRay(point)
+
+ # Rotate ray based on the frame transformation
+ camera_frame_rotation_matrix = tf.transformations.quaternion_matrix(camera_orientation_base)
+ unprojected_ray_base = numpy.dot(camera_frame_rotation_matrix[:3,:3], unprojected_ray_camera)
+
+ # Intersect ray with base plane
+ point_height = camera_translation_base[2] - self.table_height
+ factor = -point_height / unprojected_ray_base[2]
+ intersection = camera_translation_base + unprojected_ray_base * factor
+
+ return intersection
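+
+# Illustrative usage sketch (hypothetical, not part of the demo entry points). It assumes
+# a running ROS master, the Sawyer "head_camera" stream and a table at z = -0.15 in the
+# "base" frame; the pixel coordinate (640, 400) is just an example value:
+#
+# rospy.init_node('camera_helper_example')
+# helper = CameraHelper("head_camera", "base", -0.15)
+# image_msg = helper.take_single_picture()
+# table_point = helper.project_point_on_table((640, 400))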
diff --git a/src/sorting_demo/src/sorting_demo/cv_detection_head.py b/src/sorting_demo/src/sorting_demo/cv_detection_head.py
new file mode 100755
index 0000000..482efa6
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/cv_detection_head.py
@@ -0,0 +1,277 @@
+#!/usr/bin/python
+import sys
+import numpy
+import os
+
+from matplotlib import pyplot
+
+import rospy
+import cv2
+import rospkg
+
+from cv_bridge import CvBridge, CvBridgeError
+
+from visualization_msgs.msg import *
+
+import demo_constants
+
+from cv_detection_camera_helper import CameraHelper
+
+def get_blobs_info(cv_image):
+ """
+ Gets information about the colored blobs in the image
+
+ :param cv_image: The OpenCV image to get blobs from
+ :return: A dictionary containing an array of points for each colored blob found, represented by its hue.
+ For example, in an image with 2 red blobs and 3 blue blobs, it will return
+ {0: [(639, 558), (702, 555)], 120: [(567, 550), (698, 515), (648, 515)]}
+ """
+
+ # Show original image
+ #cv2.imshow("Original image", cv_image)
+
+ # Apply blur
+ BLUR_SIZE = 3
+ cv_image_blur = cv2.GaussianBlur(cv_image, (BLUR_SIZE, BLUR_SIZE), 0)
+ #cv2.imshow("Blur", cv_image_blur)
+
+ # Apply ROI mask
+ mask_path = rospkg.RosPack().get_path('sorting_demo') + "/share/head_mask.png"
+ cv_mask = cv2.imread(mask_path)
+ cv_mask = cv2.cvtColor(cv_mask, cv2.COLOR_BGR2GRAY)
+
+ cv_image_masked = cv2.bitwise_and(cv_image_blur, cv_image_blur, mask=cv_mask)
+ #cv2.imshow("Masked original", cv_image_masked)
+
+ # HSV split
+ cv_image_hsv = cv2.cvtColor(cv_image_masked, cv2.COLOR_BGR2HSV)
+ cv_image_h, cv_image_s, cv_image_v = cv2.split(cv_image_hsv)
+ #cv2.imshow("Image H", cv_image_h)
+ #cv2.imshow("Image S", cv_image_s)
+ #cv2.imshow("Image V", cv_image_v)
+
+ # Apply CLAHE to Value channel
+ CLAHE_SIZE = 16
+ clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(CLAHE_SIZE, CLAHE_SIZE))
+ cv_image_v_clahe = clahe.apply(cv_image_v)
+
+ # Merge channels
+ cv_image_clahe = cv2.merge((cv_image_h, cv_image_s, cv_image_v_clahe))
+ #cv_image_clahe_rgb = cv2.cvtColor(cv_image_clahe, cv2.COLOR_HSV2BGR)
+ #cv2.imshow("CLAHE", cv_image_clahe_rgb)
+
+ # Multiply Saturation and Value channels to separate the cubes, removing the table
+ cv_image_sv_multiplied = cv2.multiply(cv_image_s, cv_image_v_clahe, scale=1/255.0)
+ #cv2.imshow("Image S*V", cv_image_sv_multiplied)
+
+ # Binarize the result
+ BIN_THRESHOLD = 64
+ #_, cv_image_binary = cv2.threshold(cv_image_sv_multiplied, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+ _, cv_image_binary = cv2.threshold(cv_image_sv_multiplied, BIN_THRESHOLD, 255, cv2.THRESH_BINARY)
+ #cv2.imshow("Threshold", cv_image_binary)
+
+ # Erode the image to use as mask
+ EROSION_SIZE = 3
+ erosion_kernel = numpy.ones((EROSION_SIZE, EROSION_SIZE), numpy.uint8)
+ cv_image_binary_eroded = cv2.erode(cv_image_binary, erosion_kernel)
+
+ # Calculate H-channel histogram, applying the binarized image as mask
+ cv_image_h_histogram = cv2.calcHist([cv_image_h], [0], cv_image_binary_eroded, [180], [0, 179])
+
+ # Smoothen the histogram to find local maxima
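+ # (the histogram is tiled three times before blurring so that peaks near the 0/180 hue
+ # wrap-around are preserved; the middle copy is cut back out afterwards)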
+ HISTOGRAM_BLUR_SIZE = 11
+ histogram_length = len(cv_image_h_histogram)
+ cv_image_h_histogram_wrap = cv2.repeat(cv_image_h_histogram, 3, 1)
+ cv_image_h_histogram_smooth = cv2.GaussianBlur(cv_image_h_histogram_wrap, (HISTOGRAM_BLUR_SIZE, HISTOGRAM_BLUR_SIZE), 0)
+ cv_image_h_histogram_cut = cv_image_h_histogram_smooth[histogram_length : 2 * histogram_length]
+
+ # Collect peaks
+ histogram_peaks = [i for i in range(len(cv_image_h_histogram_cut))
+ if cv_image_h_histogram_cut[(i - 1) % histogram_length] < cv_image_h_histogram_cut[i] > cv_image_h_histogram_cut[(i + 1) % histogram_length]]
+
+ # Filter peaks
+ PEAK_MINIMUM = 100 # Ideally below the value of a single cube
+ PEAK_MAXIMUM = 500 # Ideally above the value of all the cubes of a single color
+ histogram_high_peaks = filter(lambda p : PEAK_MINIMUM < cv_image_h_histogram_cut[p] < PEAK_MAXIMUM, histogram_peaks)
+ #print(histogram_high_peaks)
+ #pyplot.clf()
+ #pyplot.plot(cv_image_h_histogram_cut)
+ #pyplot.pause(0.001)
+
+ # Process every color found in the histogram
+ blob_info = {}
+ cv_image_contours_debug = cv2.cvtColor(cv_image_binary_eroded, cv2.COLOR_GRAY2BGR)
+
+ for current_hue in histogram_high_peaks:
+ # Perform a Hue rotation that will be used to make detecting the edge colors easier (red in HSV corresponds to both 0 and 180)
+ HUE_AMPLITUDE = 5
+ cv_image_h_rotated = cv_image_h.astype(numpy.int16)
+ cv_image_h_rotated[:] -= current_hue
+ cv_image_h_rotated[:] += HUE_AMPLITUDE
+ cv_image_h_rotated = numpy.remainder(cv_image_h_rotated, 180)
+ cv_image_h_rotated = cv_image_h_rotated.astype(numpy.uint8)
+ #cv2.imshow("Hue rotation {}".format(histogram_high_peaks.index(current_hue)), cv_image_h_rotated)
+
+ # Binarize using range function
+ cv_image_h_inrange = cv2.inRange(cv_image_h_rotated, 0, HUE_AMPLITUDE * 2)
+
+ # Apply binary mask (consider that both black and the edge color have hue 0)
+ cv_image_h_masked = cv2.bitwise_and(cv_image_h_inrange, cv_image_h_inrange, mask=cv_image_binary_eroded)
+ #cv2.imshow("inRange {}".format(histogram_high_peaks.index(current_hue)), cv_image_h_masked)
+
+ # Find contours
+ _, contours, _ = cv2.findContours(cv_image_h_masked.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
+
+ # Filter by area
+ MINIMUM_AREA_SIZE = 50
+ contours_filtered_area = filter(lambda cnt: cv2.contourArea(cnt) > MINIMUM_AREA_SIZE, contours)
+
+ # Calculate convex hull
+ convex_contours = [cv2.convexHull(cnt) for cnt in contours_filtered_area]
+
+ contour_color_hsv = numpy.array([[[current_hue, 255, 255]]], numpy.uint8)
+ contour_color_rgb = cv2.cvtColor(contour_color_hsv, cv2.COLOR_HSV2BGR)[0][0].tolist()
+ #cv2.drawContours(cv_image_contours_debug, convex_contours, -1, contour_color_rgb, 1)
+
+ # Find centroids
+ contour_moments = [cv2.moments(cnt) for cnt in convex_contours]
+ contour_centroids = [(int(moments["m10"] / moments["m00"]), int(moments["m01"] / moments["m00"])) for moments in contour_moments if moments["m00"] != 0]
+ for (cx, cy) in contour_centroids:
+ cv2.circle(cv_image_contours_debug, (cx, cy), 3, contour_color_rgb, cv2.FILLED)
+
+ # Collect data
+ blob_info[current_hue] = contour_centroids
+
+ cv2.imwrite("/tmp/head_contours.jpg", cv_image_contours_debug)
+
+ return blob_info
+
+def __publish_marker(publisher, index, point):
+ """
+ Publishes a cube-shaped marker
+
+ :param index: The marker index
+ :param point: The 3D position of the marker
+ """
+ marker = Marker()
+ marker.header.frame_id = "base"
+ marker.id = index
+ marker.type = Marker.CUBE
+ marker.scale.x = 0.04
+ marker.scale.y = 0.04
+ marker.scale.z = 0.04
+ marker.color.r = 1.0
+ marker.color.g = 1.0
+ marker.color.b = 0.0
+ marker.color.a = 1.0
+ marker.pose.position.x = point[0]
+ marker.pose.position.y = point[1]
+ marker.pose.position.z = point[2]
+
+ publisher.publish(marker)
+
+def test_head_ros():
+ """
+ Test the blob detection and CameraHelper class using ROS
+ """
+ rospy.init_node('cv_detection_head_camera')
+
+ camera_name = "head_camera"
+
+ TABLE_HEIGHT = demo_constants.TABLE_HEIGHT
+ camera_helper = CameraHelper(camera_name, "base", TABLE_HEIGHT)
+
+ bridge = CvBridge()
+
+ publisher = rospy.Publisher("cube_position_estimation", Marker, queue_size=10)
+
+ try:
+ while not rospy.is_shutdown():
+ # Take picture
+ img_data = camera_helper.take_single_picture()
+
+ # Convert to OpenCV format
+ cv_image = bridge.imgmsg_to_cv2(img_data, "bgr8")
+
+ # Save for debugging
+ #cv2.imwrite("/tmp/debug.png", cv_image)
+
+ # Get color blobs info
+ blob_info = get_blobs_info(cv_image)
+
+ # Project the points on 3D space
+ points = [y for x in blob_info.values() for y in x]
+
+ for index, point in enumerate(points):
+ projected = camera_helper.project_point_on_table(point)
+ __publish_marker(publisher, index, projected)
+
+ # Wait for a key press
+ cv2.waitKey(1)
+
+ rospy.sleep(0.1)
+ except CvBridgeError as err:
+ rospy.logerr(err)
+
+ # Exit
+ cv2.destroyAllWindows()
+
+def test_head_cam():
+ """
+ Test the blob detection using a USB camera
+ """
+
+ # Create a video capture object
+ capture = cv2.VideoCapture(1)
+ capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
+ capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
+
+ while True:
+ # Capture a frame
+ ret, cv_image = capture.read()
+
+ if ret:
+ # Save for debugging
+ #cv2.imwrite("/tmp/debug.png", cv_image)
+
+ # Get color blobs info
+ blob_info = get_blobs_info(cv_image)
+ print(blob_info)
+
+ # Check for a press on the Escape key
+ if cv2.waitKey(1) & 0xFF == 27:
+ break
+
+ # Exit
+ capture.release()
+ cv2.destroyAllWindows()
+
+def test_head_debug():
+ """
+ Test the blob detection using images on disk
+ """
+
+ # Get files
+ path = rospkg.RosPack().get_path('sorting_demo') + "/share/test_head_simulator"
+ files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
+ #print(files)
+
+ # Process files
+ for f in files:
+ # Get image path
+ image_path = os.path.join(path, f)
+ print(image_path)
+
+ # Read image
+ cv_image = cv2.imread(image_path)
+
+ # Get color blobs info
+ blob_info = get_blobs_info(cv_image)
+ print(blob_info)
+
+ # Wait for a key press
+ cv2.waitKey(0)
+
+ # Exit
+ cv2.destroyAllWindows()
+
+if __name__ == '__main__':
+ sys.exit(test_head_debug())
diff --git a/src/sorting_demo/src/sorting_demo/cv_detection_right_hand.py b/src/sorting_demo/src/sorting_demo/cv_detection_right_hand.py
new file mode 100755
index 0000000..0eb7404
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/cv_detection_right_hand.py
@@ -0,0 +1,357 @@
+#!/usr/bin/python
+import sys
+import numpy
+import os
+import math
+import functools
+
+import rospy
+import cv2
+import rospkg
+
+from cv_bridge import CvBridge, CvBridgeError
+
+from cv_detection_camera_helper import CameraHelper
+
+imgindex = 0
+
+def __create_mask_image_from_template(reference_image, template, pos_x, pos_y):
+ '''
+ Resize a template image to match a reference image size, placing the template's center in the given position, to be used as a mask
+
+ :param reference_image: The reference image that the returned mask is intended to be applied to. The returned mask will have the same size as this image
+ :param template: The template to resize
+ :param pos_x: The x position where to place the center of the template in the final image
+ :param pos_y: The y position where to place the center of the template in the final image
+ :return: The resized, correctly positioned template
+ '''
+
+ # Get image and template sizes
+ reference_image_height, reference_image_width = reference_image.shape
+ template_height, template_width = template.shape
+
+ # Get the position the template should be placed at
+ pos_x_corrected = pos_x - template_width // 2
+ pos_y_corrected = pos_y - template_height // 2
+
+ # Calculate bottom and right margins
+ bottom_margin = reference_image_height - template_height - pos_y_corrected
+ right_margin = reference_image_width - template_width - pos_x_corrected
+
+ # Add the borders to the template image
+ border_top = max(0, pos_y_corrected)
+ border_bottom = max(0, bottom_margin)
+ border_left = max(0, pos_x_corrected)
+ border_right = max(0, right_margin)
+ mask_image = cv2.copyMakeBorder(template, border_top, border_bottom, border_left, border_right, cv2.BORDER_CONSTANT, value=0)
+
+ # Crop the image, in case the template ended up outside of the image
+ crop_top = int(math.fabs(min(0, pos_y_corrected)))
+ crop_bottom = crop_top + reference_image_height
+ crop_left = int(math.fabs(min(0, pos_x_corrected)))
+ crop_right = crop_left + reference_image_width
+ mask_image_cropped = mask_image[crop_top:crop_bottom,crop_left:crop_right]
+
+ return mask_image_cropped
+
+def __rotate_image_size_corrected(image, angle):
+ # Calculate max size for the rotated template and image offset
+ image_size_height, image_size_width = image.shape
+ image_center_x = image_size_width // 2
+ image_center_y = image_size_height // 2
+
+ # Create rotation matrix
+ rotation_matrix = cv2.getRotationMatrix2D((image_center_x, image_center_y), -angle, 1)
+
+ # Apply offset
+ new_image_size = int(math.ceil(cv2.norm((image_size_height, image_size_width), normType=cv2.NORM_L2)))
+ rotation_matrix[0, 2] += (new_image_size - image_size_width) / 2
+ rotation_matrix[1, 2] += (new_image_size - image_size_height) / 2
+
+ # Apply rotation to the template
+ image_rotated = cv2.warpAffine(image, rotation_matrix, (new_image_size, new_image_size))
+ return image_rotated
+
+def __apply_template_matching(angle, template, image):
+ # Rotate the template
+ template_rotated = __rotate_image_size_corrected(template, angle)
+
+ # Apply template matching
+ image_templated = cv2.matchTemplate(image, template_rotated, cv2.TM_CCOEFF_NORMED)
+
+ # Correct template matching image size difference
+ template_rotated_height, template_rotated_width = template_rotated.shape
+ template_half_height = template_rotated_height // 2
+ template_half_width = template_rotated_width // 2
+
+ image_templated_inrange_size_corrected = cv2.copyMakeBorder(image_templated, template_half_height, template_half_height, template_half_width, template_half_width, cv2.BORDER_CONSTANT, value=0)
+
+ # Calculate maximum match coefficient
+ max_match = numpy.max(image_templated_inrange_size_corrected)
+
+ return (max_match, angle, template_rotated, image_templated_inrange_size_corrected)
+
+def get_cubes_z_rotation(cv_image, CUBE_SIZE=90):
+ """
+ Gets the cubes rotation in the Z plane from an image. The results are sorted by distance to the center of the image
+
+ :param cv_image: The OpenCV image to get the cubes from
+ :return: An array containing the positions, angles and clearances of the cubes that have been found. The returned angles lie in the interval [-45, 45)
+ For example:
+ [((388, 526), -41.0, True, True), ((556, 524), -31.0, True, True), ((474, 382), -31.0, True, False)]
+
+ """
+
+ # Show original image
+ #cv2.imshow("Original image", cv_image)
+
+ # Convert to grayscale
+ cv_image_grayscale = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
+
+ # Apply blur
+ BLUR_SIZE = 3
+ cv_image_blur = cv2.GaussianBlur(cv_image_grayscale, (BLUR_SIZE, BLUR_SIZE), 0)
+ #cv2.imshow("Blur", cv_image_blur)
+
+ # Apply CLAHE
+ CLAHE_SIZE = 64
+ clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(CLAHE_SIZE, CLAHE_SIZE))
+ cv_image_clahe = clahe.apply(cv_image_blur)
+ #cv2.imshow("CLAHE", cv_image_clahe)
+
+ # Apply Canny filter
+ sigma = 0.33
+ median = numpy.median(cv_image_clahe)
+ lower = int(max(0, (1.0 - sigma) * median))
+ upper = int(min(255, (1.0 + sigma) * median))
+ cv_image_canny = cv2.Canny(cv_image_clahe, lower, upper)
+ #cv2.imshow("Canny", cv_image_canny)
+
+ # Apply dilation
+ DILATION_SIZE = 5
+ dilation_kernel = numpy.ones((DILATION_SIZE, DILATION_SIZE), numpy.uint8)
+ cv_image_dilated = cv2.dilate(cv_image_canny, dilation_kernel, iterations = 1)
+ #cv2.imshow("Dilation", cv_image_dilated)
+
+ # Find contours
+ _, contours, _ = cv2.findContours(cv_image_dilated.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+
+ # Simplify contours
+ #contours_approx = [cv2.approxPolyDP(cnt, 0.005 * cv2.arcLength(cnt, True), True) for cnt in contours]
+
+ # Draw image with filled contours
+ cv_image_height, cv_image_width = cv_image_grayscale.shape
+ cv_image_contours_filled = numpy.zeros((cv_image_height, cv_image_width), numpy.uint8)
+ cv_image_contours_filled = cv2.fillPoly(cv_image_contours_filled, pts=contours, color=255)
+ #cv2.imshow("Contours", cv_image_contours_filled)
+
+ # Create cube image for template matching
+ cv_image_cube_template = numpy.full((CUBE_SIZE, CUBE_SIZE, 1), 255, numpy.uint8)
+
+ CUBE_BORDER_SIZE = 4
+ cv_image_cube_template_border = cv2.copyMakeBorder(cv_image_cube_template, CUBE_BORDER_SIZE, CUBE_BORDER_SIZE, CUBE_BORDER_SIZE, CUBE_BORDER_SIZE, cv2.BORDER_CONSTANT, value=0)
+
+ # Create mask for clearance check
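+ # (two white strips above and below the cube footprint; counting blob pixels that fall
+ # inside them tells whether that approach direction is free of neighbouring cubes)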
+ CLEARANCE_AREA_LENGTH = CUBE_SIZE / 2
+ CLEARANCE_AREA_MARGIN = 20
+ clearance_check_mask = numpy.full((CUBE_SIZE + 2 * CLEARANCE_AREA_MARGIN, CUBE_SIZE), 0, numpy.uint8)
+ clearance_check_mask = cv2.copyMakeBorder(clearance_check_mask, CLEARANCE_AREA_LENGTH, CLEARANCE_AREA_LENGTH, 0, 0, cv2.BORDER_CONSTANT, value=255)
+
+ # Calculate the angles that will be used when rotating the template (between -45 and 45 because of square symmetry)
+ range_subdivisions = 90
+ all_angles = numpy.arange(-45, 45, 90.0 / range_subdivisions)
+
+ # Look for cubes in the image and erase them until none are found
+ cube_positions_and_angles = []
+ original_image_loop = cv_image_contours_filled.copy()
+ while True:
+ #cv2.imshow("Loop image", original_image_loop)
+ #cv2.waitKey(0)
+
+ # Apply template matching rotating the template
+ apply_template_matching_partial = functools.partial(__apply_template_matching, template=cv_image_cube_template_border, image=original_image_loop)
+ template_matching_results = map(apply_template_matching_partial, all_angles)
+
+ # Get max matching coefficient
+ template_matching_results_max_values = [value for value, _, _, _ in template_matching_results]
+ template_matching_results_max = max(template_matching_results_max_values)
+
+ # Check if the match coefficient is good enough
+ TEMPLATE_MATCHING_MAX_THRESHOLD = 0.5
+ if template_matching_results_max < TEMPLATE_MATCHING_MAX_THRESHOLD:
+ break
+
+ # Collect best match
+ template_matching_results_max_index = template_matching_results_max_values.index(template_matching_results_max)
+ _, angle, template_rotated, image_templated = template_matching_results[template_matching_results_max_index]
+
+ # Find location
+ _, _, _, (max_loc_x, max_loc_y) = cv2.minMaxLoc(image_templated)
+
+ # Apply template as a mask to the original image, deleting the area it matched
+ template_mask = __create_mask_image_from_template(original_image_loop, template_rotated, max_loc_x, max_loc_y)
+
+ template_mask_inverted = cv2.bitwise_not(template_mask)
+
+ original_image_loop = cv2.bitwise_and(original_image_loop, template_mask_inverted)
+
+ # Rotate the clearance check mask to create the two needed for the clearance test
+ clearance_check_mask_rotated_0 = __rotate_image_size_corrected(clearance_check_mask, angle)
+ clearance_check_mask_rotated_90 = __rotate_image_size_corrected(clearance_check_mask, angle + 90)
+ #cv2.imshow("Clearance check mask rotated 0", clearance_check_mask_rotated_0)
+ #cv2.imshow("Clearance check mask rotated 90", clearance_check_mask_rotated_90)
+
+ # Create mask image from the clearance check mask
+ clearance_check_mask_full_size_0 = __create_mask_image_from_template(cv_image_contours_filled, clearance_check_mask_rotated_0, max_loc_x, max_loc_y)
+ clearance_check_mask_full_size_90 = __create_mask_image_from_template(cv_image_contours_filled, clearance_check_mask_rotated_90, max_loc_x, max_loc_y)
+ #cv2.imshow("Clearance check mask 0 full", clearance_check_mask_full_size_0)
+ #cv2.imshow("Clearance check mask 90 full", clearance_check_mask_full_size_90)
+
+ # Apply clearance mask to the original filled-contours image
+ original_image = cv_image_contours_filled
+ original_image_clearance_mask_applied_0 = cv2.bitwise_and(original_image, clearance_check_mask_full_size_0)
+ original_image_clearance_mask_applied_90 = cv2.bitwise_and(original_image, clearance_check_mask_full_size_90)
+ #cv2.imshow("Clearance check mask 0 applied", original_image_clearance_mask_applied_0)
+ #cv2.imshow("Clearance check mask 90 applied", original_image_clearance_mask_applied_90)
+
+ # Check clearance
+ clearance_0_count = cv2.countNonZero(original_image_clearance_mask_applied_0)
+ clearance_90_count = cv2.countNonZero(original_image_clearance_mask_applied_90)
+
+ CLEARANCE_THRESHOLD = 50
+ clearance_0 = clearance_0_count < CLEARANCE_THRESHOLD
+ clearance_90 = clearance_90_count < CLEARANCE_THRESHOLD
+
+ # Store result
+ cube_positions_and_angles.append(((max_loc_x, max_loc_y), angle, clearance_0, clearance_90))
+
+ # Sort cube positions by distance to the center of the image
+ # image centre as (x, y): x runs along the width (columns), y along the height (rows)
+ image_center_x = cv_image_width // 2
+ image_center_y = cv_image_height // 2
+ image_center = (image_center_x, image_center_y)
+ cube_positions_and_angles_sorted = sorted(cube_positions_and_angles, key=lambda (position, angle, clearance_0, clearance_90) : cv2.norm(position, image_center, normType=cv2.NORM_L2))
+
+ # Draw debug image
+ template_matching_debug_image = cv_image.copy()
+ for ((x, y), angle, clear_0, clear_90) in cube_positions_and_angles_sorted:
+ rotated_rectangle = cv2.boxPoints(((x, y), (CUBE_SIZE, CUBE_SIZE), angle))
+ rotated_rectangle = numpy.int0(rotated_rectangle)
+ cv2.drawContours(template_matching_debug_image, [rotated_rectangle], -1, (255, 0, 0), 2)
+
+ clearance_points_rectangle = cv2.boxPoints(((x, y), (CUBE_SIZE * 0.6, CUBE_SIZE * 0.6), angle + 45))
+ clearance_points_rectangle = numpy.int0(clearance_points_rectangle)
+
+ clearance_bools = [clear_90, clear_0]
+ for (i, (point_x, point_y)) in enumerate(clearance_points_rectangle):
+ current_clearance = clearance_bools[i % len(clearance_bools)]
+ clearance_circle_color = current_clearance and (0, 255, 0) or (0, 0, 255)
+ cv2.circle(template_matching_debug_image, (point_x, point_y), 5, clearance_circle_color, cv2.FILLED)
+ cv2.circle(template_matching_debug_image, (point_x, point_y), 5, (255, 255, 255), 2)
+
+ #cv2.imshow("Template matching result", template_matching_debug_image)
+ global imgindex
+ cv2.imwrite("/tmp/img"+ str(imgindex)+".jpg", template_matching_debug_image)
+ imgindex+=1
+ #cv2.waitKey(0)
+
+ return cube_positions_and_angles_sorted
+
+def test_right_hand_ros():
+ """
+ Test the cube orientation sensing using ROS
+ """
+ rospy.init_node('cv_detection_right_hand_camera')
+
+ camera_name = "right_hand_camera"
+
+ camera_helper = CameraHelper(camera_name, "base", 0)
+
+ bridge = CvBridge()
+
+ try:
+ while not rospy.is_shutdown():
+ # Take picture
+ img_data = camera_helper.take_single_picture()
+
+ # Convert to OpenCV format
+ cv_image = bridge.imgmsg_to_cv2(img_data, "bgr8")
+
+ # Save for debugging
+ #cv2.imwrite("/tmp/debug.png", cv_image)
+
+ # Get cube rotation
+ angles = get_cubes_z_rotation(cv_image)
+ print(angles)
+
+ # Wait for a key press
+ cv2.waitKey(1)
+
+ rospy.sleep(0.1)
+ except CvBridgeError as err:
+ rospy.logerr(err)
+
+ # Exit
+ cv2.destroyAllWindows()
+
+def test_right_hand_cam():
+ """
+ Test the blob detection using a USB camera
+ """
+
+ # Create a video capture object
+ capture = cv2.VideoCapture(1)
+ capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
+ capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
+
+ while True:
+ # Capture a frame
+ ret, cv_image = capture.read()
+
+ if ret:
+ # Save for debugging
+ #cv2.imwrite("/tmp/debug.png", cv_image)
+
+ # Get cube rotation
+ angles = get_cubes_z_rotation(cv_image)
+ print(angles)
+
+ # Check for a press on the Escape key
+ if cv2.waitKey(1) & 0xFF == 27:
+ break
+
+ # Exit
+ capture.release()
+ cv2.destroyAllWindows()
+
+def test_right_hand_debug():
+ """
+ Test the cube orientation sensing using images on disk
+ """
+
+ # Get files
+ path = rospkg.RosPack().get_path('sorting_demo') + "/share/test_right_hand_simulator"
+ files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
+ #files = ["border.png"]
+ #print(files)
+
+ # Process files
+ for f in files:
+ # Get image path
+ image_path = os.path.join(path, f)
+ print(image_path)
+
+ # Read image
+ cv_image = cv2.imread(image_path)
+
+ # Get cube rotation
+ angles = get_cubes_z_rotation(cv_image)
+ print(angles)
+
+ # Wait for a key press
+ cv2.waitKey(0)
+
+ # Exit
+ cv2.destroyAllWindows()
+
+if __name__ == '__main__':
+ sys.exit(test_right_hand_debug())
diff --git a/src/sorting_demo/src/sorting_demo/demo_constants.py b/src/sorting_demo/src/sorting_demo/demo_constants.py
new file mode 100644
index 0000000..c1cef02
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/demo_constants.py
@@ -0,0 +1,34 @@
+#!/usr/bin/python
+
+import rospy
+
+CUBE_EDGE_LENGTH = 0.04
+
+BLOCK_COLOR_MAPPINGS = [
+ {"material": "Gazebo/Green"},
+ {"material": "Gazebo/Red"},
+ {"material": "Gazebo/Blue"},
+ {"material": "Gazebo/Green"},
+ {"material": "Gazebo/Red"},
+ {"material": "Gazebo/Blue"},
+ {"material": "Gazebo/Red"},
+ {"material": "Gazebo/Blue"},
+ {"material": "Gazebo/Green"}
+]
+
+TRAY_COLORS = ["Red", "Green", "Blue"]
+
+TABLE_HEIGHT = -0.15
+
+TRAY_SURFACE_THICKNESS = 0.04
+
+ARM_TOP_VIEW_Z_OFFSET = 0.05 # meters
+
+SIMULATE_TRAY_BLOCK_DETECTION = True
+
+
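+# Gazebo sets /use_sim_time, so the robot is considered real only when the parameter
+# exists and is false; a missing parameter is treated as a simulated run.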
+def is_real_robot():
+ if rospy.has_param("/use_sim_time"):
+ return not rospy.get_param("/use_sim_time")
+ else:
+ return False
diff --git a/src/sorting_demo/src/sorting_demo/environment_estimation.py b/src/sorting_demo/src/sorting_demo/environment_estimation.py
new file mode 100644
index 0000000..35caefb
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/environment_estimation.py
@@ -0,0 +1,469 @@
+#!/usr/bin/python
+import copy
+import random
+import re
+
+import math
+import rospy
+import tf
+import tf.transformations
+from cv_bridge import CvBridge
+import sys
+from gazebo_msgs.msg import LinkStates
+from geometry_msgs.msg import Pose, Point, Quaternion
+from concepts.block import BlockState
+from concepts.tray import TrayState
+from concepts.table import Table
+
+from cv_detection_head import CameraHelper, get_blobs_info
+
+from cv_detection_right_hand import get_cubes_z_rotation
+from utils.mathutils import *
+import demo_constants
+from threading import RLock
+import cv2
+
+
+class EnvironmentEstimation:
+ def __init__(self):
+ """
+ """
+ self.gazebo_trays = []
+ self.gazebo_blocks = []
+
+ self.trays = []
+ self.blocks = []
+
+ self.tf_broacaster = tf.TransformBroadcaster()
+ self.tf_listener = tf.TransformListener()
+
+ # initial simulated implementation
+ pub = rospy.Subscriber('/gazebo/link_states', LinkStates, self.simulated_link_state_callback, queue_size=10)
+
+ self.gazebo_world_to_ros_transform = None
+ self.original_blocks_poses_ = None
+ self.mutex = RLock()
+
+ TABLE_HEIGHT = -0.12
+ self.head_camera_helper = CameraHelper("head_camera", "base", TABLE_HEIGHT)
+ self.bridge = CvBridge()
+ self.block_pose_estimation_head_camera = None
+ self.table = Table()
+
+ self.hand_camera_helper = CameraHelper("right_hand_camera", "base", TABLE_HEIGHT)
+
+ if demo_constants.is_real_robot():
+ k = 3
+ for i in xrange(k):
+ for j in xrange(k):
+ q = tf.transformations.quaternion_from_euler(random.uniform(0, 2 * math.pi),
+ random.uniform(0, 2 * math.pi),
+ random.uniform(0, 2 * math.pi))
+
+ block = BlockState(id=str(len(self.gazebo_blocks)),
+ pose=Pose(position=Point(x=0.45 + j * 0.15 + random.uniform(-1, 1) * 0.03,
+ y=-0.15 + i * 0.15 + random.uniform(-1, 1) * 0.03,
+ z=0.7725),
+ orientation=Quaternion(x=q[0], y=q[1], z=q[2], w=q[3])))
+
+ self.gazebo_blocks.append(block)
+
+ def identify_block_from_aproximated_point(self, projected):
+ """
+ :param projected:
+ :return:
+ """
+ bestindex = -1
+ bestdist = sys.float_info.max
+ for index, b in enumerate(self.blocks):
+ p1 = b.pose.position
+ p2 = projected
+ dx = p1.x - p2[0]
+ dy = p1.y - p2[1]
+ dz = p1.z - p2[2]
+
+ dist = math.sqrt(dx * dx + dy * dy + dz * dz)
+
+ if dist < bestdist:
+ bestdist = dist
+ bestindex = index
+
+ if bestindex != -1:
+ return self.blocks[bestindex]
+ else:
+ return None
+
+ def compute_block_pose_estimation_from_arm_camera(self, CUBE_SIZE=150):
+ # get latest image from topic
+ rospy.sleep(0.3)
+ # Take picture
+ img_data = self.hand_camera_helper.take_single_picture()
+
+ # Convert to OpenCV format
+ cv_image = self.bridge.imgmsg_to_cv2(img_data, "bgr8")
+
+ camwtrans = None
+ camwrot = None
+
+ try:
+ (camwtrans, camwrot) = self.tf_listener.lookupTransform('/right_hand_camera_optical', '/base',
+ rospy.Time(0))
+ except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as ex:
+ rospy.logerr(ex.message)
+ return None, False
+
+ rotmat = tf.transformations.quaternion_matrix(camwrot)
+ transmat = tf.transformations.translation_matrix((camwtrans))
+ zaxis = rotmat[2, :]
+
+ cameratransform = tf.transformations.concatenate_matrices(rotmat, transmat)
+
+ # empirically obtained with a drawing and rviz: the optical z axis is the one looking forward (world x);
+ # note also that the frame axes in this rotation matrix are read from the rows instead of the columns
+ camera_yaw_angle = math.atan2(zaxis[1], zaxis[0])
+
+ rospy.logwarn("camera angle:" + str(camera_yaw_angle * 180.0 / math.pi))
+ rospy.logwarn("camera rot:" + str(rotmat))
+ rospy.logwarn("zaxis camera vector:" + str(zaxis))
+ # Save for debugging
+ # cv2.imwrite("/tmp/debug.png", cv_image)
+
+ # Get cube rotation
+ detected_cubes_info = get_cubes_z_rotation(cv_image, CUBE_SIZE=CUBE_SIZE)
+ center = (cv_image.shape[1] / 2, cv_image.shape[0] / 2)
+
+ def cubedistToCenter(cube):
+ # ((370, 224), 26.0, True, False)
+ dx = cube[0][0] - center[0]
+ dy = cube[0][1] - center[1]
+
+ return dx * dx + dy * dy
+
+ sorted_center_cubes = sorted(detected_cubes_info, key=cubedistToCenter)
+
+ try:
+ cube = sorted_center_cubes[0]
+
+ image_cube_angle = cube[1] * (math.pi / 180.0)
+ graspA = cube[2]
+ graspB = cube[3]
+
+ rospy.logwarn("image detected cube angle: " + str(image_cube_angle))
+
+ final_cube_yaw_angle = camera_yaw_angle - image_cube_angle
+
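+ # fold the grasp yaw into [-pi/4, pi/4): because of the cube's square symmetry any yaw
+ # outside that interval is equivalent to one inside it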
+ while final_cube_yaw_angle > math.pi / 4:
+ final_cube_yaw_angle -= math.pi / 2
+
+ while final_cube_yaw_angle < -math.pi / 4:
+ final_cube_yaw_angle += math.pi / 2
+
+ # select the other grasping
+ if not graspA and graspB:
+ rospy.logwarn("Swiching grasping orientation for current block (grasping clearance)")
+ if final_cube_yaw_angle > 0:
+ final_cube_yaw_angle -= math.pi / 2
+ else:
+ final_cube_yaw_angle += math.pi / 2
+
+ projected = self.hand_camera_helper.project_point_on_table(cube[0])
+ poseq = tf.transformations.quaternion_from_euler(0, 0, final_cube_yaw_angle)
+
+ rospy.logwarn("quaternion angle:" + str(poseq))
+ self.tf_broacaster.sendTransform(projected, poseq, rospy.Time(0), "estimated_cube_1", "base")
+ rospy.logwarn(projected)
+
+ # cv2.imshow("cube detection", cv_image)
+ # cv2.waitKey(0)
+
+
+ return Pose(position=Point(x=projected[0], y=projected[1], z=projected[2]),
+ orientation=Quaternion(x=poseq[0], y=poseq[1], z=poseq[2], w=poseq[3])), graspA or graspB
+ except Exception as ex:
+ rospy.logwarn("erroneous cube detection")
+ # cv2.imshow("erroneus cube detection", cv_image)
+ # cv2.waitKey(0)
+ return None, False
+
+ def compute_block_pose_estimations_from_head_camera(self):
+ """
+ For each block updates:
+ - block.headview_pose_estimation
+ - block.hue_estimation
+ :return:
+ """
+ try:
+ self.mutex.acquire()
+ img_data = self.head_camera_helper.take_single_picture()
+
+ # Convert to OpenCV format
+ cv_image = self.bridge.imgmsg_to_cv2(img_data, "bgr8")
+ cv2.imwrite("/tmp/last_head_picture.jpg", cv_image)
+
+ rospy.logwarn("processing head camera image to find blocks")
+ blobs_info = get_blobs_info(cv_image)
+
+ index = 0
+ ptinfos = []
+ for huekey in blobs_info.keys():
+ points = blobs_info[huekey]
+ rospy.logwarn("blob position[%d]: %s" % (index, str(points)))
+ for point in points:
+ ptinfos.append([huekey, point])
+ index += 1
+
+ detected_blocks = []
+
+ for huekey, point2d in ptinfos:
+ projected = self.head_camera_helper.project_point_on_table(point2d)
+ rospy.logwarn("projected: %s" % str(projected))
+
+ block = self.identify_block_from_aproximated_point(projected)
+ if block is None:
+ continue
+
+ detected_blocks.append(block)
+
+ block.headview_proj_estimation = point2d
+
+ block.headview_proj_estimation = projected
+ block.hue_estimation = huekey
+ block.headview_pose_estimation = Pose(
+ position=Point(x=projected[0], y=projected[1], z=projected[2]),
+ orientation=Quaternion(x=0, y=0, z=0, w=1))
+
+ rospy.logwarn("blob identified: " + str(block))
+
+ rospy.logwarn("Table blocks:")
+ self.table.blocks = detected_blocks
+ for b in self.table.blocks:
+ rospy.logwarn(b)
+
+ self.blocks = self.table.blocks
+
+ for tray in self.trays:
+
+ rospy.logwarn("Tray blocks:")
+ for b in tray.blocks:
+ rospy.logwarn(b)
+
+ self.blocks = self.blocks + tray.blocks
+
+ rospy.logwarn("All known blocks:")
+ for b in self.blocks:
+ rospy.logwarn(b)
+ finally:
+ self.mutex.release()
+
+ def simulated_link_state_callback(self, links):
+ """
+ string[] name
+ geometry_msgs/Pose[] pose
+ geometry_msgs/Point position
+ float64 x
+ float64 y
+ float64 z
+ geometry_msgs/Quaternion orientation
+ float64 x
+ float64 y
+ float64 z
+ float64 w
+ geometry_msgs/Twist[] twist
+ geometry_msgs/Vector3 linear
+ float64 x
+ float64 y
+ float64 z
+ geometry_msgs/Vector3 angular
+ float64 x
+ float64 y
+ float64 z
+
+ :param links:
+ :return:
+ """
+ try:
+ self.mutex.acquire()
+ blocks = []
+ trays = []
+
+ base_index = [i for i, name in enumerate(links.name) if name == "sawyer::base"][0]
+ self.gazebo_world_to_ros_transform = links.pose[base_index]
+
+ for i, name in enumerate(links.name):
+ pose = links.pose[i]
+
+ if BlockState.is_block(name):
+ item = self.get_block_by_gazebo_id(name)
+ if item is None:
+ # rospy.logwarn("block create name: "+ name)
+ item = BlockState(id=name, pose=pose)
+ item.color = demo_constants.BLOCK_COLOR_MAPPINGS[item.num]["material"].replace("Gazebo/", "")
+ else:
+ item.pose = pose
+
+ blocks.append(item)
+ elif TrayState.is_tray(name):
+ item = self.get_tray_by_gazebo_id(name)
+ # item = None
+ if item is None:
+ item = TrayState(gazebo_id=name, pose=pose,
+ TRAY_SURFACE_THICKNESS=demo_constants.TRAY_SURFACE_THICKNESS)
+ item.color = demo_constants.TRAY_COLORS[item.num].replace("Gazebo/", "")
+ else:
+ item.pose = pose
+
+ trays.append(item)
+ else:
+ continue
+
+ # rospy.logwarn("simulated object state: " + name + " -> " + item.num)
+
+ self.gazebo_blocks = blocks
+ self.gazebo_trays = trays
+ except Exception as ex:
+ rospy.logerr(ex.message)
+ finally:
+ self.mutex.release()
+
+ def update(self):
+ """
+ Double-buffers the state of blocks and trays and publishes their tf frames.
+ For the simulated case it copies from gazebo_blocks and gazebo_trays into blocks and trays.
+ :return:
+ """
+
+ try:
+ self.mutex.acquire()
+
+ collections = [self.gazebo_blocks, self.gazebo_trays]
+
+ blocks = []
+ trays = []
+
+ if not demo_constants.is_real_robot():
+ # publish tfs
+ basehomopose = get_homo_matrix_from_pose_msg(self.gazebo_world_to_ros_transform, tag="base")
+
+ for items in collections:
+ for item in items:
+
+ # block homogeneous transform
+ homo_pose = get_homo_matrix_from_pose_msg(item.pose, tag="block")
+
+ # rospy.logwarn("TF PUBLISH..." + str(homo_pose))
+ # rospy.logwarn("item state: " + str(item))
+
+ transf_homopose = inverse_compose(basehomopose, homo_pose)
+
+ trans = tf.transformations.translation_from_matrix(transf_homopose)
+ quat = tf.transformations.quaternion_from_matrix(transf_homopose)
+
+ self.tf_broacaster.sendTransform(trans,
+ quat,
+ rospy.Time.now(),
+ item.gazebo_id,
+ "world")
+
+ item.gazebo_pose = homotransform_to_pose_msg(transf_homopose)
+
+ if isinstance(item, BlockState):
+ blocks.append(item)
+ elif isinstance(item, TrayState):
+ trays.append(item)
+ # else:
+ # rospy.logwarn("DETECTED ITEM:" + str(item))
+
+ self.blocks = blocks
+ self.trays = trays
+
+ # rospy.logwarn("GAZEBO blocks update lenght: %d"%len(self.gazebo_blocks))
+ # rospy.logwarn("blocks update lenght: %d"%len(self.blocks))
+ if self.original_blocks_poses_ is None:
+ self.original_blocks_poses_ = [copy.deepcopy(block.gazebo_pose) for block in blocks]
+
+ finally:
+ self.mutex.release()
+
+ def get_blocks(self):
+ """
+ :return array of (name, geometry_msgs.msg.Pose)
+ """
+ try:
+ self.mutex.acquire()
+ return [b for b in self.blocks]
+ finally:
+ self.mutex.release()
+
+ def get_block_by_gazebo_id(self, gazebo_id):
+ """
+ :param gazebo_id:
+ :return:
+ """
+ try:
+ self.mutex.acquire()
+
+ filtered_blocks = [block for block in self.blocks if block.gazebo_id == gazebo_id]
+ if len(filtered_blocks) == 0:
+ return None
+ else:
+ return filtered_blocks[0]
+ finally:
+ self.mutex.release()
+
+ def get_original_block_poses(self):
+ """
+ :return:
+ """
+ try:
+ self.mutex.acquire()
+ return [copy.deepcopy(p) for p in self.original_blocks_poses_]
+ finally:
+ self.mutex.release()
+
+ def get_tray_by_gazebo_id(self, gazebo_id):
+ """
+ :param gazebo_id:
+ :return:
+ """
+ try:
+ self.mutex.acquire()
+ filtered_trays = [tray for tray in self.trays if tray.gazebo_id == gazebo_id]
+ if len(filtered_trays) == 0:
+ return None
+ else:
+ return filtered_trays[0]
+ finally:
+ self.mutex.release()
+
+ def get_tray_by_color(self, color):
+ """
+ :param id:
+ :return:
+ """
+ color = color.replace("Gazebo/", "")
+ rospy.logwarn("by color: " + str(color))
+ rospy.logwarn("by color: " + str(self.trays))
+
+ filtered_trays = [tray for tray in self.trays if tray.color == color]
+ if len(filtered_trays) == 0:
+ return None
+ else:
+ return filtered_trays[0]
+
+ def get_tray_by_num(self, num):
+ """
+ :param num:
+ :return:
+ """
+ rospy.logwarn("by num: " + str(num))
+ rospy.logwarn("by trays: " + str([t.num for t in self.trays]))
+
+ filtered_trays = [tray for tray in self.trays if int(tray.num) == int(num)]
+ if len(filtered_trays) == 0:
+ return None
+ else:
+ return filtered_trays[0]
+
+ def get_trays(self):
+ """
+ :return array of (name, geometry_msgs.msg.Pose)
+ """
+ return self.trays
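+
+# Illustrative sketch (hypothetical, not the demo entry point): a node would typically
+# create a single EnvironmentEstimation, let the gazebo link_states callback run, call
+# update() to refresh poses and tf, and then query it:
+#
+# rospy.init_node('environment_estimation_example')
+# environment = EnvironmentEstimation()
+# rospy.sleep(1.0)
+# environment.update()
+# environment.compute_block_pose_estimations_from_head_camera()
+# for block in environment.get_blocks():
+#     rospy.loginfo(str(block))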
diff --git a/src/sorting_demo/src/sorting_demo/gazebo_models.py b/src/sorting_demo/src/sorting_demo/gazebo_models.py
new file mode 100644
index 0000000..32f43ab
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/gazebo_models.py
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+import math
+import random
+import sys
+
+import rospkg
+import rospy
+import tf
+import xacro
+
+from geometry_msgs.msg import Pose, Point, Quaternion
+from gazebo_msgs.srv import SpawnModel, DeleteModel
+
+from demo_constants import BLOCK_COLOR_MAPPINGS, CUBE_EDGE_LENGTH
+
+
+def spawn_urdf(name, description_xml, pose, reference_frame):
+ rospy.wait_for_service('/gazebo/spawn_urdf_model')
+ try:
+ spawn_urdf = rospy.ServiceProxy('/gazebo/spawn_urdf_model', SpawnModel)
+ resp_urdf = spawn_urdf(name, description_xml, "/", pose, reference_frame)
+ except rospy.ServiceException as e:
+ rospy.logerr("Spawn URDF service call failed: {0}".format(e))
+
+
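+# The xacro "mappings" dictionary holds property substitutions applied while expanding the
+# .xacro file, e.g. {"material": "Gazebo/Red", "edge_length": "0.04"} for the blocks below.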
+def load_xacro_file(file_path, mappings):
+ urdf_doc = xacro.process_file(file_path, mappings=mappings)
+ urdf_xml = urdf_doc.toprettyxml(indent=' ', encoding='utf-8')
+ urdf_xml = urdf_xml.replace('\n', '')
+ return urdf_xml
+
+
+def spawn_xacro_urdf_model(name, path, pose, reference_frame, mappings):
+ description_xml = load_xacro_file(path, mappings)
+ spawn_urdf(name, description_xml, pose, reference_frame)
+
+def spawn_xacro_sdf_model(name, path, pose, reference_frame, mappings):
+ description_xml = load_xacro_file(path, mappings)
+ spawn_sdf(name, description_xml, pose, reference_frame)
+
+def spawn_urdf_model(name, path, pose, reference_frame):
+ description_xml = ''
+ with open(path, "r") as model_file:
+ description_xml = model_file.read().replace('\n', '')
+
+ spawn_urdf(name, description_xml, pose, reference_frame)
+
+
+def spawn_sdf(name, description_xml, pose, reference_frame):
+ rospy.wait_for_service('/gazebo/spawn_sdf_model')
+ try:
+ spawn_sdf = rospy.ServiceProxy('/gazebo/spawn_sdf_model', SpawnModel)
+ resp_sdf = spawn_sdf(name, description_xml, "/", pose, reference_frame)
+ except rospy.ServiceException as e:
+ rospy.logerr("Spawn SDF service call failed: {0}".format(e))
+
+def spawn_sdf_model(name, path, pose, reference_frame):
+ # Load Model SDF
+ description_xml = ''
+ with open(path, "r") as model_file:
+ description_xml = model_file.read().replace('\n', '')
+ spawn_sdf(name, description_xml, pose, reference_frame)
+
+
+
+def load_gazebo_models():
+ model_list = []
+
+ world_reference_frame = "world"
+
+ # sorting_demo model path
+ sorting_demo_models_path = rospkg.RosPack().get_path('sorting_demo') + "/models/"
+
+ # Spawn Blocks Table
+ blocks_table_name = "blocks_table"
+ blocks_table_path = sorting_demo_models_path + "table/model.sdf"
+ blocks_table_pose = Pose(position=Point(x=0.75, y=0.0, z=0.0))
+
+ spawn_sdf_model(blocks_table_name, blocks_table_path, blocks_table_pose, world_reference_frame)
+ model_list.append(blocks_table_name)
+
+ # Spawn Trays Table
+ trays_table_name = "trays_table"
+ trays_table_path = sorting_demo_models_path + "table/model.sdf"
+ trays_table_pose = Pose(position=Point(x=0.0, y=0.95, z=0.0))
+
+ spawn_sdf_model(trays_table_name, trays_table_path, trays_table_pose, world_reference_frame)
+ model_list.append(trays_table_name)
+
+ # Spawn trays
+ tray_path = sorting_demo_models_path + "tray/tray.urdf.xacro"
+
+ tray_poses = [Pose(position=Point(x=-0.3, y=0.7, z=0.7828)),
+ Pose(position=Point(x=0.0, y=0.7, z=0.7828)),
+ Pose(position=Point(x=0.3, y=0.7, z=0.7828))]
+
+ for (i, pose) in enumerate(tray_poses):
+ name = "tray{}".format(i)
+ spawn_xacro_urdf_model(name, tray_path, pose, world_reference_frame, {})
+ model_list.append(name)
+
+ # Spawn blocks
+ block_path = sorting_demo_models_path + "block/block.urdf.xacro"
+
+ block_poses = []
+ k = 3
+ for i in xrange(k):
+ for j in xrange(k):
+ q = tf.transformations.quaternion_from_euler(random.uniform(0, 2 * math.pi), random.uniform(0, 2 * math.pi),
+ random.uniform(0, 2 * math.pi))
+
+
+ block_poses.append(Pose(position=Point(x=0.45 + j * 0.15 + random.uniform(-1, 1) * 0.03,
+ y=-0.15 + i * 0.15 + random.uniform(-1, 1) * 0.03, z=0.7725),
+ orientation=Quaternion(x=q[0], y=q[1], z=q[2], w=q[3])))
+
+ #block_poses.append(Pose(position=Point(x= 0.45 + j*0.12 +random.uniform(-1, 1)*0.03 , y= -0.15 + i * 0.15 +random.uniform(-1, 1)*0.03, z=0.7725)))
+
+ """
+ Pose(position=Point(x=0.60, y=0.1265, z=0.7725)),
+ Pose(position=Point(x=0.80, y=0.12, z=0.7725)),
+ Pose(position=Point(x=0.60, y=-0.1, z=0.7725)),
+ Pose(position=Point(x=0.80, y=-0.1, z=0.7725)),
+ Pose(position=Point(x=0.4225, y=-0.1, z=0.7725)),
+ Pose(position=Point(x=0.60, y=-0.35, z=0.7725)),
+ Pose(position=Point(x=0.80, y=-0.35, z=0.7725)),
+ Pose(position=Point(x=0.4225, y=-0.35, z=0.7725))
+ """
+
+ for (i, (pose, color_mappings)) in enumerate(zip(block_poses, BLOCK_COLOR_MAPPINGS)):
+ name = "block{}".format(i)
+
+ mappings = {"edge_length" : str(CUBE_EDGE_LENGTH)}
+ mappings.update(color_mappings)
+
+ spawn_xacro_urdf_model(name, block_path, pose, world_reference_frame, mappings)
+ model_list.append(name)
+
+ return model_list, block_poses
+
+
+def delete_gazebo_models(model_list):
+ # This will be called on ROS Exit, deleting Gazebo models
+ # Do not wait for the Gazebo Delete Model service, since
+ # Gazebo should already be running. If the service is not
+ # available since Gazebo has been killed, it is fine to error out
+ try:
+ delete_model = rospy.ServiceProxy('/gazebo/delete_model', DeleteModel)
+
+ for model in model_list:
+ resp_delete = delete_model(model)
+ except rospy.ServiceException as e:
+ print("Delete Model service call failed: {0}".format(e))
diff --git a/src/sorting_demo/src/sorting_demo/goto_angles_in_contact.py b/src/sorting_demo/src/sorting_demo/goto_angles_in_contact.py
new file mode 100644
index 0000000..a2256ad
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/goto_angles_in_contact.py
@@ -0,0 +1,281 @@
+#! /usr/bin/env python
+
+# Copyright (c) 2016-2018, Rethink Robotics Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import rospy
+import argparse
+from geometry_msgs.msg import Pose
+from intera_motion_interface import (
+ MotionTrajectory,
+ MotionWaypoint,
+ MotionWaypointOptions,
+ InteractionOptions
+)
+from intera_motion_msgs.msg import TrajectoryOptions
+from intera_interface import Limb
+from intera_motion_interface.utility_functions import int2bool
+
+
+def interaction_joint_trajectory(limb,
+ joint_angles,
+ trajType,
+ interaction_active,
+ interaction_control_mode,
+ interaction_frame,
+ speed_ratio,
+ accel_ratio,
+ K_impedance,
+ max_impedance,
+ in_endpoint_frame,
+ force_command,
+ K_nullspace,
+ endpoint_name,
+ timeout,
+ disable_damping_in_force_control,
+ disable_reference_resetting,
+ rotations_for_constrained_zeroG):
+ try:
+
+ traj = MotionTrajectory(limb=limb)
+
+ wpt_opts = MotionWaypointOptions(max_joint_speed_ratio=speed_ratio,
+ max_joint_accel=accel_ratio)
+
+ waypoint = MotionWaypoint(options = wpt_opts.to_msg(), limb = limb)
+
+ # one single waypoint
+ current_joint_angles = limb.joint_ordered_angles()
+ waypoint.set_joint_angles(joint_angles=current_joint_angles)
+ traj.append_waypoint(waypoint.to_msg())
+
+ if len(current_joint_angles) != len(joint_angles):
+ rospy.logerr('The number of joint_angles must be %d', len(current_joint_angles))
+ return None
+
+ # ----- testing intermediate points with real robot
+ middle_joint_angles = [ (current_joint_angles[i] + joint_angles[i])/2.0 for i in xrange(len(current_joint_angles))]
+ waypoint.set_joint_angles(joint_angles=middle_joint_angles)
+ traj.append_waypoint(waypoint.to_msg())
+
+ waypoint.set_joint_angles(joint_angles = joint_angles)
+ traj.append_waypoint(waypoint.to_msg())
+ # ----- end testing intermediate points with real robot
+
+ # set the interaction control options in the current configuration
+ interaction_options = InteractionOptions()
+ trajectory_options = TrajectoryOptions()
+ trajectory_options.interaction_control = True
+ trajectory_options.interpolation_type = trajType
+
+ interaction_options.set_interaction_control_active(int2bool(interaction_active))
+ interaction_options.set_K_impedance(K_impedance)
+ interaction_options.set_max_impedance(int2bool(max_impedance))
+ interaction_options.set_interaction_control_mode(interaction_control_mode)
+ interaction_options.set_in_endpoint_frame(int2bool(in_endpoint_frame))
+ interaction_options.set_force_command(force_command)
+ interaction_options.set_K_nullspace(K_nullspace)
+ interaction_options.set_endpoint_name(endpoint_name)
+ if len(interaction_frame) < 7:
+ rospy.logerr('The number of elements must be 7!')
+ elif len(interaction_frame) == 7:
+
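+ # interaction_frame is expected as [px, py, pz, qw, qx, qy, qz]; it is accepted only
+ # when the quaternion part is (approximately) unit length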
+ quat_sum_square = interaction_frame[3]*interaction_frame[3] + interaction_frame[4]*interaction_frame[4] + interaction_frame[5]*interaction_frame[5] + interaction_frame[6]*interaction_frame[6]
+ if quat_sum_square < 1.0 + 1e-7 and quat_sum_square > 1.0 - 1e-7:
+ target_interaction_frame = Pose()
+ target_interaction_frame.position.x = interaction_frame[0]
+ target_interaction_frame.position.y = interaction_frame[1]
+ target_interaction_frame.position.z = interaction_frame[2]
+ target_interaction_frame.orientation.w = interaction_frame[3]
+ target_interaction_frame.orientation.x = interaction_frame[4]
+ target_interaction_frame.orientation.y = interaction_frame[5]
+ target_interaction_frame.orientation.z = interaction_frame[6]
+ interaction_options.set_interaction_frame(target_interaction_frame)
+ else:
+ rospy.logerr('Invalid input to quaternion! The quaternion must be a unit quaternion!')
+ else:
+ rospy.logerr('Invalid input to interaction_frame!')
+
+ interaction_options.set_disable_damping_in_force_control(disable_damping_in_force_control)
+ interaction_options.set_disable_reference_resetting(disable_reference_resetting)
+ interaction_options.set_rotations_for_constrained_zeroG(rotations_for_constrained_zeroG)
+
+ trajectory_options.interaction_params = interaction_options.to_msg()
+ traj.set_trajectory_options(trajectory_options)
+
+ result = traj.send_trajectory(timeout=timeout)
+ if result is None:
+ rospy.logerr('Trajectory FAILED to send!')
+ return
+
+ if result.result:
+ rospy.loginfo('Motion controller successfully finished the trajectory with interaction options set!')
+ else:
+ rospy.logerr('Motion controller failed to complete the trajectory with error %s',
+ result.errorId)
+
+ # print the resultant interaction options
+ rospy.loginfo('Interaction Options:\n%s', interaction_options.to_msg())
+
+ except rospy.ROSInterruptException:
+ rospy.logerr('Keyboard interrupt detected from the user. %s',
+ 'Exiting before trajectory completion.')
+
+
+def main():
+ """
+ Move the robot arm to the specified configuration
+ with the desired interaction control options.
+ Call using:
+ $ rosrun intera_examples go_to_joint_angles_in_contact.py [arguments: see below]
+
+ -q 0.0 0.0 0.0 0.0 0.0 0.0 0.0
+ --> Go to joint pose: 0.0 0.0 0.0 0.0 0.0 0.0 0.0 using default settings
+
+ -q 0.1 -0.2 0.15 -0.05 -0.08 0.14 -0.04 -s 0.1
+ --> Go to pose [...] with a speed ratio of 0.1
+
+ -q -0.2 0.1 0.1 0.2 -0.3 0.2 0.4 -s 0.9 -a 0.1
+    --> Go to pose [...] with a speed ratio of 0.9 and an accel ratio of 0.1
+
+ --trajType CARTESIAN
+ --> Use a Cartesian interpolated endpoint path to reach the goal
+
+ === Interaction Mode options ===
+
+ -st 1
+ --> Set the interaction controller state (1 for True, 0 for False) in the current configuration
+
+ -k 500.0 500.0 500.0 10.0 10.0 10.0
+ --> Set K_impedance to [500.0 500.0 500.0 10.0 10.0 10.0] in the current configuration
+
+ -m 0
+ --> Set max_impedance to False for all 6 directions in the current configuration
+
+ -m 1 1 0 1 1 1
+ --> Set max_impedance to [True True False True True True] in the current configuration
+
+ -kn 5.0 3.0 5.0 4.0 6.0 4.0 6.0
+ --> Set K_nullspace to [5.0 3.0 5.0 4.0 6.0 4.0 6.0] in the current configuration
+
+ -f 0.0 0.0 30.0 0.0 0.0 0.0
+ --> Set force_command to [0.0 0.0 30.0 0.0 0.0 0.0] in the current configuration
+
+ -ef
+ --> Set in_endpoint_frame to True in the current configuration
+
+ -en 'right_hand'
+ --> Specify the desired endpoint frame where impedance and force control behaviors are defined
+
+ -md 1
+ --> Set interaction_control_mode to impedance mode for all 6 directions in the current configuration
+ (1: impedance, 2: force, 3: impedance with force limit, 4: force with motion limit)
+
+ -md 1 1 2 1 1 1
+ --> Set interaction_control_mode to [impedance, impedance, force, impedance, impedance, impedance] in the current configuration
+ (1: impedance, 2: force, 3: impedance with force limit, 4: force with motion limit)
+ """
+
+ arg_fmt = argparse.RawDescriptionHelpFormatter
+ parser = argparse.ArgumentParser(formatter_class=arg_fmt,
+ description=main.__doc__)
+ parser.add_argument(
+ "-q", "--joint_angles", type=float,
+ nargs='+', default=[0.0, -0.9, 0.0, 1.8, 0.0, -0.9, 0.0],
+ help="A list of joint angles, one for each of the 7 joints, J0...J6")
+ parser.add_argument(
+ "-s", "--speed_ratio", type=float, default=0.1,
+ help="A value between 0.001 (slow) and 1.0 (maximum joint velocity)")
+ parser.add_argument(
+ "-a", "--accel_ratio", type=float, default=0.5,
+ help="A value between 0.001 (slow) and 1.0 (maximum joint accel)")
+ parser.add_argument(
+ "-t", "--trajType", type=str, default='JOINT',
+ choices=['JOINT', 'CARTESIAN'],
+ help="trajectory interpolation type")
+ parser.add_argument(
+ "-st", "--interaction_active", type=int, default=1, choices = [0, 1],
+ help="Activate (1) or Deactivate (0) interaction controller")
+ parser.add_argument(
+ "-k", "--K_impedance", type=float,
+ nargs='+', default=[1300.0, 1300.0, 1300.0, 30.0, 30.0, 30.0],
+ help="A list of desired stiffnesses, one for each of the 6 directions -- stiffness units are (N/m) for first 3 and (Nm/rad) for second 3 values")
+ parser.add_argument(
+ "-m", "--max_impedance", type=int,
+ nargs='+', default=[1, 1, 1, 1, 1, 1], choices = [0, 1],
+ help="A list of impedance modulation state, one for each of the 6 directions (a single value can be provided to apply the same value to all the directions) -- 0 for False, 1 for True")
+ parser.add_argument(
+ "-md", "--interaction_control_mode", type=int,
+ nargs='+', default=[1, 1, 1, 1, 1, 1], choices = [1,2,3,4],
+ help="A list of desired interaction control mode (1: impedance, 2: force, 3: impedance with force limit, 4: force with motion limit), one for each of the 6 directions")
+ parser.add_argument(
+ "-fr", "--interaction_frame", type=float,
+ nargs='+', default=[0, 0, 0, 1, 0, 0, 0],
+ help="Specify the reference frame for the interaction controller -- first 3 values are positions [m] and last 4 values are orientation in quaternion (w, x, y, z)")
+ parser.add_argument(
+ "-ef", "--in_endpoint_frame", action='store_true', default=False,
+ help="Set the desired reference frame to endpoint frame; otherwise, it is base frame by default")
+ parser.add_argument(
+ "-en", "--endpoint_name", type=str, default='right_hand',
+ help="Set the desired endpoint frame by its name; otherwise, it is right_hand frame by default")
+ parser.add_argument(
+ "-f", "--force_command", type=float,
+ nargs='+', default=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
+ help="A list of desired force commands, one for each of the 6 directions -- in force control mode this is the vector of desired forces/torques to be regulated in (N) and (Nm), in impedance with force limit mode this vector specifies the magnitude of forces/torques (N and Nm) that the command will not exceed")
+ parser.add_argument(
+ "-kn", "--K_nullspace", type=float,
+ nargs='+', default=[5.0, 10.0, 5.0, 10.0, 5.0, 10.0, 5.0],
+ help="A list of desired nullspace stiffnesses, one for each of the 7 joints (a single value can be provided to apply the same value to all the directions) -- units are in (Nm/rad)")
+ parser.add_argument(
+ "-dd", "--disable_damping_in_force_control", action='store_true', default=False,
+ help="Disable damping in force control")
+ parser.add_argument(
+ "-dr", "--disable_reference_resetting", action='store_true', default=False,
+ help="The reference signal is reset to actual position to avoid jerks/jumps when interaction parameters are changed. This option allows the user to disable this feature.")
+ parser.add_argument(
+ "-rc", "--rotations_for_constrained_zeroG", action='store_true', default=False,
+ help="Allow arbitrary rotational displacements from the current orientation for constrained zero-G (use only for a stationary reference orientation)")
+ parser.add_argument(
+ "--timeout", type=float, default=None,
+ help="Max time in seconds to complete motion goal before returning. None is interpreted as an infinite timeout.")
+
+ args = parser.parse_args(rospy.myargv()[1:])
+
+
+ rospy.init_node('go_to_joint_angles_in_contact_py')
+ limb = Limb()
+
+ interaction_joint_trajectory(limb,
+ joint_angles = args.joint_angles,
+ trajType= args.trajType,
+ interaction_control_mode= args.interaction_control_mode,
+ interaction_active= args.interaction_active,
+ interaction_frame= args.interaction_frame,
+ speed_ratio = args.speed_ratio,
+ accel_ratio = args.accel_ratio,
+ K_impedance= args.K_impedance,
+ max_impedance = args.max_impedance,
+ in_endpoint_frame = args.in_endpoint_frame,
+ force_command = args.force_command,
+ K_nullspace = args.K_nullspace,
+ endpoint_name = args.endpoint_name,
+ timeout = args.timeout,
+ disable_damping_in_force_control=args.disable_damping_in_force_control,
+ disable_reference_resetting = args.disable_reference_resetting ,
+ rotations_for_constrained_zeroG = args.rotations_for_constrained_zeroG
+ )
+
+if __name__ == '__main__':
+ main()
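For reference, interaction_joint_trajectory() can also be driven without the CLI. The sketch below simply reuses the argparse defaults defined in this file; it assumes a running ROS master, an enabled Sawyer, and that the function is imported from this module.

    import rospy
    from intera_interface import Limb

    rospy.init_node('go_to_joint_angles_in_contact_example')
    limb = Limb()
    interaction_joint_trajectory(
        limb,
        joint_angles=[0.0, -0.9, 0.0, 1.8, 0.0, -0.9, 0.0],
        trajType='JOINT',
        interaction_active=1,
        interaction_control_mode=[1, 1, 1, 1, 1, 1],
        interaction_frame=[0, 0, 0, 1, 0, 0, 0],
        speed_ratio=0.1,
        accel_ratio=0.5,
        K_impedance=[1300.0, 1300.0, 1300.0, 30.0, 30.0, 30.0],
        max_impedance=[1, 1, 1, 1, 1, 1],
        in_endpoint_frame=False,
        force_command=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        K_nullspace=[5.0, 10.0, 5.0, 10.0, 5.0, 10.0, 5.0],
        endpoint_name='right_hand',
        timeout=None,
        disable_damping_in_force_control=False,
        disable_reference_resetting=False,
        rotations_for_constrained_zeroG=False)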
diff --git a/src/sorting_demo/src/sorting_demo/gripper_action_server.py b/src/sorting_demo/src/sorting_demo/gripper_action_server.py
new file mode 100755
index 0000000..0f1ad92
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/gripper_action_server.py
@@ -0,0 +1,166 @@
+#! /usr/bin/env python
+
+import argparse
+from math import fabs
+
+import rospy
+
+import actionlib
+import demo_constants
+
+from control_msgs.msg import (
+ GripperCommandAction,
+ GripperCommandFeedback,
+ GripperCommandResult,
+)
+import real_gripper
+from real_gripper import PSGGripper
+import intera_interface
+
+
+class GripperActionServer(object):
+ def __init__(self):
+ self._ns = "/robot/gripper_action_server"
+
+ if demo_constants.is_real_robot():
+ self._gripper = PSGGripper() # intera_interface.Gripper()
+ else:
+ self._gripper = intera_interface.Gripper()
+
+ # Action Server
+ self._server = actionlib.SimpleActionServer(
+ self._ns,
+ GripperCommandAction,
+ execute_cb=self._on_gripper_action,
+ auto_start=False)
+ self._action_name = rospy.get_name()
+ self._server.start()
+        self._gripper.set_dead_zone(0.021)
+
+ # Action Feedback/Result
+ self.feedback_msg = GripperCommandFeedback()
+ self.result_msg = GripperCommandResult()
+
+ # Initialize Parameters
+ self._timeout = 5.0
+
+ def _update_feedback(self, position):
+ self.feedback_msg.position = self._gripper.get_position()
+ self.feedback_msg.effort = self._gripper.get_force()
+ self.feedback_msg.stalled = False
+ self.feedback_msg.reached_goal = False
+
+ self._server.publish_feedback(self.feedback_msg)
+
+ def _command_gripper_update(self, position, is_close_goal):
+ if is_close_goal:
+ self._gripper.close()
+ rospy.logwarn("cmd: close")
+ else:
+ self._gripper.open()
+ rospy.logwarn("cmd: open")
+
+ # self._gripper.set_position(position)
+
+ def check_success(self, position, close_goal):
+
+ rospy.logwarn("gripping force: " + str(self._gripper.get_force()))
+ rospy.logwarn("gripper position: " + str(self._gripper.get_position()))
+ rospy.logwarn("gripper position deadzone: " + str(self._gripper.get_dead_zone()))
+
+ if not self._gripper.is_moving():
+ success = True
+ else:
+ success = False
+
+ # success = fabs(self._gripper.get_position() - position) < self._gripper.get_dead_zone()
+
+
+ rospy.logwarn("gripping success: " + str(success))
+
+ return success
+
+ def _on_gripper_action(self, goal):
+        # Store position and effort from the goal command
+        # (positions below 0.02 m are treated as "close", larger values as "open")
+ position = goal.command.position
+ effort = goal.command.max_effort
+
+ # Check for errors
+ if self._gripper.has_error():
+ rospy.logerr("%s: Gripper error - please restart action server." %
+ (self._action_name,))
+            self._server.set_aborted()
+            return
+
+ is_close_goal = position < 0.02
+
+ # Reset feedback/result
+ self._update_feedback(position)
+
+ # 20 Hz gripper state rate
+ control_rate = rospy.Rate(20.0)
+
+ # Record start time
+ start_time = rospy.get_time()
+
+ def now_from_start(start):
+ return rospy.get_time() - start
+
+ self._command_gripper_update(position, is_close_goal)
+ rospy.sleep(0.2)
+
+ # Continue commanding goal until success or timeout
+ while ((now_from_start(start_time) < self._timeout or
+ self._timeout < 0.0) and not rospy.is_shutdown()):
+ if self._server.is_preempt_requested():
+ self._gripper.stop()
+ rospy.loginfo("%s: Gripper Action Preempted" %
+ (self._action_name,))
+ self._server.set_preempted(self.result_msg)
+ return
+ self._update_feedback(position)
+ if self.check_success(position, is_close_goal):
+ self._server.set_succeeded(self.result_msg)
+ return
+ self._command_gripper_update(position, is_close_goal)
+ control_rate.sleep()
+
+ # Gripper failed to achieve goal before timeout/shutdown
+ self._gripper.stop()
+ if not rospy.is_shutdown():
+ rospy.logerr("%s: Gripper Command Not Achieved in Allotted Time" %
+ (self._action_name,))
+ self._update_feedback(position)
+ self._server.set_aborted(self.result_msg)
+
+
+def start_server(gripper):
+ print("Initializing node... ")
+ rospy.init_node("rsdk_gripper_action_server%s" %
+ ("" if gripper == 'both' else "_" + gripper,))
+ print("Initializing gripper action server...")
+
+ gas = GripperActionServer()
+
+ print("Running. Ctrl-c to quit")
+ rospy.spin()
+
+
+def main():
+ arg_fmt = argparse.ArgumentDefaultsHelpFormatter
+ parser = argparse.ArgumentParser(formatter_class=arg_fmt)
+ parser.add_argument("-g", "--gripper", dest="gripper", default="both",
+ choices=['both', 'left', 'right'],
+ help="gripper action server limb", )
+ args = parser.parse_args(rospy.myargv()[1:])
+ start_server(args.gripper)
+
+
+if __name__ == "__main__":
+ main()
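A minimal actionlib client for the server above could look like the sketch below. It assumes the action server node is already running under the /robot/gripper_action_server namespace used in __init__; the goal position of 0.0 is illustrative and falls below the 0.02 threshold, so it is interpreted as a close command.

    import rospy
    import actionlib
    from control_msgs.msg import GripperCommandAction, GripperCommandGoal

    rospy.init_node('gripper_action_client_example')
    client = actionlib.SimpleActionClient('/robot/gripper_action_server', GripperCommandAction)
    client.wait_for_server()

    goal = GripperCommandGoal()
    goal.command.position = 0.0    # below 0.02 -> treated as "close"
    goal.command.max_effort = 0.0
    client.send_goal(goal)
    client.wait_for_result(rospy.Duration(10.0))
    print(client.get_result())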
diff --git a/src/sorting_demo/src/sorting_demo/program.py b/src/sorting_demo/src/sorting_demo/program.py
new file mode 100755
index 0000000..47df9cd
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/program.py
@@ -0,0 +1,58 @@
+#!/usr/bin/python
+import functools
+import sys
+import rospy
+
+from task_planner import TaskPlanner
+import demo_constants
+import gazebo_models
+
+
+def main():
+ rospy.init_node("sorting_demo")
+ rospy.logwarn("Hello world")
+
+ if not demo_constants.is_real_robot():
+ # Load Gazebo Models via Spawning Services
+ # Note that the models reference is the /world frame
+ # and the IK operates with respect to the /base frame
+ model_list, original_block_poses = gazebo_models.load_gazebo_models()
+ # Remove models from the scene on shutdown
+ rospy.on_shutdown(functools.partial(gazebo_models.delete_gazebo_models, model_list))
+
+ rospy.logwarn("Creating task planner")
+
+ task_planner = TaskPlanner()
+
+ rospy.logwarn("Hello world")
+
+ task_planner.create_go_home_task(check_obstacles=False).result()
+
+ task_facade = task_planner.get_task_facade()
+
+ task_facade.start()
+
+ task_facade.run_rest_server()
+
+ # rospy.sleep(15)
+ # task_facade.pick_block_by_color("BLUE")
+ # task_facade.put_block_into_tray("BLUE", "1")
+
+ # task_facade.put_all_contents_on_table()
+
+ # task_facade.pause()
+
+ # rospy.sleep(30)
+ # task_facade.resume()
+
+ # task_facade.stop("GREEN")
+
+ rospy.logwarn("task planner spin")
+ task_planner.spin()
+
+ # ask the robot to greet
+ # task_facade.greet()
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/src/sorting_demo/src/sorting_demo/real_gripper.py b/src/sorting_demo/src/sorting_demo/real_gripper.py
new file mode 100644
index 0000000..adb6dc6
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/real_gripper.py
@@ -0,0 +1,32 @@
+class PSGGripper(object):
+    """Placeholder stub for the real PSG gripper.
+
+    Mirrors the subset of the intera_interface.Gripper API used by the demo
+    (open/close, position/force/dead-zone queries) with no-op implementations.
+    """
+
+    def __init__(self):
+        pass
+
+ def get_position(self):
+ return 0
+
+ def get_force(self):
+ return 0
+
+ def set_dead_zone(self, v):
+ return 0
+
+ def get_dead_zone(self):
+ return 0
+
+ def is_moving(self):
+        return False
+
+ def has_error(self):
+ return False
+
+ def stop(self):
+ pass
+
+ def close(self):
+ pass
+
+ def open(self):
+ pass
\ No newline at end of file
diff --git a/src/sorting_demo/src/sorting_demo/reinforcement_learning/task_space_simulation.py b/src/sorting_demo/src/sorting_demo/reinforcement_learning/task_space_simulation.py
new file mode 100644
index 0000000..aa4eeb0
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/reinforcement_learning/task_space_simulation.py
@@ -0,0 +1,62 @@
+import gym
+from gym import utils
+from gym.envs.toy_text import discrete
+import numpy as np
+
+class PositionEnv(discrete.DiscreteEnv):
+ """
+    5x5 grid world: 25 slots, each holding one of 4 possible values (B, G, R, -),
+    which gives 4**25 possible states.
+
+    Actions: 25 (pick or place at each slot)
+
+    The policy therefore maps each of those states to one of the 25 actions.
+
+ """
+
+ def __init__(self, nS, nA, P, isd):
+ super(PositionEnv, self).__init__(nS, nA, P, isd)
+
+        self.rows = 5
+        self.cols = 5
+        self.values = 3 + 1  # B, G, R plus the empty slot "-"
+
+        # character map of the grid ("-" = empty slot); assumed to be used only for rendering
+        self.desc = np.full((self.rows, self.cols), '-')
+
+        # each of the rows*cols slots holds one of `values` symbols
+        self.states = self.values ** (self.rows * self.cols)
+        self.actions = self.rows * self.cols
+
+
+ # transition model
+ # {action: [(probability, nextstate, reward, done)]}
+ self.P = {s: {a: [] for a in range(self.actions)} for s in range(self.states)}
+
+
+    def is_holand_flag(self):
+ pass
+
+ def encode(self):
+ pass
+
+
+ def step(self,action):
+ pass
+
+
+ def render(self):
+ pass
+
+
+def encode_state_aux(i, values, slots):
+ if slots == 1:
+ return [i]
+ else:
+ current = i % values
+ return encode_state_aux(i / values, values, slots - 1) + [current]
+
+if __name__ == '__main__':
+    # quick enumeration check of the encoding (note: 4**16 indices, very slow to print in full)
+    slots = 16
+    values = 4
+
+    count = values ** slots
+
+    for i in xrange(count):
+        print str(i) + ":" + str(encode_state_aux(i, values, slots))
\ No newline at end of file
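To make the index/slot mapping of encode_state_aux concrete: index 27 with values=4 and slots=3 unpacks to [1, 2, 3], since 1*4**2 + 2*4 + 3 == 27. A matching inverse (a hypothetical helper, not part of this patch) would pack a slot list back into an index:

    def decode_state_aux(slot_values, values):
        # inverse of encode_state_aux: pack base-`values` digits back into a single index
        index = 0
        for v in slot_values:
            index = index * values + v
        return index

    assert encode_state_aux(27, 4, 3) == [1, 2, 3]
    assert decode_state_aux([1, 2, 3], 4) == 27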
diff --git a/src/sorting_demo/src/sorting_demo/robot_control.py b/src/sorting_demo/src/sorting_demo/robot_control.py
new file mode 100755
index 0000000..e9de81e
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/robot_control.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+
+import intera_interface
+import rospy
+from geometry_msgs.msg import (
+ Pose,
+)
+
+from sorting_demo import demo_constants
+from real_gripper import PSGGripper
+
+class SawyerRobotControl(object):
+ def __init__(self, trajectory_planner, limb="right", hover_distance=0.08, tip_name="right_gripper_tip"):
+ """
+ :param limb:
+ :param hover_distance:
+ :param tip_name:
+ """
+ self.trajectory_planner = trajectory_planner
+ self._limb_name = limb # string
+ self._tip_name = tip_name # string
+ self._hover_distance = hover_distance # in meters
+ self._limb = intera_interface.Limb(limb)
+
+ if demo_constants.is_real_robot():
+            self._gripper = PSGGripper()
+ else:
+ self._gripper = intera_interface.Gripper()
+
+ self._robot_enable = intera_interface.RobotEnable()
+
+ # verify robot is enabled
+ print("Getting robot state... ")
+ self._rs = intera_interface.RobotEnable(intera_interface.CHECK_VERSION)
+ self._init_state = self._rs.state().enabled
+ print("Enabling robot... ")
+ self._rs.enable()
+
+ """
+ def move_to_start(self, start_angles=None):
+ '''
+ :param start_angles:
+ :return:
+ '''
+ print("Moving the {0} arm to start pose...".format(self._limb_name))
+ if not start_angles:
+ start_angles = dict(zip(self._joint_names, [0] * 7))
+ self._guarded_move_to_joint_position(start_angles)
+ self.gripper_open()
+ """
+
+    def gripper_open(self):
+        """Open the gripper and block until it stops moving."""
+ rospy.logwarn("OPENING GRIPPER")
+ self._gripper.open()
+ while self._gripper.is_moving() and not rospy.is_shutdown():
+ rospy.sleep(0.4)
+
+ def gripper_close(self):
+ """
+ :return:
+ """
+ rospy.logwarn("CLOSING GRIPPER")
+ self._gripper.close()
+
+ while self._gripper.is_moving() and not rospy.is_shutdown():
+ rospy.sleep(0.1)
+
+ def _guarded_move_to_joint_position(self, joint_angles, timeout=5.0):
+ """
+ :param joint_angles:
+ :param timeout:
+ :return:
+ """
+ if rospy.is_shutdown():
+ return
+ if joint_angles:
+ self._limb.move_to_joint_positions(joint_angles, timeout=timeout)
+ else:
+ rospy.logerr("No Joint Angles provided for move_to_joint_positions. Staying put.")
+
+    def disable(self):
+        """Disable the robot actuators."""
+        self._robot_enable.disable()
+
+    def enable(self):
+        """Enable the robot actuators."""
+        self._robot_enable.enable()
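As a usage sketch, the class above is typically driven as follows; this assumes rospy.init_node() has already been called and the robot is reachable. Passing None for the trajectory planner is only to illustrate the gripper calls, since a real caller would pass its planner instance.

    robot = SawyerRobotControl(trajectory_planner=None)  # None only for this sketch
    robot.gripper_close()   # blocks until the gripper stops moving
    robot.gripper_open()
    robot.disable()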
diff --git a/src/sorting_demo/src/sorting_demo/robot_funcs.py b/src/sorting_demo/src/sorting_demo/robot_funcs.py
new file mode 100644
index 0000000..31f2507
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/robot_funcs.py
@@ -0,0 +1,18 @@
+import math
+import rospy
+
+
+def force_joint_limits(jntspos):
+ joint_limits = [(-3.0503, 3.0503), (-3.8095, 2.2736), (-3.0426, 3.0426), (-3.0439, 3.0439)]
+
+ rospy.logwarn(jntspos)
+ for i in xrange(len(joint_limits)):
+ lower = joint_limits[i][0]
+ upper = joint_limits[i][1]
+ if jntspos[i] < lower and (jntspos[i] + 2.0 * math.pi) < upper:
+ jntspos[i] = jntspos[i] + 2 * math.pi
+
+ if jntspos[i] > upper and (jntspos[i] - 2.0 * math.pi) > lower:
+ jntspos[i] = jntspos[i] - 2 * math.pi
+
+ return jntspos
\ No newline at end of file
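A quick check of the wrapping behaviour (values are illustrative, and rospy logging is assumed to be available): J1's limits above are (-3.8095, 2.2736), so an out-of-range value of 3.0 rad is shifted by -2*pi into range, while in-range joints are left untouched.

    # 3.0 exceeds the upper limit 2.2736 and 3.0 - 2*pi ~= -3.2832 is above the
    # lower limit -3.8095, so the second branch substitutes the shifted value.
    print(force_joint_limits([0.0, 3.0, 0.0, 0.0]))  # -> [0.0, -3.2832..., 0.0, 0.0]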
diff --git a/src/sorting_demo/src/sorting_demo/robot_tasks_facade.py b/src/sorting_demo/src/sorting_demo/robot_tasks_facade.py
new file mode 100644
index 0000000..38b805b
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/robot_tasks_facade.py
@@ -0,0 +1,266 @@
+import flask
+import rospy
+from flask import Flask
+
+
+class RobotTaskFacade:
+ """
+    Facade that exposes the robot's task API so it can be driven by voice commands, for example:
+ 'Hello robot'
+ 'Hey robot! What task are you doing now?'
+ 'Hey robot! Put all red pieces on the tray 2'
+ 'Hey robot! Pause your work'
+ 'Hey robot! Give me a red brick'
+ 'Hey robot! Stop sorting green pieces'
+ 'Hey robot! restart your work'
+ 'Hey robot! What is the color of the piece you are grabbing'
+ 'Hey robot! Put all tray contents on the table'
+ """
+
+ def __init__(self, task_planner):
+ self.task_planner = task_planner
+
+ self.app = Flask(__name__)
+
+ @self.app.route("/state")
+ def get_state():
+ try:
+ return flask.json.jsonify(response="ACK", result=self.get_state())
+ except Exception as ex:
+ return flask.json.jsonify(response="ERR", message=ex.message)
+
+ @self.app.route("/current_task")
+ def get_current_task():
+ try:
+                return flask.json.jsonify(response="ACK", result=self.get_current_task())
+            except Exception as ex:
+                return flask.json.jsonify(response="ERR", message=ex.message)
+
+ @self.app.route("/count_table_pieces/