diff --git a/src/sorting_demo/launch/robot_sorting_demo_control.launch b/src/sorting_demo/launch/robot_sorting_demo_control.launch
index c925bd6..bea305d 100644
--- a/src/sorting_demo/launch/robot_sorting_demo_control.launch
+++ b/src/sorting_demo/launch/robot_sorting_demo_control.launch
@@ -12,6 +12,6 @@
-
+
\ No newline at end of file
diff --git a/src/sorting_demo/src/sorting_demo/concepts/block.py b/src/sorting_demo/src/sorting_demo/concepts/block.py
index 717c4fa..bf5df3f 100644
--- a/src/sorting_demo/src/sorting_demo/concepts/block.py
+++ b/src/sorting_demo/src/sorting_demo/concepts/block.py
@@ -1,19 +1,24 @@
import re
from rospy_message_converter import message_converter
-
+import demo_constants
+
class BlockState:
regex = re.compile(r'block(\d+)\.*')
- def __init__(self, gazebo_id, pose):
+ def __init__(self, id, pose):
self.perception_id = None
- self.gazebo_id = gazebo_id
+ self.gazebo_id = id
self.pose = pose
self.color = None
self.homogeneous_transform = None
- search = BlockState.regex.search(gazebo_id)
- self.num = int(search.group(1))
+
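+        # In simulation the Gazebo id has the form "blockN" and encodes the block number;
+        # on the real robot the id is already the numeric index.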
+ if not demo_constants.REAL_ROBOT:
+ search = BlockState.regex.search(id)
+ self.num = int(search.group(1))
+ else:
+ self.num = int(id)
+
self.gazebo_pose = None
self.grasp_pose = None
self.place_pose = None
diff --git a/src/sorting_demo/src/sorting_demo/environment_estimation.py b/src/sorting_demo/src/sorting_demo/environment_estimation.py
index 8737f1a..5ee0ef4 100644
--- a/src/sorting_demo/src/sorting_demo/environment_estimation.py
+++ b/src/sorting_demo/src/sorting_demo/environment_estimation.py
@@ -1,5 +1,6 @@
#!/usr/bin/python
import copy
+import random
import re
import math
@@ -13,7 +14,6 @@ from concepts.block import BlockState
from concepts.tray import TrayState
from concepts.table import Table
-
from cv_detection_head import CameraHelper, get_blobs_info
from cv_detection_right_hand import get_cubes_z_rotation
@@ -51,6 +51,22 @@ class EnvironmentEstimation:
self.hand_camera_helper = CameraHelper("right_hand_camera", "base", TABLE_HEIGHT)
+ if demo_constants.REAL_ROBOT:
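+            # No Gazebo ground truth is available on the real robot: seed an assumed
+            # k x k grid of block poses on the table, with small random position
+            # jitter and a random orientation for each block.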
+ k = 3
+ for i in xrange(k):
+ for j in xrange(k):
+ q = tf.transformations.quaternion_from_euler(random.uniform(0, 2 * math.pi),
+ random.uniform(0, 2 * math.pi),
+ random.uniform(0, 2 * math.pi))
+
+ block = BlockState(id=str(len(self.gazebo_blocks)),
+ pose=Pose(position=Point(x=0.45 + j * 0.15 + random.uniform(-1, 1) * 0.03,
+ y=-0.15 + i * 0.15 + random.uniform(-1, 1) * 0.03,
+ z=0.7725),
+ orientation=Quaternion(x=q[0], y=q[1], z=q[2], w=q[3])))
+
+ self.gazebo_blocks.append(block)
+
def identify_block_from_aproximated_point(self, projected):
"""
:param projected:
@@ -77,7 +93,7 @@ class EnvironmentEstimation:
return None
def compute_block_pose_estimation_from_arm_camera(self, CUBE_SIZE=150):
- #get latest image from topic
+ # get latest image from topic
rospy.sleep(0.3)
# Take picture
img_data = self.hand_camera_helper.take_single_picture()
@@ -143,7 +159,7 @@ class EnvironmentEstimation:
# select the other grasping
if not graspA and graspB:
rospy.logwarn("Swiching grasping orientation for current block (grasping clearance)")
- if final_cube_yaw_angle >0:
+ if final_cube_yaw_angle > 0:
final_cube_yaw_angle -= math.pi / 2
else:
final_cube_yaw_angle += math.pi / 2
@@ -155,16 +171,16 @@ class EnvironmentEstimation:
self.tf_broacaster.sendTransform(projected, poseq, rospy.Time(0), "estimated_cube_1", "base")
rospy.logwarn(projected)
- #cv2.imshow("cube detection", cv_image)
- #cv2.waitKey(0)
+ # cv2.imshow("cube detection", cv_image)
+ # cv2.waitKey(0)
return Pose(position=Point(x=projected[0], y=projected[1], z=projected[2]),
- orientation=Quaternion(x=poseq[0], y=poseq[1], z=poseq[2], w=poseq[3])),graspA or graspB
+ orientation=Quaternion(x=poseq[0], y=poseq[1], z=poseq[2], w=poseq[3])), graspA or graspB
except Exception as ex:
rospy.logwarn("erroneous cube detection")
- #cv2.imshow("erroneus cube detection", cv_image)
- #cv2.waitKey(0)
+            # cv2.imshow("erroneous cube detection", cv_image)
+ # cv2.waitKey(0)
return None, False
def compute_block_pose_estimations_from_head_camera(self):
@@ -180,7 +196,7 @@ class EnvironmentEstimation:
# Convert to OpenCV format
cv_image = self.bridge.imgmsg_to_cv2(img_data, "bgr8")
- cv2.imwrite("/tmp/last_head_picture.jpg",cv_image)
+ cv2.imwrite("/tmp/last_head_picture.jpg", cv_image)
rospy.logwarn("processing head camera image to find blocks")
blobs_info = get_blobs_info(cv_image)
@@ -278,7 +294,7 @@ class EnvironmentEstimation:
item = self.get_block_by_gazebo_id(name)
if item is None:
# rospy.logwarn("block create name: "+ name)
- item = BlockState(gazebo_id=name, pose=pose)
+ item = BlockState(id=name, pose=pose)
item.color = demo_constants.BLOCK_COLOR_MAPPINGS[item.num]["material"].replace("Gazebo/", "")
else:
item.pose = pose
@@ -288,7 +304,8 @@ class EnvironmentEstimation:
item = self.get_tray_by_gazebo_id(name)
# item = None
if item is None:
- item = TrayState(gazebo_id=name, pose=pose, TRAY_SURFACE_THICKNESS= demo_constants.TRAY_SURFACE_THICKNESS)
+ item = TrayState(gazebo_id=name, pose=pose,
+ TRAY_SURFACE_THICKNESS=demo_constants.TRAY_SURFACE_THICKNESS)
item.color = demo_constants.TRAY_COLORS[item.num].replace("Gazebo/", "")
else:
item.pose = pose
@@ -321,45 +338,46 @@ class EnvironmentEstimation:
blocks = []
trays = []
- # publish tfs
- basehomopose = get_homo_matrix_from_pose_msg(self.gazebo_world_to_ros_transform, tag="base")
+ if not demo_constants.REAL_ROBOT:
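+            # Gazebo ground-truth poses (and their TF frames) only exist in simulation.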
+ # publish tfs
+ basehomopose = get_homo_matrix_from_pose_msg(self.gazebo_world_to_ros_transform, tag="base")
- for items in collections:
- for item in items:
+ for items in collections:
+ for item in items:
- # block homogeneous transform
- homo_pose = get_homo_matrix_from_pose_msg(item.pose, tag="block")
+ # block homogeneous transform
+ homo_pose = get_homo_matrix_from_pose_msg(item.pose, tag="block")
- # rospy.logwarn("TF PUBLISH..." + str(homo_pose))
- # rospy.logwarn("item state: " + str(item))
+ # rospy.logwarn("TF PUBLISH..." + str(homo_pose))
+ # rospy.logwarn("item state: " + str(item))
- transf_homopose = inverse_compose(basehomopose, homo_pose)
+ transf_homopose = inverse_compose(basehomopose, homo_pose)
- trans = tf.transformations.translation_from_matrix(transf_homopose)
- quat = tf.transformations.quaternion_from_matrix(transf_homopose)
+ trans = tf.transformations.translation_from_matrix(transf_homopose)
+ quat = tf.transformations.quaternion_from_matrix(transf_homopose)
- self.tf_broacaster.sendTransform(trans,
- quat,
- rospy.Time.now(),
- item.gazebo_id,
- "world")
+ self.tf_broacaster.sendTransform(trans,
+ quat,
+ rospy.Time.now(),
+ item.gazebo_id,
+ "world")
- item.gazebo_pose = homotransform_to_pose_msg(transf_homopose)
+ item.gazebo_pose = homotransform_to_pose_msg(transf_homopose)
- if isinstance(item, BlockState):
- blocks.append(item)
- elif isinstance(item, TrayState):
- trays.append(item)
- # else:
- # rospy.logwarn("DETECTED ITEM:" + str(item))
+ if isinstance(item, BlockState):
+ blocks.append(item)
+ elif isinstance(item, TrayState):
+ trays.append(item)
+ # else:
+ # rospy.logwarn("DETECTED ITEM:" + str(item))
- self.blocks = blocks
- self.trays = trays
+ self.blocks = blocks
+ self.trays = trays
- # rospy.logwarn("GAZEBO blocks update lenght: %d"%len(self.gazebo_blocks))
- # rospy.logwarn("blocks update lenght: %d"%len(self.blocks))
- if self.original_blocks_poses_ is None:
- self.original_blocks_poses_ = [copy.deepcopy(block.gazebo_pose) for block in blocks]
+            # rospy.logwarn("GAZEBO blocks update length: %d"%len(self.gazebo_blocks))
+            # rospy.logwarn("blocks update length: %d"%len(self.blocks))
+ if self.original_blocks_poses_ is None:
+ self.original_blocks_poses_ = [copy.deepcopy(block.gazebo_pose) for block in blocks]
finally:
self.mutex.release()
@@ -420,7 +438,6 @@ class EnvironmentEstimation:
:param id:
:return:
"""
-
color = color.replace("Gazebo/", "")
rospy.logwarn("by color: " + str(color))
rospy.logwarn("by color: " + str(self.trays))
diff --git a/src/sorting_demo/src/sorting_demo/gripper_action_server.py b/src/sorting_demo/src/sorting_demo/gripper_action_server.py
index 73c6b7b..5efbc1a 100755
--- a/src/sorting_demo/src/sorting_demo/gripper_action_server.py
+++ b/src/sorting_demo/src/sorting_demo/gripper_action_server.py
@@ -11,14 +11,15 @@ from control_msgs.msg import (
GripperCommandFeedback,
GripperCommandResult,
)
-
+from real_gripper import PSGGripper
import intera_interface
class GripperActionServer(object):
def __init__(self):
self._ns = "/robot/gripper_action_server"
- self._gripper = intera_interface.Gripper()
+        self._gripper = PSGGripper()  # replaces intera_interface.Gripper() for the real PSG gripper
# Action Server
self._server = actionlib.SimpleActionServer(
diff --git a/src/sorting_demo/src/sorting_demo/program.py b/src/sorting_demo/src/sorting_demo/program.py
index fe4eacb..743aa56 100755
--- a/src/sorting_demo/src/sorting_demo/program.py
+++ b/src/sorting_demo/src/sorting_demo/program.py
@@ -5,19 +5,23 @@ import sys
import rospy
import gazebo_models
from task_planner import TaskPlanner
-
+import demo_constants
+
def main():
rospy.init_node("sorting_demo")
- # Load Gazebo Models via Spawning Services
- # Note that the models reference is the /world frame
- # and the IK operates with respect to the /base frame
- model_list, original_block_poses = gazebo_models.load_gazebo_models()
- # Remove models from the scene on shutdown
+    rospy.logwarn("sorting_demo node started")
- rospy.on_shutdown(functools.partial(gazebo_models.delete_gazebo_models, model_list))
+ if not demo_constants.REAL_ROBOT:
+ # Load Gazebo Models via Spawning Services
+ # Note that the models reference is the /world frame
+ # and the IK operates with respect to the /base frame
+ model_list, original_block_poses = gazebo_models.load_gazebo_models()
+ # Remove models from the scene on shutdown
+ rospy.on_shutdown(functools.partial(gazebo_models.delete_gazebo_models, model_list))
+
+ rospy.logwarn("Creating task planner")
task_planner = TaskPlanner()
diff --git a/src/sorting_demo/src/sorting_demo/real_gripper.py b/src/sorting_demo/src/sorting_demo/real_gripper.py
new file mode 100644
index 0000000..adb6dc6
--- /dev/null
+++ b/src/sorting_demo/src/sorting_demo/real_gripper.py
@@ -0,0 +1,32 @@
+
+
+class PSGGripper(object):
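+    """Minimal stand-in for intera_interface.Gripper when driving the real PSG gripper.
+
+    The methods below are stubs that mirror the gripper calls used by the demo;
+    presumably they will be backed by the real gripper driver later.
+    """
+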
+ def __init__(self):
+ pass
+
+ def get_position(self):
+ return 0
+
+ def get_force(self):
+ return 0
+
+ def set_dead_zone(self, v):
+ return 0
+
+ def get_dead_zone(self):
+ return 0
+
+ def is_moving(self):
+ return 0
+
+ def has_error(self):
+ return False
+
+ def stop(self):
+ pass
+
+ def close(self):
+ pass
+
+ def open(self):
+ pass
\ No newline at end of file
diff --git a/src/sorting_demo/src/sorting_demo/robot_control.py b/src/sorting_demo/src/sorting_demo/robot_control.py
index 65cd818..e4c1f40 100755
--- a/src/sorting_demo/src/sorting_demo/robot_control.py
+++ b/src/sorting_demo/src/sorting_demo/robot_control.py
@@ -8,6 +8,8 @@ from geometry_msgs.msg import (
Pose,
)
+from sorting_demo import demo_constants
+from real_gripper import PSGGripper
class SawyerRobotControl(object):
def __init__(self, trajectory_planner, limb="right", hover_distance=0.08, tip_name="right_gripper_tip"):
@@ -21,7 +23,12 @@ class SawyerRobotControl(object):
self._tip_name = tip_name # string
self._hover_distance = hover_distance # in meters
self._limb = intera_interface.Limb(limb)
- self._gripper = intera_interface.Gripper()
+
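+        # Select the gripper backend: the PSGGripper stub on the real robot,
+        # the standard intera_interface gripper in simulation.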
+ if demo_constants.REAL_ROBOT:
+            self._gripper = PSGGripper()
+ else:
+ self._gripper = intera_interface.Gripper()
+
self._robot_enable = intera_interface.RobotEnable()
# verify robot is enabled
diff --git a/src/sorting_demo/src/sorting_demo/task_planner.py b/src/sorting_demo/src/sorting_demo/task_planner.py
index d731dd7..2fbfce5 100644
--- a/src/sorting_demo/src/sorting_demo/task_planner.py
+++ b/src/sorting_demo/src/sorting_demo/task_planner.py
@@ -29,10 +29,12 @@ class TaskPlanner:
def __init__(self):
"""
"""
+ rospy.logwarn("creating task planner")
limbname = 'right'
self._hover_distance = 0.1 # meters
self.place_hover_distance = 0.15
+ rospy.logwarn("creating environment estimation")
# subcomponents
self.environment_estimation = EnvironmentEstimation()
@@ -44,6 +46,8 @@ class TaskPlanner:
self.ikservice = rospy.ServiceProxy("/sawyer_ik_5d_node/ik", moveit_msgs.srv.GetPositionIK)
+        rospy.logwarn("IK service proxy created")
+
self.cancel_signal = False
self.pause_flag = False
@@ -64,6 +68,16 @@ class TaskPlanner:
self.joint_names = ["right_j0", "right_j1", "right_j2", "right_j3", "right_j4", "right_j5", "right_j6"]
self.gripper_state = GripperState()
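+
+        # Joint configuration for the "head pose" (arm moved clear, presumably so the
+        # head camera can observe the table); also reused as the default starting pose.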
+ self.head_pose_joints = {'right_j0': 0.0,
+ 'right_j1': -numpy.pi / 2.0,
+ 'right_j2': 0.0,
+ 'right_j3': numpy.pi / 2.0,
+ 'right_j4': 0.0,
+ 'right_j5': 0.0,
+ 'right_j6': 0.0}
+
+ self.starting_joint_angles = self.head_pose_joints
+
def scheduler_yield(self):
rospy.logwarn("scheduler yield")
if self.cancel_signal:
@@ -314,13 +328,7 @@ class TaskPlanner:
oldceil = self.trajectory_planner.ceilheight
- joint_angles = {'right_j0': 0.0,
- 'right_j1': -numpy.pi / 2.0,
- 'right_j2': 0.0,
- 'right_j3': numpy.pi / 2.0,
- 'right_j4': 0.0,
- 'right_j5': 0.0,
- 'right_j6': 0.0}
+ joint_angles = self.head_pose_joints
self.trajectory_planner.ceilheight = 2.0