#!/usr/bin/env python

import socket
import struct
import time
import threading
import os
import sys
import math

import numpy as np
import cv2
from robots import UR5
from cameras import RealSense
import pybullet as p
import utils_3d as utils
from skimage import measure
from scipy import optimize
import matplotlib.pyplot as plt
import utils_3d
class RealUR5Env(object):
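    """Real-robot environment wrapping a UR5 arm and a RealSense bin camera.

    Supports two end-effector tasks ('grasp' with a parallel-jaw gripper and
    'suction'), loads or runs checkerboard-based hand-eye calibration, and
    provides helpers for heightmap construction, grasp/suction primitives,
    object placement, and manual click-to-touch debugging.
    """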
    def __init__(self, args):
        self.is_sim = False
        self.task = args.task
        self.last_reset_iter = 0

        # Defines where robot end effector can move to in world coordinates
        self._workspace_bounds = np.array([[ 0.35,  0.85],  # 3x2, rows: x,y,z, cols: min,max
                                           [-0.30,  0.05],
                                           [-0.40, -0.10]])
        # Defines where robot can see in world coordinates (3D bounds for TSDF fusion)
        self._view_bounds = np.array([[ 0.30,  0.90],  # 3x2, rows: x,y,z, cols: min,max
                                      [-0.35,  0.10],
                                      [-0.40, -0.10]])
        self._home_j_config = np.asarray([0.0, -90.0, -90.0, -90.0, 90.0, 0.0]) * np.pi / 180.0

        if args.task == 'grasp':
            self.robot_ip = 'xxx'
            tool_offset = [0.0, 0.0, 0.174, 0.0, 0.0, 0.0]
            cali_file_name = '_g.txt'
            self._bin_cam = RealSense(tcp_ip='127.0.0.1', tcp_port=50010, im_h=720, im_w=1280, max_depth=3.0)
            self._heightmap_pixel_size = 0.00125  # in meters
            self._block_offset_from_tool = np.asarray([0.0, -0.015/np.sqrt(2.), 0.015/np.sqrt(2.)])  # from tip to checkerboard center
            # self._block_offset_from_tool = np.asarray([0.0, -0.018, 0.0])  # from tip to checkerboard center
            self._tool_orientation = np.asarray([0.006, -2.899, 1.216])
            # self._tool_orientation = np.asarray([0.0, -3.14, 0.00])
        elif args.task == 'suction':
            self.robot_ip = 'xxx'
            tool_offset = [0e-3, 8e-3, 236e-3, 0e-3, 0e-3, 0e-3]
            cali_file_name = '_s.txt'
            self._bin_cam = RealSense(tcp_ip='127.0.0.1', tcp_port=50010, im_h=720, im_w=1280, max_depth=3.0)
            # self._heightmap_pixel_size = 0.005  # in meters
            self._heightmap_pixel_size = 0.00125  # in meters
            # Workspace for calibration
            self._workspace_bounds = np.array([[0.4, 0.8], [-0.37, 0.25], [-0.45, -0.3]])
            self._view_bounds = np.array([[0.4, 0.8], [-0.43, 0.27], [-0.5, -0.3]])
            self._block_offset_from_tool = np.asarray([0.03, 0.0, 0.11])  # from tip to checkerboard center
            self._tool_orientation = np.asarray([2.222, -2.2213, 0.01])
            # self._tool_orientation = np.asarray([1.2, 1.2, 1.2])

        self.computer_ip = '128.30.31.8'
        self.robot = UR5(tcp_ip=self.robot_ip, tcp_port=30002, rtc_port=30003, computer_ip=self.computer_ip,
                         j_acc=1.4, j_vel=1.1,  # safer params: j_acc=1.4, j_vel=1.1
                         # j_acc=1.1, j_vel=2.0,  # safer params: j_acc=1.4, j_vel=1.1
                         tool_offset=tool_offset,
                         home_j_config=self._home_j_config,
                         ar_tracker=None, rtde_config_path='./record_configuration.xml')
        try:
            print('./calibration/camera_pose' + cali_file_name)
            self._bin_cam_pose = np.loadtxt('./calibration/camera_pose' + cali_file_name)
            self._bin_cam_depth_scale = np.loadtxt('./calibration/camera_depth_scale' + cali_file_name)
        except:
            self.robot.homej(blocking=True)
            print('Calibration files not found! Running calibration')
            input("Press Enter to continue ...")
            self.calibrate_bin_cam(cali_file_name)
        # Define Denavit-Hartenberg parameters for UR5
        self._ur5_kinematics_d = np.array([0.1625, 0., 0., 0.1333, 0.0997, 0.0996])
        self._ur5_kinematics_a = np.array([0., -0.42500, -0.39225, 0., 0., 0.])

        # Move robot to home configuration
        self.open_gripper(blocking=True)
        self.robot.homej(blocking=True)
        self.initial_gripper_pose = self.robot.get_tool_pose()[0:3, 0:4]

        # Start thread to perform robot actions in parallel to main thread
        # self.robot_do_grasp, self.robot_is_grasping = False, False
        # self.robot_grasp_params = [None, None]  # grasping primitive parameters
        # self.robot_grasp_success, self.robot_grasped_object_shape_id, self.robot_grasp_object_offset = None, -1, np.array([0, 0, 0])
        # self.robot_do_toss, self.robot_is_tossing = False, False
        # self.robot_toss_params = [None, None]  # tossing primitive parameters
        # action_thread = threading.Thread(target=self.robot_do_action)
        # action_thread.daemon = True
        # action_thread.start()

    # Callback for doing robot actions in parallel with main thread
    # def robot_do_action(self):
    #     while True:
    #         if not self._ar_tracker.marker_detected:
    #             if self.robot_do_grasp:
    #                 self.robot_is_grasping = True
    #                 grasp_position = self.robot_grasp_params[0]
    #                 grasp_angle = self.robot_grasp_params[1]
    #                 self.robot_grasp_success = self.robot.grasping_primitive(grasp_position, grasp_angle)
    #                 self.robot_do_grasp = False
    #                 self.robot_is_grasping = False
    #             time.sleep(0.01)
    #         time.sleep(0.01)
    def init_bounds(self):
        color_image, depth_image, cam_pose = self.get_gripper_camera()
        z_near = 0.2
        z_far = 3.5
        depth_image[depth_image < z_near] = 0
        depth_image[depth_image > z_far] = 0
        camera_points, color_points = utils.get_pointcloud(color_image, depth_image, self.get_camera_intrisic())
        xyz_points = utils.transform_pointcloud(camera_points, cam_pose)
        # Denoise xyz_points and use the resulting bounds as view bounds (padded by 5 cm)
        bnd = utils.denoise_pointcloud(xyz_points, 3)
        view_bounds = bnd.copy()
        view_bounds[:, 0] = view_bounds[:, 0] - 0.05
        view_bounds[:, 1] = view_bounds[:, 1] + 0.05
        gripper_transformation = self.robot.get_tool_pose()
        workspace_bounds = view_bounds.copy()
        workspace_bounds[:, 0] = np.min(np.vstack((workspace_bounds[:, 0], gripper_transformation[0:3, 3])), axis=0)
        workspace_bounds[:, 1] = np.max(np.vstack((workspace_bounds[:, 1], gripper_transformation[0:3, 3])), axis=0)
        return view_bounds, workspace_bounds
    def get_view_bounds(self):
        return self._view_bounds

    def get_workspace_bounds(self):
        return self._workspace_bounds

    def robot_go_home(self, blocking=True):
        self.robot.homej(blocking)

    def get_bin_camera_data(self):
        color_im = self._bin_cam.color_im.copy()
        depth_im = self._bin_cam.depth_im.copy() * self._bin_cam_depth_scale
        return color_im, depth_im
    def get_bin_heightmap(self, color_image, depth_image):
        camera_points, color_points = utils.get_pointcloud(color_image, depth_image, self._bin_cam.color_intr)
        camera_points = utils.transform_pointcloud(camera_points, self._bin_cam_pose)
        # utils.write_pointcloud('test.ply', camera_points, color_points)
        color_heightmap, depth_heightmap = utils.get_heightmap(camera_points, color_points, self._view_bounds,
                                                               self._heightmap_pixel_size, zero_level=self._view_bounds[2, 0])
        return color_heightmap, depth_heightmap
    def get_valid_pixels_heightmap(self, depth_heightmap):
        heightmap_width = depth_heightmap.shape[1]
        heightmap_height = depth_heightmap.shape[0]

        # Get 3D locations of each heightmap pixel in world coordinates
        heightmap_pixel_x, heightmap_pixel_y = np.meshgrid(np.linspace(0, heightmap_width - 1, heightmap_width),
                                                           np.linspace(0, heightmap_height - 1, heightmap_height))
        heightmap_points = np.array([heightmap_pixel_x * self._heightmap_pixel_size + self._view_bounds[0, 0],
                                     heightmap_pixel_y * self._heightmap_pixel_size + self._view_bounds[1, 0],
                                     depth_heightmap + self._view_bounds[2, 0]]).transpose(1, 2, 0)
        within_workspace_bounds = np.logical_and(heightmap_points[:, :, 0] >= self._workspace_bounds[0, 0],
                                  np.logical_and(heightmap_points[:, :, 0] <= self._workspace_bounds[0, 1],
                                  np.logical_and(heightmap_points[:, :, 1] >= self._workspace_bounds[1, 0],
                                  np.logical_and(heightmap_points[:, :, 1] <= self._workspace_bounds[1, 1],
                                  np.logical_and(heightmap_points[:, :, 2] >= self._workspace_bounds[2, 0],
                                                 heightmap_points[:, :, 2] <= self._workspace_bounds[2, 1])))))
        return within_workspace_bounds
    def get_grasp_position(self, grasp_pixel, depth_heightmap):
        valid_pixels = np.array(np.where(depth_heightmap > 0)).T  # y,x
        dist_to_valid = np.sqrt(np.sum((valid_pixels - grasp_pixel)**2, axis=1))
        closest_valid_pixel = grasp_pixel
        if np.min(dist_to_valid) < 2:  # get nearest non-zero pixel less than 2 pixels away
            closest_valid_pixel = valid_pixels[np.argmin(dist_to_valid), :]
        grasp_position = np.array([closest_valid_pixel[1] * self._heightmap_pixel_size + self._view_bounds[0, 0],
                                   closest_valid_pixel[0] * self._heightmap_pixel_size + self._view_bounds[1, 0],
                                   depth_heightmap[closest_valid_pixel[0], closest_valid_pixel[1]] + self._view_bounds[2, 0]])
        grasp_position = np.clip(grasp_position, self._workspace_bounds[:, 0], self._workspace_bounds[:, 1])  # clamp grasp position w.r.t. workspace bounds
        return grasp_position, closest_valid_pixel
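    # Illustration only (not part of the original gist): get_grasp_position maps a heightmap
    # pixel (row, col) to a world-frame point via
    #   x = col * pixel_size + view_bounds[0, 0]
    #   y = row * pixel_size + view_bounds[1, 0]
    #   z = heightmap[row, col] + view_bounds[2, 0]
    # The hypothetical helper below sketches the inverse mapping (world point -> pixel),
    # assuming the same view bounds and heightmap pixel size.
    def _world_to_heightmap_pixel(self, world_xy):
        col = int(round((world_xy[0] - self._view_bounds[0, 0]) / self._heightmap_pixel_size))
        row = int(round((world_xy[1] - self._view_bounds[1, 0]) / self._heightmap_pixel_size))
        return np.array([row, col])  # same (y, x) ordering used by get_grasp_position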
    # Grasp for simulation
    def grasp(self, grasp_pixel, grasp_angle, depth_heightmap):
        grasp_position, closest_valid_pixel = self.get_grasp_position(grasp_pixel, depth_heightmap)
        grasp_position[2] -= 0.03  # target 3 cm below observed surface
        print("Grasp position: {}".format(grasp_position))
        robot_grasp_success = self.robot.grasping_primitive(grasp_position, grasp_angle)
        return robot_grasp_success
    # Grasp for real robot
    def grasp_real(self, grasp_position, grasp_angle):
        grasp_position[2] -= 0.04  # target 4 cm below observed surface
        bottom = -0.36
        if grasp_position[2] < bottom:
            grasp_position[2] = bottom
        print("Grasp position: {}".format(grasp_position))
        robot_grasp_success = self.robot.grasping_primitive(grasp_position, grasp_angle)
        return robot_grasp_success
    # Suction for simulation
    def suction(self, grasp_pixel, grasp_angle, depth_heightmap):
        grasp_position, closest_valid_pixel = self.get_grasp_position(grasp_pixel, depth_heightmap)
        grasp_position[2] += 0.015  # target 1.5 cm above observed surface
        print("Grasp position: {}".format(grasp_position))
        robot_suction_success = self.robot.suction_primitive(grasp_position)
        return robot_suction_success

    # Suction for real robot
    def suction_real(self, grasp_position):
        if grasp_position[2] < self._workspace_bounds[2, 0]:
            grasp_position[2] = self._workspace_bounds[2, 0]
        print("Grasp position: {}".format(grasp_position))
        robot_suction_success = self.robot.suction_primitive(grasp_position)
        return robot_suction_success
    def move2goal(self, relativeRT, max_pixel, render_depth, cam_pose_vir, camera_intrin_vir, step_size):
        curr_transformation = self.robot.get_tool_pose()
        relativeR = np.zeros((3, 4))
        relativeR[0:3, 0:3] = relativeRT[0:3, 0:3]
        move_rot = utils_3d.tranformRT(relativeR, curr_transformation)
        target_3d_point = utils_3d.get_3d_point(max_pixel.reshape(2, 1), render_depth, cam_pose_vir, camera_intrin_vir)
        workspace_bounds_tool = self.get_workspace_bounds()
        target_3d_point = np.clip(target_3d_point, workspace_bounds_tool[:, 0], workspace_bounds_tool[:, 1])
        # tool_offset = np.array(self.tool_tip_to_ee_joint)
        # tool_offset[2] = tool_offset[2] - 0.035  # scale = 0.125
        target_3d_tool_pos = target_3d_point.copy()  # - np.dot(move_rot[0:3,0:3], tool_offset.reshape(3,1)).reshape(1,3)
        move_vec = target_3d_tool_pos - move_rot[0:3, 3].T
        distance_to_goal = np.linalg.norm(move_vec)
        move_vec_step = move_vec * (min(distance_to_goal, step_size) / distance_to_goal)
        mov_loc = move_rot[0:3, 3] + move_vec_step.reshape(3)
        arrive_goal = int(distance_to_goal < step_size)
        if not arrive_goal:
            offset = -1 * self.initial_gripper_pose[:, 2] * 0.05
            target_tool_t = np.array(mov_loc).flatten() + offset
            target_tool_rot = utils.angle2urx(utils.rotm2angle(move_rot[0:3, 0:3]))
            succ = self.robot.movej(use_pos=True, params=[target_tool_t[0], target_tool_t[1], target_tool_t[2],
                                                          target_tool_rot[0], target_tool_rot[1], target_tool_rot[2]], blocking=True)
        target_tool_t = np.array(mov_loc).flatten()
        target_tool_rot = utils.angle2urx(utils.rotm2angle(move_rot[0:3, 0:3]))
        succ = self.robot.movej(use_pos=True, params=[target_tool_t[0], target_tool_t[1], target_tool_t[2],
                                                      target_tool_rot[0], target_tool_rot[1], target_tool_rot[2]], blocking=True)
        if succ == 0:
            self.robot.homej(True)
        print("target_3d_point:{}".format(target_3d_point))
        print(self.get_current_tool_tip_location()[:, 3])
        # print(target_3d_tool_pos)
        # input()
        return succ, curr_transformation, arrive_goal
    def get_current_tool_tip_location(self):
        curr_transformation = self.robot.get_tool_pose()
        return curr_transformation[0:3, 0:4]

    def open_gripper(self, blocking=False):
        if self.task == 'grasp':
            self.robot.open_gripper(blocking)
        elif self.task == 'suction':
            self.robot.off_suction(blocking)

    def close_gripper(self, blocking=False):
        if self.task == 'grasp':
            self.robot.close_gripper(blocking)
        elif self.task == 'suction':
            self.robot.on_suction(blocking)
    def execute_place(self, place_pos=None, place_angle=-90.):
        if place_pos is None:
            if self.task == 'grasp':
                place_pos = np.array([0.55, 0.6, 0.0])
            elif self.task == 'suction':
                place_pos = np.array([0.56, 0.54, -0.2])
        self.robot.homej(blocking=True)
        # If we want to place the objects using the gripper's initial pose, uncomment the following line:
        # target_tool_rot = utils.angle2urx(utils.rotm2angle(self.initial_gripper_pose[0:3,0:3]))
        home_rot = np.asarray([np.cos(7*np.pi/4), np.sin(7*np.pi/4), 0.0]) * np.pi
        home_rotm = utils.angle2rotm(utils.urx2angle(home_rot))
        targ_rotm = np.dot(utils.angle2rotm([place_angle*np.pi/180., 0, 0, 1.]), home_rotm)
        target_tool_rot = utils.angle2urx(utils.rotm2angle(targ_rotm))
        succ = self.robot.movej(use_pos=True,
                                params=[place_pos[0], place_pos[1], place_pos[2],
                                        target_tool_rot[0], target_tool_rot[1], target_tool_rot[2]],
                                blocking=True)
        # succ = self.robot.movej(use_pos=True, params=[place_pos_b[0], place_pos_b[1], place_pos_b[2], target_tool_rot[0], target_tool_rot[1], target_tool_rot[2]], blocking=True)
        self.open_gripper(blocking=True)
        # succ = self.robot.movej(use_pos=True, params=[place_pos_a[0], place_pos_a[1], place_pos_a[2], target_tool_rot[0], target_tool_rot[1], target_tool_rot[2]], blocking=True)
    def reset_objects(self, curr_iter, auto=True):
        if auto:
            self.robot.reset_primitive()
        else:  # manually reset the objects
            print('Not enough stuff in view! Resetting objects!! Press Enter When Done')
            self.robot.homej(blocking=True)
            input()
        self.last_reset_iter = curr_iter
    def calibrate_bin_cam(self, cali_file_name):
        # Set limits on y-axis
        self._workspace_bounds[1, 1] = min(self._workspace_bounds[1, 1], 0.3)

        # Move robot to home configuration
        self.robot.set_speed(j_acc=1.4, j_vel=1.1)
        self.robot.close_gripper(blocking=True)
        self.robot.homej(blocking=True)
        # self.robot.movej(use_pos=False, params=np.asarray([0., -90., -90., -90., 45., 90.])*np.pi/180., blocking=True)

        # Constants
        calib_grid_step = 0.05
        tool_orientation = self._tool_orientation
        # block_offset_from_tool = np.asarray([0., (0.044/2)/np.sqrt(2)-(0.022/2)/np.sqrt(2), (0.045/2)/np.sqrt(2)+(0.022/2)/np.sqrt(2)])  # from tip to block center

        # Construct 3D calibration grid across workspace (np.linspace needs an integer sample count)
        num_x = int(round(1 + (self._workspace_bounds[0, 1] - self._workspace_bounds[0, 0] - 0.2) / calib_grid_step))
        num_y = int(round(1 + (self._workspace_bounds[1, 1] - self._workspace_bounds[1, 0] - 0.2) / calib_grid_step))
        gridspace_x = np.linspace(self._workspace_bounds[0, 0] + 0.1, self._workspace_bounds[0, 1] - 0.1, num_x)
        gridspace_y = np.linspace(self._workspace_bounds[1, 0] + 0.1, self._workspace_bounds[1, 1] - 0.1, num_y)
        calib_grid_x, calib_grid_y, calib_grid_z = np.meshgrid(gridspace_x, gridspace_y, self._workspace_bounds[2, 0] + 0.1)
        num_calib_grid_pts = calib_grid_x.shape[0] * calib_grid_x.shape[1] * calib_grid_x.shape[2]
        calib_grid_x.shape = (num_calib_grid_pts, 1)
        calib_grid_y.shape = (num_calib_grid_pts, 1)
        calib_grid_z.shape = (num_calib_grid_pts, 1)
        calib_grid_pts = np.concatenate((calib_grid_x, calib_grid_y, calib_grid_z), axis=1)

        # Move robot to each calibration point in workspace
        global measured_pts, observed_pts, observed_pix, world2camera
        measured_pts = []
        observed_pts = []
        observed_pix = []
        print('Collecting data ...')
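        # Calibration procedure: the arm visits each grid point with the checkerboard target
        # mounted on the tool; the 3x3 checkerboard's center corner is detected in the color
        # image, back-projected to a camera-frame 3D point using the measured depth, and
        # paired with the known tool-frame position (offset by _block_offset_from_tool).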
        for calib_pt_idx in range(num_calib_grid_pts):
            tool_position = calib_grid_pts[calib_pt_idx, :]
            print(tool_position, tool_orientation)
            self.robot.movej(use_pos=True, params=[tool_position[0], tool_position[1], tool_position[2],
                                                   tool_orientation[0], tool_orientation[1], tool_orientation[2]], blocking=True)
            time.sleep(1.0)

            # Find pixel location of checkerboard center
            chckr_found = False
            while not chckr_found:
                color_im, depth_im = self._bin_cam.color_im, self._bin_cam.depth_im
                # plt.imshow(color_im)
                # plt.show()
                chckr_size = (3, 3)
                refine_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
                bgr_im = cv2.cvtColor(color_im, cv2.COLOR_RGB2BGR)
                gray_im = cv2.cvtColor(bgr_im, cv2.COLOR_BGR2GRAY)  # bgr_im is BGR, so convert with BGR2GRAY
                chckr_found, crnrs = cv2.findChessboardCorners(gray_im, chckr_size, None, cv2.CALIB_CB_ADAPTIVE_THRESH)
                if chckr_found:
                    crnrs_refined = cv2.cornerSubPix(gray_im, crnrs, (3, 3), (-1, -1), refine_criteria)
                    block_pix = crnrs_refined[4, 0, :]  # center corner, in (x, y) pixel coordinates
                else:
                    time.sleep(0.01)
                    continue
            # conf_map = color_im[:,:,0]/255.*(1-color_im[:,:,1]/255.)*(1-color_im[:,:,2]/255.)
            # conf_map = cv2.GaussianBlur(conf_map,(101,101),0)
            # block_pix = np.round(np.unravel_index(np.argmax(conf_map),conf_map.shape)).astype(int)  # y,x
            # block_pix = np.flip(block_pix,axis=0)  # save in format: x,y

            # Get observed checkerboard center 3D point in camera space
            block_z = depth_im[int(np.round(block_pix[1])), int(np.round(block_pix[0]))]
            block_x = np.multiply(block_pix[0] - self._bin_cam.color_intr[0, 2], block_z / self._bin_cam.color_intr[0, 0])  # pixel x pairs with cx, fx
            block_y = np.multiply(block_pix[1] - self._bin_cam.color_intr[1, 2], block_z / self._bin_cam.color_intr[1, 1])  # pixel y pairs with cy, fy
            print("Found {}".format([block_x, block_y, block_z]))
            if block_z == 0:
                print("Zero Z, potential error!")
                continue

            # Save calibration point and observed checkerboard center
            observed_pts.append([block_x, block_y, block_z])
            tool_position += self._block_offset_from_tool
            measured_pts.append(tool_position)
            observed_pix.append(block_pix)

            # Draw and display the corners
            vis_im = cv2.circle(color_im, (int(block_pix[0]), int(block_pix[1])), 7, (0, 255, 0), 2)
            # cv2.imwrite('%06d.png' % len(measured_pts), vis_im)
            cv2.imshow('Calibration', cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR))
            cv2.waitKey(10)
        # Move robot back to home pose
        self.robot.homej(blocking=True)

        measured_pts = np.asarray(measured_pts)
        observed_pts = np.asarray(observed_pts)
        observed_pix = np.asarray(observed_pix)
        world2camera = np.eye(4)
        # Estimate rigid transform with SVD (from Nghia Ho)
        def get_rigid_transform(A, B):
            assert len(A) == len(B)
            N = A.shape[0]  # Total points
            centroid_A = np.mean(A, axis=0)
            centroid_B = np.mean(B, axis=0)
            AA = A - np.tile(centroid_A, (N, 1))  # Center the points
            BB = B - np.tile(centroid_B, (N, 1))
            H = np.dot(np.transpose(AA), BB)  # np.dot is matrix multiplication for 2D arrays
            U, S, Vt = np.linalg.svd(H)
            R = np.dot(Vt.T, U.T)
            if np.linalg.det(R) < 0:  # Special reflection case
                Vt[2, :] *= -1
                R = np.dot(Vt.T, U.T)
            t = np.dot(-R, centroid_A.T) + centroid_B.T
            return R, t
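        # Note on get_rigid_transform: this is the standard SVD (Kabsch) solution to the
        # least-squares rigid alignment of two point sets. The cross-covariance of the
        # centered points, H = AA^T BB, is decomposed as H = U S V^T, giving R = V U^T;
        # if det(R) < 0, the last row of V^T is flipped to rule out a reflection, and the
        # translation is t = centroid_B - R centroid_A.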
        def get_rigid_transform_error(z_scale):
            global measured_pts, observed_pts, observed_pix, world2camera

            # Apply z scale and compute new observed points using camera intrinsics
            print(observed_pts, z_scale)
            observed_z = observed_pts[:, 2:] * z_scale
            observed_x = np.multiply(observed_pix[:, [0]] - self._bin_cam.color_intr[0, 2], observed_z / self._bin_cam.color_intr[0, 0])
            observed_y = np.multiply(observed_pix[:, [1]] - self._bin_cam.color_intr[1, 2], observed_z / self._bin_cam.color_intr[1, 1])
            new_observed_pts = np.concatenate((observed_x, observed_y, observed_z), axis=1)

            # Estimate rigid transform between measured points and new observed points
            R, t = get_rigid_transform(np.asarray(measured_pts), np.asarray(new_observed_pts))
            t.shape = (3, 1)
            world2camera = np.concatenate((np.concatenate((R, t), axis=1), np.array([[0, 0, 0, 1]])), axis=0)

            # Compute rigid transform error
            registered_pts = np.dot(R, np.transpose(measured_pts)) + np.tile(t, (1, measured_pts.shape[0]))
            error = np.transpose(registered_pts) - new_observed_pts
            error = np.sum(np.multiply(error, error))
            rmse = np.sqrt(error / measured_pts.shape[0])
            return rmse
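        # The camera's reported depth can be off by a roughly constant multiplicative factor,
        # so a single scalar z_scale is fitted below: for each candidate scale, the observed
        # points are re-projected from their pixels and scaled depth, a rigid world-to-camera
        # transform is fitted, and Nelder-Mead minimizes the resulting registration RMSE.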
        # Optimize z scale w.r.t. rigid transform error
        print('Calibrating ...')
        z_scale_init = 1
        optim_result = optimize.minimize(get_rigid_transform_error, np.asarray(z_scale_init), method='Nelder-Mead')
        camera_depth_offset = optim_result.x

        # Save optimized camera depth scale and camera pose
        print('Saving calibration files ...')
        if not os.path.exists('./calibration'):
            os.makedirs('./calibration')
        np.savetxt('./calibration/camera_depth_scale' + cali_file_name, camera_depth_offset, delimiter=' ')
        get_rigid_transform_error(camera_depth_offset)
        camera_pose = np.linalg.inv(world2camera)
        np.savetxt('./calibration/camera_pose' + cali_file_name, camera_pose, delimiter=' ')
        print('Done. Please restart main script. ' + './calibration/camera_pose' + cali_file_name)
    def touch(self):
        self.robot.set_speed(j_acc=1.4, j_vel=1.1)
        self.robot.homej(blocking=True)
        tool_orientation = np.asarray([np.cos(7*np.pi/4), np.sin(7*np.pi/4), 0.0]) * np.pi

        # Callback function for clicking on OpenCV window
        global click_point_pix
        click_point_pix = ()
        color_im, depth_im = self._bin_cam.color_im, self._bin_cam.depth_im

        def mouseclick_callback(event, x, y, flags, param):
            if event == cv2.EVENT_LBUTTONDOWN:
                global click_point_pix
                click_point_pix = (x, y)

                # Get click point in camera coordinates
                click_z = depth_im[y, x] * self._bin_cam_depth_scale
                click_x = (x - self._bin_cam.color_intr[0, 2]) * click_z / self._bin_cam.color_intr[0, 0]
                click_y = (y - self._bin_cam.color_intr[1, 2]) * click_z / self._bin_cam.color_intr[1, 1]
                if click_z == 0:
                    return
                click_point = np.asarray([click_x, click_y, click_z])
                click_point = np.append(click_point, 1.0).reshape(4, 1)

                # Convert camera coordinates to robot coordinates
                target_position = np.dot(self._bin_cam_pose, click_point)
                target_position = target_position[0:3, 0]
                print(target_position)

                # Move robot to target position
                final_pose = np.asarray([target_position[0], target_position[1], target_position[2],
                                         tool_orientation[0], tool_orientation[1], tool_orientation[2]])
                above_pose = final_pose.copy()
                above_pose[2] += 0.1
                self.robot.movej(use_pos=True, params=above_pose, blocking=True)
                self.robot.movej(use_pos=True, params=final_pose, blocking=True)
                time.sleep(3.0)

                # Move robot back to home position
                self.robot.movej(use_pos=True, params=above_pose, blocking=True)
                self.robot.homej(blocking=True)

        # Show color and depth frames
        cv2.namedWindow('color')
        cv2.setMouseCallback('color', mouseclick_callback)
        while True:
            color_im, depth_im = self._bin_cam.color_im, self._bin_cam.depth_im
            bgr_data = cv2.cvtColor(color_im, cv2.COLOR_RGB2BGR)
            if len(click_point_pix) != 0:
                bgr_data = cv2.circle(bgr_data, click_point_pix, 7, (0, 0, 255), 2)
            cv2.imshow('color', bgr_data)
            if cv2.waitKey(1) == ord('c'):
                break
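

# Hedged usage sketch (not part of the original gist, and it requires the real robot and
# camera to be reachable). It assumes a caller that passes an `args` object with a `task`
# attribute ('grasp' or 'suction'); the argparse interface and the naive "grasp the highest
# heightmap point" target selection below are illustrative assumptions only.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--task', default='grasp', choices=['grasp', 'suction'])
    args = parser.parse_args()

    env = RealUR5Env(args)
    color_im, depth_im = env.get_bin_camera_data()
    color_hm, depth_hm = env.get_bin_heightmap(color_im, depth_im)

    # Pick the highest point in the depth heightmap as a naive target (illustration only).
    grasp_pixel = np.array(np.unravel_index(np.argmax(depth_hm), depth_hm.shape))  # (row, col)
    grasp_position, _ = env.get_grasp_position(grasp_pixel, depth_hm)

    if args.task == 'grasp':
        success = env.grasp_real(grasp_position, grasp_angle=0.0)
    else:
        success = env.suction_real(grasp_position)
    print('Primitive success: {}'.format(success))
    env.robot_go_home()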