# Function used to compute the 3D point for the robot. Using the camera
# characteristics (intrinsics/extrinsics) we obtain a point with 3-5 mm of error.
def make_3D_point(x, y, pipeline, camera):
    '''
    Description
    ----------
    Compute the robot-frame 3D coordinates of an image point, using the
    depth camera's intrinsics and the depth->color extrinsics.

    Parameters
    ----------
    x : float
        X pixel coordinate of the point
    y : float
        Y pixel coordinate of the point
    pipeline : pyrealsense.pipeline
        Camera pipeline object
    camera : int
        Which camera (1 or 2) the pixel comes from; selects the fixed
        camera-to-robot offset applied at the end

    Returns
    -------
    point : list of float
        The X, Y and Z coordinates of the point in robot space (mm)

    Raises
    ------
    ValueError
        If ``camera`` is not 1 or 2. (Previously this case fell through
        and crashed with UnboundLocalError on ``point``.)
    '''
    # Wait for a coherent pair of frames: depth and color
    frames = pipeline.wait_for_frames()
    depth_frame = frames.get_depth_frame()
    color_frame = frames.get_color_frame()
    # get_distance requires integer pixel indices; round defensively in
    # case the caller passes sub-pixel float coordinates.
    px, py = int(round(x)), int(round(y))
    # Depth value (metres) at the pixel of interest. NOTE(review): this
    # is 0.0 for invalid/occluded depth pixels — callers should verify.
    depth_value = depth_frame.get_distance(px, py)
    # Deproject the pixel into camera-space coordinates (metres)
    depth_intrinsics = depth_frame.profile.as_video_stream_profile().intrinsics
    extrinsics = depth_frame.profile.get_extrinsics_to(color_frame.profile)
    cam_coords = rs.rs2_deproject_pixel_to_point(depth_intrinsics, [x, y], depth_value)
    # Apply the depth->color rigid transform as a 3x4 matrix acting on
    # the homogeneous point [X, Y, Z, 1].
    rotation = np.array(extrinsics.rotation).reshape(3, 3)
    translation = np.array(extrinsics.translation).reshape(3, 1)
    transform = np.concatenate((rotation, translation), axis=1)
    world_coords = np.dot(transform, np.append(cam_coords, [1]))[:3]
    # Fixed offset from the selected camera origin to the robot origin (mm)
    if camera == 1:
        offset = [-594.37, -645, 936.8]
    elif camera == 2:
        offset = [-670, 215, 936.8]
    else:
        raise ValueError(f"camera must be 1 or 2, got {camera!r}")
    # Camera frame -> robot frame: swap/negate axes, convert metres to
    # millimetres, then add the camera's offset.
    point = [
        -(world_coords[1] * 1000) + offset[0],
        -(world_coords[0] * 1000) + offset[1],
        -(world_coords[2] * 1000) + offset[2],
    ]
    return point