# Per-keypoint weights from descriptor self-similarity: a keypoint whose
# descriptor is nearly duplicated by another keypoint's descriptor gets a
# low weight, unique descriptors get a weight close to 1.
# NOTE(review): assumes obj_descriptors is a 2-D (N, D) tensor of
# L2-normalized descriptors, so the Gram matrix holds cosine similarities
# — TODO confirm against the caller.
Id = torch.eye(obj_descriptors.size(0), device=obj_descriptors.device)
cos = obj_descriptors @ obj_descriptors.t()
# torch.max(..., dim=...) returns a (values, indices) namedtuple; the
# original subtracted the namedtuple itself from 1.0 (a TypeError).
# Subtracting Id masks out each descriptor's trivial self-similarity of 1.
w = 1.0 - torch.max(cos - Id, dim=-1).values
# NOTE(review): this chunk is garbled by extraction — the original file's
# line numbers are embedded in the text, single statements are wrapped
# across lines, and many interior lines are elided (numbering jumps
# 152 -> 162 -> 164 ...). Code lines below are kept byte-identical;
# only review comments are added. Do not edit logic from this view.
# Purpose (presumed from the visible calls — confirm against full source):
# estimate the current camera pose by matching keypoints of `frame`
# against visible environment-map points, then refine the pose with a
# Levenberg-Marquardt minimization (3D or 2D keypoint variant).
152 def compute(self, frame: RBFeatureTrackerInput, previousFrame: RBFeatureTrackerInput) ->
None:
162 with torch.no_grad():
# Image dimensions, used to cull map points outside the current view.
164 h, w = frame.I.getRows(), frame.I.getCols()
166 visible_indices = np.ascontiguousarray(self.
environment_map.point_map.getVisiblePoints(h, w, frame.cam, self.
cTw, frame.depth))
168 if len(visible_indices) > 0:
# Descriptors of only the currently visible map points.
169 visible_env_descriptors = self.
environment_map.descriptors[visible_indices]
# Depth lookup at the keypoint pixel; kp appears to be (x, y) while the
# depth image is indexed [row, col] — hence the swapped indices.
183 Z = frame.depth[int(kp[1]), int(kp[0])]
185 valid_indices.append(i)
187 self.
Zs = np.ascontiguousarray(self.
Zs)
# NOTE(review): both elided branches (orig. 199 and 203) reset cnTc to a
# default-constructed HomogeneousMatrix; the surrounding conditions are
# not visible here — verify in the full source.
199 self.
cnTc = HomogeneousMatrix()
203 self.
cnTc = HomogeneousMatrix()
# Pixel -> normalized (meter) coordinates for the matched env keypoints.
207 xoe, yoe = PixelMeterConversion.convertPoints(frame.cam, current_px_matched_env[:, 0], current_px_matched_env[:, 1])
209 indices = ArrayInt2D.view(np.ascontiguousarray(self.
idx_environment_map[:,
None].astype(np.int32)))
# The optimization starts from the current pose estimate cTw.
211 c = HomogeneousMatrix(self.
cTw)
# 3D variant: back-project normalized (x, y) with depths Zs into
# camera-frame 3-D points. Presumably mutually exclusive with the 2D
# variant below via elided branch structure — TODO confirm.
213 current_xyz_env = Matrix.view(np.stack((xoe * self.
Zs, yoe * self.
Zs, self.
Zs), axis=-1))
214 RBVisualOdometryUtils.levenbergMarquardtKeypoints3D(mapX, current_xyz_env, self.
optim_params, c)
# 2D variant: uses normalized image coordinates only (no depth).
216 current_xy_env = Matrix.view(np.stack((xoe, yoe), axis=-1))
217 RBVisualOdometryUtils.levenbergMarquardtKeypoints2D(mapX, current_xy_env, self.
optim_params, c)
# NOTE(review): extraction artifact — the original file's line numbers are
# embedded in the text and some interior lines (e.g. 241, 247-248,
# 253-260, 283, 285, 287-289, 291) are elided. Code lines are kept
# byte-identical; only review comments are added.
238 def display(self, cam: CameraParameters, _I: ImageGray, IRGB: ImageRGBa, _I_depth: ImageGray):
239 """Display the features used for visual odometry.
240 XFeat keypoints are displayed as small disks in the RGB image.
242 Depending on the display type, the behavior is different:
243 - DisplayType.SIMPLE: Display the current image keypoints that were matched with the environment map
244 - DisplayType.SIMPLE_MODEL_AND_PROJ: Display current image keypoints along with the map points that they were matched with.
245 - DisplayType.WEIGHT_AND_ERROR: Same as the SIMPLE value, except that the color of the keypoints display the error
246 (on the red component) and their attributed weight in the optimization (in blue)
249 cam (CameraParameters): Camera intrinsics
250 _I (ImageGray): Grayscale image
251 IRGB (ImageRGBa): RGB image where to display the points
252 _I_depth (ImageGray): Depth image
255 RuntimeError: if the display type is invalid
261 cX, xs, uvs = Matrix(), Matrix(), Matrix()
# Copy so the int rounding/casting below doesn't alias the Matrix buffer.
263 uvs = uvs.numpy().copy()
# Per-keypoint reprojection error: pixel distance between the projected
# map points (uvs) and the detected keypoints (ps). NOTE(review): ps is
# assigned in an elided line — confirm in full source.
264 error = np.linalg.norm(uvs - ps, axis=1)
265 threshold = self.
environment_map.point_map.getOutlierReprojectionErrorThreshold()
268 max_error_display = np.maximum(np.max(error), self.
environment_map.point_map.getOutlierReprojectionErrorThreshold())
# Round to integer pixel positions for drawing.
269 ps = np.rint(ps).astype(np.int32)
270 uvs = np.rint(uvs).astype(np.int32)
272 if self.
display_type == XFeatVisualOdometry.DisplayType.SIMPLE:
# Columns are passed as (ps[:, 1], ps[:, 0]) — presumably the display API
# expects (row, col) while ps stores (x, y); confirm against the API.
273 Display.displayCrosses(IRGB, ps[:, 1], ps[:, 0], 8, Color.blue, 1)
275 elif self.
display_type == XFeatVisualOdometry.DisplayType.SIMPLE_MODEL_AND_PROJ:
276 Display.displayCrosses(IRGB, ps[:, 1], ps[:, 0], 8, Color.blue, 1)
# NOTE(review): the inner np.rint(...).astype(np.int32) is redundant —
# uvs was already rounded and cast to int32 at orig. line 270.
277 Display.displayCrosses(IRGB, uvs[:, 1], np.rint(uvs[:, 0]).astype(np.int32), 8, Color.red, 1)
279 elif self.
display_type == XFeatVisualOdometry.DisplayType.WEIGHT_AND_ERROR:
281 for p
in range(len(error)):
# Error normalized to [0, 1], clamped at the outlier threshold.
282 e = np.minimum(error[p], threshold) / threshold
# Color components: e -> red (orig. 284), 1 - e -> blue (orig. 286).
# NOTE(review): docstring says blue encodes the optimization *weight*,
# but the visible component is error-based (1 - e); the weight lines may
# be among the elided 283/285/287-289 — verify.
284 np.rint((e) * 255).astype(np.uint8),
286 np.rint((1.0 - e) * 255).astype(np.uint8),
290 Display.displayCircleStatic(IRGB, ps[p, 1], ps[p, 0], 2, c,
True, 1)
# NOTE(review): presumably inside an elided `else:` branch (orig. 291),
# reached only for unknown display types — confirm in full source.
292 raise RuntimeError(
'Display type not implemented')