# NOTE: excerpt starts mid-file; the import header is elided. The code below
# assumes `numpy as np`, `torch`, `pathlib.Path`, `typing.Tuple`, and the ViSP
# Python bindings (HomogeneousMatrix, Matrix, ColVector, Robust, Display, Color,
# ImageRGBa, CameraParameters, PixelMeterConversion, MeterPixelConversion,
# PoseVector), plus the project's XFeat classes and RBVisualOdometryUtils.

def load(self, path: Path, view_id: int) -> bool:
    self.view_id = view_id
    load_folder = path / str(self.view_id)
    if not load_folder.exists():
        return False
    self.points_3d = np.load(load_folder / 'points.npy')
    self.pose = HomogeneousMatrix(np.load(load_folder / 'pose.npy'))
    for k in ['keypoints', 'descriptors', 'scales']:
        self.representation[k] = torch.tensor(np.load(load_folder / f'{k}.npy')).cuda()
    # ... (elided)
    return True
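# On-disk layout implied by load(): one folder per view id containing
#   points.npy    -- (N, 4) homogeneous 3D points (see record() below)
#   pose.npy      -- 4x4 camera-from-object pose of the recorded view
#   keypoints.npy / descriptors.npy / scales.npy -- the XFeat representation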
# ... (inside the 3D point-based pose refinement; method signature elided)
cMo = HomogeneousMatrix(self.pose)
oMc = self.pose.inverse()
# Depth lookup at the matched pixels: matched_obs stores (u, v), while the
# depth map is indexed as [row, col] = [v, u]
Zs = depth.numpy()[self.matched_obs[:, 1].astype(np.int32),
                   self.matched_obs[:, 0].astype(np.int32)][..., None]
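# Back-projection reference (ViSP pinhole model, assumed from record() below):
#   x = (u - u0) / px,  y = (v - v0) / py,  X = (x * Z, y * Z, Z)
# which is presumably how the elided lines turn matched_obs + Zs into 3D
# observations.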
# ... (elided: construct oX, the homogeneous object-frame points, and
# observations, presumably back-projected from Zs)
oX_3 = np.ascontiguousarray(oX[..., :3])
observations = Matrix.view(observations)
# Refine cMo in place by Levenberg-Marquardt on 3D point residuals
RBVisualOdometryUtils.levenbergMarquardtKeypoints3D(Matrix.view(oX_3), observations, self.optim_params, cMo)
# Express the object points in the refined camera frame
cX = (cMo.numpy() @ oX.T).T
# Per-point 3D residuals between observations and transformed model points
distances = np.linalg.norm(observations.numpy() - cX[..., :3], axis=-1)
# Tukey M-estimator (`robust` is a Robust instance created outside this excerpt):
# weights in [0, 1] that downweight outlier residuals
weights = ColVector(len(distances))
robust.MEstimator(Robust.TUKEY, ColVector.view(distances), weights)
res = distances * weights.numpy()
# ... (elided: res is aggregated into self.error)
return cMo, self.error <= threshold
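# For reference: Tukey's biweight is w(r) = (1 - (r/c)^2)^2 for |r| <= c and 0
# otherwise, with the scale c derived from the residual distribution (ViSP uses
# a MAD-based estimate), so gross outliers contribute nothing to the error.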
# ... (inside the 2D reprojection-based refinement; the method signature and
# the construction of oX / observations are elided)
cMo = HomogeneousMatrix(self.pose)
oMc = self.pose.inverse()
# ...
oX_3 = np.ascontiguousarray(oX[..., :3])
# Refine cMo in place by Levenberg-Marquardt on 2D reprojection residuals
RBVisualOdometryUtils.levenbergMarquardtKeypoints2D(Matrix.view(oX_3), Matrix.view(observations), self.optim_params, cMo)
cX = (cMo.numpy() @ oX.T).T
# Perspective division to normalized image coordinates
new_xy = cX[:, :2] / cX[:, 2, None]
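# i.e. x = X/Z and y = Y/Z in meters; MeterPixelConversion below then applies
# the ViSP camera model without distortion: u = u0 + x * px, v = v0 + y * py.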
print(f'Used {len(oX)} points for optimization')
new_us, new_vs = MeterPixelConversion.convertPoints(cam, new_xy[:, 0], new_xy[:, 1])
new_pix = np.concatenate((new_us[..., None], new_vs[..., None]), axis=-1)
distances = np.linalg.norm(self.matched_obs - new_pix, axis=-1)
# ...
self.error = np.median(distances)
print(f'Final error for view {self.view_id}: {self.error}')
return cMo, self.error <= threshold
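# Using the median keeps self.error robust to the few remaining outlier
# matches, so `self.error <= threshold` reflects the typical reprojection
# quality in pixels rather than being dragged up by gross mismatches.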
# ... (inside a visualization helper; method signature elided)
IG = ImageRGBa(image)
# ...
cTr = cTo * self.pose.inverse()
points_cam = (cTr.numpy() @ self.points_3d.T).T
xs, ys = points_cam[:, 0] / points_cam[:, 2], points_cam[:, 1] / points_cam[:, 2]
us, vs = MeterPixelConversion.convertPoints(cam, xs, ys)
for i in range(len(us)):
    Display.displayPoint(IG, int(np.rint(vs[i])), int(np.rint(us[i])), Color.green, 2)
# ...
Display.displayFrame(IG, self.pose, cam, 0.05, Color.yellow)
Display.displayFrame(IG, cTo, cam, 0.05)
# ...
Display.getKeyboardEvent(IG, True)
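# Note: ViSP's Display.displayPoint takes (i, j) = (row, column), hence v
# before u above; getKeyboardEvent(IG, True) blocks until a key is pressed.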
# ... (inside a second visualization helper comparing poses; signature elided)
oMc = self.pose.inverse()
# ... (elided: build ViSP points from the object-frame coordinates oX; inside
# that loop:)
p.setWorldCoordinates(oX[i, 0], oX[i, 1], oX[i, 2])
# ...
IG = ImageRGBa(image)
# ...
for i, p in enumerate(points):
    # Project p under the stored view pose
    p.changeFrame(self.pose)
    # ...
    u1, v1 = MeterPixelConversion.convertPoint(cam, p.get_x(), p.get_y())
    Display.displayPoint(IG, int(np.rint(v1)), int(np.rint(u1)), Color.blue, 2)
    Display.displayPoint(IG, int(np.rint(self.matched_obs[i, 1])), int(np.rint(self.matched_obs[i, 0])), Color.green, 2)
    # ... (elided: re-project p, presumably under the refined pose cMo)
    u2, v2 = MeterPixelConversion.convertPoint(cam, p.get_x(), p.get_y())
    Display.displayPoint(IG, int(np.rint(v2)), int(np.rint(u2)), Color.red, 2)
    Display.displayLine(IG, int(np.rint(v2)), int(np.rint(u2)),
                        int(np.rint(self.matched_obs[i, 1])), int(np.rint(self.matched_obs[i, 0])),
                        Color.red, 1, segment=True)
# ...
Display.displayFrame(IG, self.pose, cam, 0.05, Color.yellow)
Display.displayFrame(IG, cMo, cam, 0.05)
# ...
Display.getKeyboardEvent(IG, True)
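# Overlay legend: green = matched keypoint observations, blue = model points
# projected with the stored view pose (yellow frame), red = the same points
# under cMo, with red segments showing the residual to each observation.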
def record(self, frame: RBFeatureTrackerInput, cTo: HomogeneousMatrix):
    """Record a new viewpoint.

    Args:
        frame (RBFeatureTrackerInput): Tracker input providing the camera
            parameters and the renders (depth map, render pose cMo) used to
            build the viewpoint.
        cTo (HomogeneousMatrix): Pose of the object in the camera frame at
            recording time.
    """
    # ... (elided: compute the XFeat representation of the frame)
    kps, descriptors = representation.keypoints, representation.descriptors
    # ...
    if isinstance(representation, XFeatStarRepresentation):
        scales = representation.scales
    else:
        scales = np.ones_like(kps[..., 0])
    # Back-project every keypoint with the rendered depth
    us, vs = kps[:, 0], kps[:, 1]
    xs, ys = PixelMeterConversion.convertPoints(frame.cam, us, vs)
    # Depth map is indexed [row, col] = [v, u]
    Z = frame.renders.depth.numpy()[np.rint(vs).astype(np.int32), np.rint(us).astype(np.int32)]
    depth_ok = Z > 0  # keep only keypoints with valid rendered depth
    xs, ys, Z, kps, descriptors, scales = [array[depth_ok] for array in [xs, ys, Z, kps, descriptors, scales]]
    # Homogeneous 3D points in the recording camera frame: (x*Z, y*Z, Z, 1)
    points_3d = np.empty((len(xs), 4))
    points_3d[:, 0] = xs * Z
    points_3d[:, 1] = ys * Z
    points_3d[:, 2] = Z
    points_3d[:, 3] = 1.0
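    # Worked example (hypothetical values): with u0 = 320, px = 600, a keypoint
    # at u = 620 gives x = (620 - 320) / 600 = 0.5; at depth Z = 0.4 m this
    # yields X = x * Z = 0.2, so the stored point is (0.2, y * Z, 0.4, 1.0).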
    assert len(points_3d) == len(descriptors) == len(kps) == len(scales)
    # ... (elided: construct the viewpoint object)
    viewpoint.view_id = len(self.views)
    viewpoint.pose = HomogeneousMatrix(frame.renders.cMo)
    print(f'Recorded view {viewpoint.view_id} with {len(descriptors)} keypoints and pose {PoseVector(viewpoint.pose).t()}')
    viewpoint.points_3d = points_3d
    viewpoint.descriptors = descriptors
    viewpoint.representation = {
        'keypoints': kps,  # keys mirror those read back in load()
        'descriptors': descriptors,
        'scales': scales,
    }
    self.views.append(viewpoint)
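# Each stored view thus bundles an integer id, the render pose, the valid
# back-projected 3D points, and the raw XFeat representation; these are exactly
# the arrays that load() above reads back, one folder per view id (a matching
# save() counterpart is implied but not shown in this excerpt).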
def estimate_pose(self, image: ImageRGBa, depth, cam: CameraParameters) -> Tuple[bool, HomogeneousMatrix]:
    # ... (elided: compute the XFeat representation of the input image)
    kps, descriptors = representation.keypoints, representation.descriptors
    if isinstance(representation, XFeatStarRepresentation):
        scales = representation.scales
    else:
        scales = np.ones_like(kps[..., 0])
    current_representation = {
        'keypoints': kps,  # same keys as the stored view representations
        'descriptors': descriptors,
        'scales': scales,
    }
    # ...
    current_representation = XFeatViewPointPoseEstimator.ViewPointMatcher.reformat_repr_if_needed(current_representation)
    # Match the current image against every recorded view and score each match
    scores = []
    for view in self.views:
        scores.append(view.match_and_score(cam, self.xfeat_backend, current_representation))
    # Keep only views whose match score clears the configured minimum ratio
    views_enough_matches = list(filter(lambda x: x[1] > self.min_match_ratio, zip(self.views, scores)))
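    # min_match_ratio gates how many candidates reach the per-view pose
    # refinement below: raising it trades robustness to viewpoint change for
    # speed, since fewer views are refined per query image.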
    if len(views_enough_matches) == 0:
        return False, HomogeneousMatrix()
    # ...
    final_views, errors, poses = [], [], []
    for view, score in views_enough_matches:
        # ... (elided: refine this candidate's pose; on success, append it to poses)
        final_views.append(view)
        # ...
        errors.append(view.error)
    if len(errors) == 0:  # guard assumed from context; the excerpt elides the condition
        return False, HomogeneousMatrix()
    # Pick the candidate view with the lowest refinement error
    pose_ok = len(errors) > 0
    best_view_index = np.argmin(errors)
    cMo = poses[best_view_index]
    # ... (elided: presumably `return pose_ok, cMo`)
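# Sketch of the intended call pattern (`estimator` is a hypothetical instance
# of the surrounding class; names as above):
#   ok, cMo = estimator.estimate_pose(image, depth, cam)
#   if ok:
#       ...  # cMo is the camera-from-object pose refined against the best view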