Visual Servoing Platform version 3.7.0
Loading...
Searching...
No Matches
TrackedDescriptorMap.py
35
36from typing import Any, Dict, List, Optional
37import numpy as np
38import torch
39import time
40
41from visp.core import CameraParameters, HomogeneousMatrix
42from visp.rbt import RBFeatureTrackerInput, PointMap
43from visp.core import Matrix
44from visp.core import ArrayInt2D, ImageFloat, ImageRGBf
45
47 """
48 A map that associates 3D points in a common frame to a visual descriptive representation (i.e., an XFeat or Sift descriptor).
49 The map has a maximum number of points that are stored and least recently added points are replaced first when maximum capacity is reached.
50 """
def __init__(self, num_points: int, reprojection_threshold: float, min_dist_new_point: float, max_depth_error_visible: float):
    """Initialize the map storage and its filtering parameters.

    Args:
        num_points (int): Maximum number of points that can be stored in the map.
        reprojection_threshold (float): Reprojection threshold (in pixels) that is used to filter outliers.
            When map points are matched with points in an image, if their reprojection is too far from the
            actually observed point, then they are marked as outliers and removed.
        min_dist_new_point (float): Minimum distance (in meters) that a point should have to all the map
            points to be considered as a candidate for addition to the map.
        max_depth_error_visible (float): Maximum depth (in meters) error that is tolerated for a point to be
            considered as visible. Used to filter out self occlusion.
    """
    # Descriptor tensor, one row per stored map point; None until points are added.
    # Fixed annotation: torch.Tensor is the type, torch.tensor is a factory function.
    self.descriptors: Optional[torch.Tensor] = None
    # Staged (X, normals, descriptors) set by set_points_to_add(), consumed by finalize_update().
    self.data_for_update = None
    # Indices staged by mark_points_to_remove(), consumed by finalize_update().
    self.indices_removal = None

    self.reprojection_threshold: float = reprojection_threshold
    self.num_points: int = num_points
    self.min_dist_new_point: float = min_dist_new_point
    self.max_depth_error_visible: float = max_depth_error_visible
72
def reset(self):
    """Empty the underlying point map and drop all cached per-point state."""
    self.point_map.clear()
    self.descriptors = None
    self.data_for_update = None
    self.indices_removal = None
78
def parse_settings(self, d: Dict[str, Any]):
    """Update map settings from a dictionary and propagate them to the point map.

    Args:
        d (Dict[str, Any]): Dictionary containing the map parameters. Required keys:
            'reprojectionThreshold' (px), 'numPoints', 'minDistNewPoint' (m),
            'maxDepthErrorVisible' (m), 'maxDepthErrorCandidate' (m).
            Optional key: 'maxNormalThresholdVisible' (degrees, defaults to 180,
            i.e. the normal-based visibility criterion is effectively disabled).

    Raises:
        KeyError: If one of the required keys is missing from ``d``.
    """
    self.reprojection_threshold = d['reprojectionThreshold']
    self.num_points = d['numPoints']
    self.min_dist_new_point = d['minDistNewPoint']
    self.max_depth_error_visible = d['maxDepthErrorVisible']
    self.max_depth_error_candidates = d['maxDepthErrorCandidate']
    self.max_normal_deg = d.get('maxNormalThresholdVisible', 180)

    # Push the parsed values down to the underlying point map.
    # (Misspelled setter name 'Visibilty' is the project API; do not "fix" it here.)
    self.point_map.setOutlierReprojectionErrorThreshold(self.reprojection_threshold)
    self.point_map.setNumMaxPoints(self.num_points)
    self.point_map.setMinDistanceAddNewPoints(self.min_dist_new_point)
    self.point_map.setMaxDepthErrorVisibilityCriterion(self.max_depth_error_visible)
    self.point_map.setMaxDepthErrorCandidate(self.max_depth_error_candidates)
    self.point_map.setThresholdNormalVisibiltyCriterion(self.max_normal_deg)
98
def has_points(self) -> bool:
    """Returns whether this map stores any points.

    Returns:
        bool: True when the underlying point map holds at least one 3D point.
    """
    row_count = self.point_map.getPoints().getRows()
    return row_count > 0
106
def update(self, frame: "RBFeatureTrackerInput", cMo: "HomogeneousMatrix", render_depth: "ImageFloat", depth_map: "ImageFloat", idx_curr_matched: np.ndarray, idx_matched_map: np.ndarray, current_kps: np.ndarray, current_descriptors: torch.Tensor) -> List[int]:
    """ Update the map: Remove outliers and add new points

    Args:
        frame (RBFeatureTrackerInput): Input frame
        cMo (HomogeneousMatrix): Current pose corresponding to the acquired frame
        render_depth (ImageFloat): Depth in render (may be None, in which case an empty image is substituted)
        depth_map (ImageFloat): Depth observed by the camera (can be zero sized image, in which case it isn't used)
        idx_curr_matched (np.ndarray): Indices of the points that were matched for the set of current keypoints (in the camera frame). May be None.
        idx_matched_map (np.ndarray): Indices of the points that were matched for the set of points from this map. After this method has been called it may no longer be valid.
        current_kps (np.ndarray): N x 2 array that contains the pixel locations of all the keypoints detected in the image. Will be indexed with idx_curr_matched
        current_descriptors (torch.Tensor): N x D tensor containing the descriptors associated to the keypoints observed in the image.

    Returns:
        List[int]: The indices of the points that were removed. Empty when no keypoint data was provided.
    """
    if current_descriptors is None or current_kps is None:
        # Fixed: was a bare `return` (i.e. None) although the signature declares
        # List[int]; an empty list keeps the return type valid on every path.
        return []

    if idx_curr_matched is not None:
        assert len(idx_curr_matched.shape) == 1
        assert len(idx_matched_map.shape) == 1
        assert len(idx_curr_matched) == len(idx_matched_map)

    # For matched points, find outliers and mark them for removal.
    indices_outliers = []
    if self.has_points() and idx_curr_matched is not None and idx_matched_map is not None:
        cam = frame.cam
        if len(idx_matched_map) > 0:
            idx_map = ArrayInt2D.view(idx_matched_map[:, None].astype(np.int32))
            cX, x, uvs = Matrix(), Matrix(), Matrix()
            self.point_map.project(cam, idx_map, cMo, cX, x, uvs)
            indices_outliers = self.point_map.getOutliers(idx_map, uvs, Matrix.view(np.ascontiguousarray(current_kps[idx_curr_matched])))
            # Refresh the stored descriptors of matched map points with the latest observation.
            self.descriptors[idx_matched_map] = current_descriptors[idx_curr_matched]

    self.mark_points_to_remove(indices_outliers)

    num_points = len(current_kps)

    # Keypoints not matched against the map are candidates for insertion.
    if idx_curr_matched is None:
        indices_points_not_matched_object = np.arange(num_points, dtype=np.int32)
    else:
        indices_points_not_matched_object = np.ascontiguousarray(np.setdiff1d(np.arange(num_points, dtype=np.int32), idx_curr_matched, assume_unique=True))
    points_to_add_px = np.ascontiguousarray(current_kps[indices_points_not_matched_object], dtype=np.float64)
    if len(indices_points_not_matched_object) == 0:
        indices_array = ArrayInt2D()
    else:
        indices_array = ArrayInt2D.view(indices_points_not_matched_object[..., None])

    oX_new = Matrix()
    oN_new = Matrix()
    cam = frame.cam

    # NOTE(review): when no render depth is available, empty images are passed —
    # presumably selectValidNewCandidates then skips the depth/normal checks; confirm.
    render_normals = frame.renders.normals if render_depth is not None else ImageRGBf()
    if render_depth is None:
        render_depth = ImageFloat()

    indices_not_matched_to_add = self.point_map.selectValidNewCandidates(cam, cMo, indices_array, Matrix.view(points_to_add_px), render_depth, depth_map, render_normals, oX_new, oN_new)

    # Stage additions and removals, then apply both atomically in finalize_update().
    self.set_points_to_add(oX_new, current_descriptors[indices_not_matched_to_add], oN_new)
    indices_to_remove = self.finalize_update()

    return indices_to_remove
172
def mark_points_to_remove(self, indices):
    """Record the map-point indices that the next finalize_update() call should remove."""
    self.indices_removal = indices
175
def set_points_to_add(self, X: "Matrix", descriptors: torch.Tensor, normals: "Matrix"):
    """Stage new 3D points, their normals and descriptors for addition to the map.

    The data is only staged here; it is actually inserted by finalize_update().

    Args:
        X (Matrix): N x 3 matrix of 3D point coordinates in the object frame.
        descriptors (torch.Tensor): N x D tensor of descriptors, one row per point of X.
        normals (Matrix): N x 3 matrix of point normals, aligned with X.

    Raises:
        ValueError: If X or descriptors is None, if their row counts differ,
            or if X does not have exactly 3 columns.
    """
    # Explicit raises instead of asserts: validation must survive `python -O`,
    # which strips assert statements.
    if X is None or descriptors is None:
        raise ValueError("X and descriptors must not be None")
    if X.getRows() != descriptors.size(0):
        raise ValueError("X and descriptors must have the same number of rows")
    if X.getCols() != 3:
        raise ValueError("X must be an N x 3 matrix")

    self.data_for_update = (X, normals, descriptors)
182
183    def finalize_update(self) -> List[int]:
184        """Actually remove outliers and add new points to the map.
185        This step needs to be done in one go so as to not invalidate any indexing that was done prior to this.
186
187        Returns:
188            List[int]: The indices of the points that were removed
189        """
        # Fallback value; normally overwritten by the updatePoints() result below.
190        removed_indices = self.indices_removal or []
        # Build the N x 1 int32 array of outlier indices to drop (empty when none were marked).
191        if self.indices_removal is None or len(self.indices_removal) == 0:
192            outlier_array = ArrayInt2D()
193        else:
194            outlier_array = ArrayInt2D.view(np.ascontiguousarray(self.indices_removal)[:, None].astype(np.int32))
        # Atomically remove outliers and insert the staged points/normals.
        # NOTE(review): self.data_for_update is dereferenced here before the
        # `is not None` guard further down — this appears to assume
        # set_points_to_add() was always called first; confirm with callers.
195        removed_indices, num_added_points = self.point_map.updatePoints(outlier_array, self.data_for_update[0], self.data_for_update[1])
196
        # Drop the descriptor rows of removed points so descriptor rows stay aligned with map rows.
197        if len(removed_indices) > 0:
198            if len(removed_indices) == len(self.descriptors):
199                self.descriptors = None
200            else:
201                kept_indices = np.setdiff1d(np.arange(len(self.descriptors)), np.ascontiguousarray(removed_indices), assume_unique=True)
202                kept_indices = torch.tensor(kept_indices, device=self.descriptors.device)
203                self.descriptors = self.descriptors.index_select(0, kept_indices)
        # Append descriptors for the points that were actually inserted; only the first
        # num_added_points rows are used — presumably updatePoints may insert fewer
        # than staged (e.g. when capacity is reached); confirm against PointMap.
204        if self.data_for_update is not None:
205            (_, _, descriptors) = self.data_for_update
206            if self.descriptors is None:
207                self.descriptors = descriptors[:num_added_points]
208            else:
209                self.descriptors = torch.concatenate((self.descriptors, descriptors[:num_added_points]), dim=0)
210
        # Clear the staged state so the same update cannot be applied twice.
211        self.data_for_update = None
212        self.indices_removal = None
213
214        return removed_indices
215
def get_px_proj(self, indices: np.ndarray, cam: CameraParameters, cMo: HomogeneousMatrix) -> Matrix:
    """Helper function to compute the pixel coordinates of a set of map points.

    Args:
        indices (np.ndarray): The indices of the points for which to compute the projection
        cam (CameraParameters): Camera intrinsics
        cMo (HomogeneousMatrix): Camera to map reference pose

    Returns:
        Matrix: an indices.shape[0] x 2 Matrix containing the pixel coordinates of the 3D points in the camera frame
    """
    if len(indices) == 0:
        index_array = ArrayInt2D()
    else:
        index_array = ArrayInt2D(indices[..., None].astype(np.int32))
    points_cam = Matrix()
    points_normalized = Matrix()
    pixels = Matrix()
    self.point_map.project(cam, index_array, cMo, points_cam, points_normalized, pixels)
    return pixels
__init__(self, int num_points, float reprojection_threshold, float min_dist_new_point, float max_depth_error_visible)
List[int] update(self, RBFeatureTrackerInput frame, HomogeneousMatrix cMo, ImageFloat render_depth, ImageFloat depth_map, np.ndarray idx_curr_matched, np.ndarray idx_matched_map, np.ndarray current_kps, torch.Tensor current_descriptors)
Matrix get_px_proj(self, np.ndarray indices, CameraParameters cam, HomogeneousMatrix cMo)
set_points_to_add(self, Matrix X, torch.tensor descriptors, Matrix normals)