I am trying to warp a frame from view1 to view2 using a ground-truth depth map, pose information, and the camera matrix. I have been able to remove most of the for loops and vectorize the code, except for one for loop. When warping, multiple pixels in view1 may map to a single location in view2 due to occlusion. In that case, I need to pick the pixel with the lowest depth value (the foreground object). I am unable to vectorize this part of the code. Any help vectorizing this for loop is appreciated.
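To make the collision rule concrete, here is a toy illustration (all values invented, not from the question) of resolving it with a vectorized scatter-min: numpy.minimum.at accumulates the per-target minimum depth without a Python loop, and an equality test then marks the winning source pixels.

import numpy

# Toy example: source pixels 0 and 2 both land on flattened target index 7;
# pixel 2 is nearer (depth 2.0), so it should win.
targets = numpy.array([7, 3, 7])
depths = numpy.array([5.0, 1.0, 2.0])
nearest = numpy.full(10, numpy.inf)
numpy.minimum.at(nearest, targets, depths)  # unbuffered per-target minimum depth
keep = depths == nearest[targets]           # True only for the nearest pixel per target
print(keep)  # [False  True  True]

Note that the exact float equality keeps all tied pixels, so a tie-break would still be needed if exact depth duplicates matter. My current (loop-based) implementation follows.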
import numpy
from tqdm import tqdm


def warp_frame_04(frame1: numpy.ndarray, depth: numpy.ndarray, intrinsic: numpy.ndarray,
                  transformation1: numpy.ndarray, transformation2: numpy.ndarray,
                  convert_to_uint: bool = True, verbose_log: bool = True):
    """
    Vectorized forward warping. Nearest neighbor.
    Offset requirement of warp_frame_03() overcome.
    mask: 1 if a pixel was found, 0 if no pixel was found
    Drawback: nearest neighbor; collision resolving is not vectorized
    """
    height, width, _ = frame1.shape
    assert depth.shape == (height, width)
    # Relative pose taking points from the view1 camera frame to the view2 camera frame
    transformation = numpy.matmul(transformation2, numpy.linalg.inv(transformation1))

    y1d = numpy.array(range(height))
    x1d = numpy.array(range(width))
    x2d, y2d = numpy.meshgrid(x1d, y1d)
    ones_2d = numpy.ones(shape=(height, width))
    ones_4d = ones_2d[:, :, None, None]
    pos_vectors_homo = numpy.stack([x2d, y2d, ones_2d], axis=2)[:, :, :, None]

    intrinsic_inv = numpy.linalg.inv(intrinsic)
    intrinsic_4d = intrinsic[None, None]
    intrinsic_inv_4d = intrinsic_inv[None, None]
    depth_4d = depth[:, :, None, None]
    trans_4d = transformation[None, None]

    unnormalized_pos = numpy.matmul(intrinsic_inv_4d, pos_vectors_homo)  # Ray through each pixel
    world_points = depth_4d * unnormalized_pos  # Back-project to 3D in the view1 camera frame
    world_points_homo = numpy.concatenate([world_points, ones_4d], axis=2)
    trans_world_homo = numpy.matmul(trans_4d, world_points_homo)  # Move points to the view2 camera frame
    trans_world = trans_world_homo[:, :, :3]
    trans_norm_points = numpy.matmul(intrinsic_4d, trans_world)  # Project onto the view2 image plane
    trans_pos = trans_norm_points[:, :, :2, 0] / trans_norm_points[:, :, 2:3, 0]
    trans_pos_int = numpy.round(trans_pos).astype('int')

    # Solve occlusions: among all source pixels that land on the same target pixel,
    # keep only the one with the smallest depth (the foreground object).
    a = trans_pos_int.reshape(-1, 2)
    d = depth.ravel()
    b = numpy.unique(a, axis=0, return_index=True, return_counts=True)
    collision_indices = b[1][b[2] >= 2]  # Unique indices which are involved in a collision
    for c1 in tqdm(collision_indices, disable=not verbose_log):
        cl = a[c1].copy()  # Collision location
        ci = numpy.where((a[:, 0] == cl[0]) & (a[:, 1] == cl[1]))[0]  # Indices colliding at cl
        cci = ci[numpy.argmin(d[ci])]  # Index of the nearest point among ci
        a[ci] = [-1, -1]  # Discard every colliding pixel (sent off-frame) ...
        a[cci] = cl  # ... then restore only the nearest one
    trans_pos_solved = a.reshape(height, width, 2)

    # Offset both axes by 1 and clamp out-of-frame motion to the edge; the 1-pixel border is cropped below
    trans_pos_offset = trans_pos_solved + 1
    trans_pos_offset[:, :, 0] = numpy.clip(trans_pos_offset[:, :, 0], a_min=0, a_max=width + 1)
    trans_pos_offset[:, :, 1] = numpy.clip(trans_pos_offset[:, :, 1], a_min=0, a_max=height + 1)

    warped_image = numpy.ones(shape=(height + 2, width + 2, 3)) * numpy.nan
    warped_image[trans_pos_offset[:, :, 1], trans_pos_offset[:, :, 0]] = frame1
    cropped_warped_image = warped_image[1:-1, 1:-1]
    mask = numpy.isfinite(cropped_warped_image)
    cropped_warped_image[~mask] = 0
    if convert_to_uint:
        final_warped_image = cropped_warped_image.astype('uint8')
    else:
        final_warped_image = cropped_warped_image
    mask = mask[:, :, 0]
    return final_warped_image, mask
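For reference, here is one possible vectorized replacement for the collision loop, sketched under the assumption that sending losers to (-1, -1) (later clipped into the cropped border) is the desired behaviour. The idea: sort the flattened pixels by target location and then by depth with numpy.lexsort, so that within each group of colliding pixels the first row is the nearest one, keep only those first rows, and undo the sort. The helper name resolve_collisions_vectorized is mine, not from the question.

import numpy

def resolve_collisions_vectorized(trans_pos_int: numpy.ndarray, depth: numpy.ndarray) -> numpy.ndarray:
    height, width = depth.shape
    a = trans_pos_int.reshape(-1, 2)
    d = depth.ravel()
    # Sort by target x, then target y, then depth: colliding pixels become
    # contiguous, with the nearest one first in each group.
    order = numpy.lexsort((d, a[:, 1], a[:, 0]))
    a_sorted = a[order]
    # Mark the first row of every group of identical targets (the winner).
    winners = numpy.ones(a_sorted.shape[0], dtype=bool)
    winners[1:] = numpy.any(a_sorted[1:] != a_sorted[:-1], axis=1)
    # Losers are sent off-frame, exactly like the loop does.
    resolved = numpy.full_like(a_sorted, -1)
    resolved[winners] = a_sorted[winners]
    # Undo the sort so rows correspond to source pixels again.
    out = numpy.empty_like(resolved)
    out[order] = resolved
    return out.reshape(height, width, 2)

With this helper, everything from a = trans_pos_int.reshape(-1, 2) down to trans_pos_solved = a.reshape(height, width, 2) collapses to trans_pos_solved = resolve_collisions_vectorized(trans_pos_int, depth), and the tqdm dependency becomes unnecessary. Because numpy's sort is stable, depth ties resolve to the first source pixel in scan order, matching numpy.argmin in the loop.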