geormetry.py
import numpy as np
import torch
import torch.nn.functional as F
import cv2
from comon import *  # project module; expected to provide shared globals such as `device`
#def get_intrinsic(file_name,image_length):
# text_file = open('file_name','r')
def world_coordinate_generator(R, K, t, u, v, d):
    # Back-project pixel (u, v) with depth d: X_world = R^T (d * K^{-1} [u, v, 1]^T - t),
    # matching the (x - c) * z / f convention used in depth_map_to_cam_coordinates.
    vect = d * np.array([u, v, 1.0])
    a = R.T
    b = np.linalg.inv(K)
    ans = a @ (b @ vect - t)  # matrix products, not elementwise multiplication
    return ans
def get_normal_map_from_depth_map(d_im):
    # d_im: (h, w, c) depth image; only channel 0 is read.
    h, w, _ = d_im.shape
    normals = torch.zeros(h, w, 3)
    for i in range(1, w - 1):
        for j in range(1, h - 1):
            t = np.array([i, j - 1, d_im[j - 1, i, 0]], dtype="float64")
            f = np.array([i - 1, j, d_im[j, i - 1, 0]], dtype="float64")
            c = np.array([i, j, d_im[j, i, 0]], dtype="float64")
            # Surface normal from the cross product of two local tangent vectors.
            d = torch.cross(torch.tensor(f - c), torch.tensor(t - c), dim=0)
            n = F.normalize(d, dim=-1)
            normals[j, i, :] = n
    return normals * 255
def depth_map_to_cam_coordinates(x_im, y_im, z, intrinsics):
    # Inverse pinhole projection: x_cam = (u - cx) * z / fx, y_cam = (v - cy) * z / fy.
    fx, fy, cx, cy = get_intrinsic_params(intrinsics)
    batch_size = x_im.shape[0]
    num_pixels = x_im.shape[1]
    # Broadcast the per-batch intrinsic parameters over every pixel.
    cx_1 = cx[:, None].expand(batch_size, num_pixels).to(device)
    cy_1 = cy[:, None].expand(batch_size, num_pixels).to(device)
    fx_1 = fx[:, None].expand(batch_size, num_pixels).to(device)
    fy_1 = fy[:, None].expand(batch_size, num_pixels).to(device)
    x_im = x_im.to(device)
    y_im = y_im.to(device)
    x_cam = ((x_im - cx_1) * z) / fx_1
    y_cam = ((y_im - cy_1) * z) / fy_1
    x_cam = x_cam.float()
    y_cam = y_cam.float()
    return torch.stack((x_cam, y_cam, z), dim=-1)
def get_intrinsic_params(intrinsic_matrix):
    # Extract per-batch focal lengths and principal point from intrinsic matrices.
    fx = intrinsic_matrix[:, 0, 0]
    fy = intrinsic_matrix[:, 1, 1]
    cx = intrinsic_matrix[:, 0, 2]
    cy = intrinsic_matrix[:, 1, 2]
    return fx, fy, cx, cy
def pixel_depth_to_world_coordinates(xy_grid, depth_map, intrinsics, extrinsics):
    batch_size = xy_grid.shape[0]
    x_cam = xy_grid[:, :, 0].view(batch_size, -1)
    y_cam = xy_grid[:, :, 1].view(batch_size, -1)
    z_cam = depth_map.view(batch_size, -1)
    camera_3d_coordinates = depth_map_to_cam_coordinates(x_cam, y_cam, z_cam, intrinsics)
    # Lift to homogeneous coordinates and apply the extrinsic transform
    # (assumed camera-to-world, shape (batch, 4, 4)); an elementwise product
    # with the extrinsic matrix is not a valid rigid transform.
    ones = torch.ones_like(camera_3d_coordinates[..., :1])
    homogeneous = torch.cat((camera_3d_coordinates, ones), dim=-1)
    world_coordinates = torch.bmm(homogeneous, extrinsics.transpose(1, 2))
    return world_coordinates[..., :3]
def pixels_form_camera(x, y, z, intrinsics):
    # Pinhole projection of camera-frame points back to pixel coordinates.
    fx, fy, cx, cy = get_intrinsic_params(intrinsics)
    u = (x * fx) / z + cx
    v = (y * fy) / z + cy
    return torch.stack((u, v, z), dim=-1)
def get_depths_from_world_coordinates(x, y, z, extrinsic_matrix):
    # x, y, z are assumed to be (batch, n_points, 1); build homogeneous points (batch, n_points, 4).
    homogenous_points = torch.cat((x, y, z, torch.ones_like(x)), dim=-1)
    # Map world points into the camera frame with the inverse extrinsic
    # (assumed camera-to-world, shape (batch, 4, 4)) and keep z as the depth.
    camera_coordinates = torch.bmm(homogenous_points, torch.inverse(extrinsic_matrix).transpose(1, 2))
    depth = camera_coordinates[:, :, 2]
    depth = depth[:, :, None]
    return depth
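# ---------------------------------------------------------------------------
# Minimal usage sketch (only runs when this file is executed directly).
# The intrinsics, pose, and depth values below are illustrative assumptions,
# not taken from the rest of the project.
if __name__ == "__main__":
    # One pinhole intrinsic matrix in NumPy, plus a batch of two copies as a tensor.
    K_np = np.array([[500.0, 0.0, 320.0],
                     [0.0, 500.0, 240.0],
                     [0.0, 0.0, 1.0]])
    K = torch.tensor(K_np)[None].expand(2, 3, 3)
    fx, fy, cx, cy = get_intrinsic_params(K)
    print("fx:", fx, "cx:", cx)

    # Back-project one pixel with an identity rotation and zero translation.
    point = world_coordinate_generator(np.eye(3), K_np, np.zeros(3), u=100, v=80, d=2.0)
    print("world point:", point)

    # Normal map from a small random single-channel depth image.
    fake_depth = np.random.rand(8, 8, 1)
    normals = get_normal_map_from_depth_map(fake_depth)
    print("normal map shape:", tuple(normals.shape))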