forked from braingram/pyspringmesh
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathcuda_test.py
67 lines (57 loc) · 1.55 KB
/
cuda_test.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
#!/usr/bin/env python
"""Benchmark the CUDA spring-mesh relaxation in ``springmesh``.

Generates a triangle mesh, runs ``n_iters`` timed batches of ``n`` GPU
relaxation steps each, reports per-batch wall time and free GPU memory,
and finally prints the average time per batch and per step.
"""
import time

import numpy
import pycuda.driver
import pylab

import springmesh

# --- benchmark configuration -------------------------------------------
size = (1000, 1000)  # mesh dimensions
#size = (200, 200)
#size = (100, 100)
n = 100              # relaxation steps per timed batch
bs = (1024, 1, 1)    # CUDA block size
show = False         # render the mesh after each batch?
n_iters = 10         # number of timed batches
s = 0.01             # relaxation step scale
delay = 0.5          # pause between plots (only relevant when show is True)


def _report_free_mem():
    """Print the percentage of GPU memory currently free."""
    (free, total) = pycuda.driver.mem_get_info()
    print("mem(free): %s" % (free * 100 / float(total)))


def main():
    # NOTE(review): Device(0) and mem_get_info assume the CUDA driver is
    # already initialized with an active context (e.g. pycuda.autoinit or
    # inside springmesh) -- confirm against the springmesh package.
    pycuda.driver.Device(0)

    print("size: %s" % (size, ))
    print("n: %s" % n)
    print("bs: %s" % (bs, ))
    print("n_iters: %s" % (n_iters, ))
    print("s: %s" % s)
    _report_free_mem()

    # Build the mesh and upload it to the GPU.
    gm = springmesh.gen.triangle_mesh(size, 0.01, 0.01, sel=0.5)
    springmesh.relax.cuda.prepare(gm, bs)
    _report_free_mem()
    print("springs[n]: %s" % (len(gm.springs), ))
    print("points[n]: %s" % (len(gm.points), ))

    i = 0   # total relaxation steps completed so far
    ts = []  # wall time of each timed batch
    for _ in range(n_iters):
        t0 = time.time()
        springmesh.relax.cuda.step_n(gm, n=n, s=s)
        t1 = time.time()
        i += n
        ts.append(t1 - t0)
        print(
            "i: %s, size: %s, n: %s, gpu[%s]: %s" % (
                i, size, n, gm.grid, t1 - t0))
        _report_free_mem()
        if not show:
            #time.sleep(delay)
            continue
        pylab.figure(1)
        pylab.clf()
        springmesh.render.mpl.plot(gm)
        pylab.title("gpu: %s" % i)
        pylab.show()

    # Pull the relaxed mesh back off the GPU.
    gm = springmesh.relax.cuda.finalize(gm)

    avg_t = numpy.mean(ts)
    print("Size: %s" % (size, ))
    print("Average time per run[%s]: %s" % (n, avg_t))
    print("Average time per step: %s" % (avg_t / float(n), ))


if __name__ == "__main__":
    main()