-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtestscript.py
More file actions
70 lines (58 loc) · 1.91 KB
/
testscript.py
File metadata and controls
70 lines (58 loc) · 1.91 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
# converted to convenient script form
# just run python -m testscript
# it should go without saying that you need a CUDA-enabled PyTorch installation before running this will work
# I mean, that's the whole point of this script, to test that very installation
# Imports
import time
import torch
# functions
def timeFun(f, dim, iterations, device='cpu'):
    """Time `iterations` calls of f(dim, device) and print the total wall time.

    Args:
        f: callable taking (dim, device)
        dim: integer problem size forwarded to f
        iterations: number of timed calls to make
        device: "cpu" or "cuda"
    Returns:
        None; the accumulated timing is printed.
    """
    # Only synchronize when we are actually timing CUDA work.
    use_cuda = device != 'cpu' and torch.cuda.is_available()
    t_total = 0.0
    for _ in range(iterations):
        if use_cuda:
            # CUDA kernels launch asynchronously; drain pending work so the
            # start timestamp doesn't absorb earlier launches.
            torch.cuda.synchronize()
        start = time.time()
        f(dim, device)
        if use_cuda:
            # Wait for the kernels launched by f to finish before reading the
            # clock; otherwise only the (cheap) launch overhead is measured.
            torch.cuda.synchronize()
        end = time.time()
        t_total += end - start
    # The original if/else printed the identical message in both branches,
    # so a single print suffices.
    print(f"time taken for {iterations} iterations of {f.__name__}({dim}, {device}): {t_total:.5f}")
def set_device():
    """Report and return the best available compute device.

    Returns:
        "cuda" when a CUDA GPU is usable, otherwise "cpu"; also prints
        an availability message.
    """
    if torch.cuda.is_available():
        print("GPU is available")
        return "cuda"
    print("GPU is not available")
    return "cpu"
def simpleFun(dim, device):
    """Run a small tensor workload (allocation, elementwise and matrix products).

    Args:
        dim: integer edge length of the square tensors
        device: "cpu" or "cuda"
    Returns:
        Nothing.
    """
    # Allocate directly on the target device (device=...) rather than using
    # .to(): this skips the host-to-device transfer overhead entirely.
    lhs = torch.rand(dim, dim, device=device)        # uniform [0,1), dim x dim
    rhs = torch.rand_like(lhs, device=device)        # uniform [0,1), dim x dim
    twos = 2 * torch.ones_like(lhs, device=device)   # constant 2, dim x dim
    hadamard = lhs * rhs                             # elementwise product
    product = lhs @ rhs                              # matrix product
    # Drop all references so repeated timing runs don't accumulate allocations.
    del lhs, rhs, twos, hadamard, product
# Script entry point: benchmark simpleFun on the CPU and on the detected device.
# Guarded so that importing this module doesn't trigger the benchmark;
# `python -m testscript` still runs it (__name__ == "__main__" under -m).
if __name__ == "__main__":
    DEVICE = set_device()
    # Benchmark parameters: a single pass over a 10000 x 10000 problem.
    dim = 10000
    iterations = 1
    # Compare the GPU run against a CPU baseline of the same workload.
    timeFun(f=simpleFun, dim=dim, iterations=iterations, device='cpu')
    timeFun(f=simpleFun, dim=dim, iterations=iterations, device=DEVICE)