largernnevaltest.py
import torch
import torch.nn as nn
import pandas as pd
import torch.nn.functional as F

torch.cuda.empty_cache()
# Load the dataset and move it to the GPU as a float64 tensor
data = pd.read_csv("mixed_game_data_limit.csv", header=None)
torch_data = torch.tensor(data.values).cuda()
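# Each row is assumed to hold 64*12+6 = 774 input features followed by the
# target value in the last column (matching the slicing in the loops below).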
# Round the training split down to a multiple of the batch size (32)
train_len = (int(len(torch_data) * 0.9) // 32) * 32
test_len = len(torch_data) - train_len
train, test = torch.utils.data.random_split(torch_data, [train_len, test_len])
train_loader = torch.utils.data.DataLoader(train, batch_size=32, shuffle=True, num_workers=0)
test_loader = torch.utils.data.DataLoader(test, batch_size=32, shuffle=True, num_workers=0)
# Print the minibatch size of the training and test loaders
print(train_loader.batch_size, test_loader.batch_size)
# Define the network
class Net(nn.Module):
    def __init__(self):
        # Define all the layers of the net
        super(Net, self).__init__()
        self.fc1 = nn.Linear(64*12+6, 800, dtype=float)
        self.fc2 = nn.Linear(800, 800, dtype=float)
        self.fc3 = nn.Linear(800, 800, dtype=float)
        self.fc4 = nn.Linear(800, 800, dtype=float)
        self.fc5 = nn.Linear(800, 200, dtype=float)
        self.fc6 = nn.Linear(200, 200, dtype=float)
        self.fc7 = nn.Linear(200, 200, dtype=float)
        self.fc8 = nn.Linear(200, 1, dtype=float)

    def forward(self, x):
        # ReLU after every hidden layer; the output layer is left linear
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.relu(self.fc4(x))
        x = F.relu(self.fc5(x))
        x = F.relu(self.fc6(x))
        x = F.relu(self.fc7(x))
        x = self.fc8(x)
        return x
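# Net is a plain fully connected MLP: 774 inputs -> four 800-unit hidden
# layers -> three 200-unit hidden layers -> 1 output, with ReLU activations
# between hidden layers and float64 (dtype=float) weights throughout.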
import torch.optim as optim

# Instantiate the model, the Adam optimizer, and the MSE loss function
model = Net().cuda()
# model.load_state_dict(torch.load("model35.pt"))
optimizer = optim.Adam(model.parameters(), lr=3e-4)
criterion = nn.MSELoss()
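# Optional shape sanity check (a minimal sketch, not part of the original
# script): a dummy batch of 32 rows should yield one prediction per row.
# dummy = torch.zeros(32, 64*12+6, dtype=torch.float64).cuda()
# print(model(dummy).shape)  # expected: torch.Size([32, 1])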
for i in range(20000):
    epoch_loss = 0.0
    for batch_idx, data_target in enumerate(train_loader):
        # Split each row into input features and the target value
        inputs = data_target[:, 0:64*12+6]
        target = data_target[:, -1:]
        optimizer.zero_grad()
        # Compute a forward pass
        output = model(inputs)
        if batch_idx % 500 == 0:
            print(batch_idx)
        # Compute the loss, backpropagate, and update the weights
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        # Accumulate the scalar loss; .item() avoids keeping the graph alive
        epoch_loss += loss.item()
    print("Epoch: " + str(i))
    print(epoch_loss)
    # Checkpoint every 5 epochs
    if i % 5 == 0:
        print("Saving")
        torch.save(model.state_dict(), "model" + str(i) + ".pt")
torch.save(model.state_dict(), "model.pt")

# Set the model in eval mode and evaluate on the held-out test set
model.eval()
test_loss = 0.0
with torch.no_grad():
    for data in test_loader:
        inputs = data[:, 0:64*12+6]
        labels = data[:, -1:]
        # Do the forward pass and accumulate the loss over all test batches
        outputs = model(inputs)
        test_loss += criterion(outputs, labels).item()
print('Mean test loss of the network: %f' % (test_loss / len(test_loader)))
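# To resume later from the final checkpoint (standard PyTorch API, mirroring
# the commented-out load near the optimizer setup above):
# model = Net().cuda()
# model.load_state_dict(torch.load("model.pt"))
# model.eval()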