Commit af972f28 authored by Wang, Andy

Delete LSTM_REG_EMB.ipynb

parent ca866a48
%% Cell type:code id: tags:
```
import torch
import torch.nn as nn
import numpy as np
from torchvision import datasets, transforms
import seaborn as sns
import matplotlib.pyplot as plt
GPU = True
device_idx = 0
if GPU:
    device = torch.device("cuda:" + str(device_idx) if torch.cuda.is_available() else "cpu")
else:
    device = torch.device("cpu")
print(device)
# Set default dtype for model weights
torch.set_default_dtype(torch.double)
```
%% Output
cuda:0
%% Cell type:code id: tags:
```
# Load the data
import os
os.chdir("/content/drive/My Drive/Colab Notebooks/NLP/coursework")
import pickle
with open("en_indices.pk", "rb") as f:
en_sentences_vectors = pickle.load(f)
with open("de_indices.pk", "rb") as f:
de_sentences_vectors = pickle.load(f)
with open("scores.pk", "rb") as f:
scores = pickle.load(f)
```
%% Cell type:code id: tags:
```
en_lengths = np.array([len(en_sentences_vectors[i]) for i in range(len(en_sentences_vectors))])
de_lengths = np.array([len(de_sentences_vectors[i]) for i in range(len(de_sentences_vectors))])
scores = np.array(scores)
# Plotting the lengths of sentences to see where we should pad
sns.distplot(en_lengths)
plt.title("Lengths of sentences EN")
plt.show()
sns.distplot(de_lengths)
plt.title("Lengths of sentences DE")
plt.show()
```
%% Output
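%% Cell type:markdown id: tags:
As an aside, the cutoff of 17 tokens used for the split below can be sanity-checked against the length distribution directly. The sketch below is illustrative only and uses the `en_lengths` and `de_lengths` arrays defined above.
%% Cell type:code id: tags:
```
# Sketch: inspect length percentiles to motivate the < 17 / >= 17 split used below.
for name, lengths in [("EN", en_lengths), ("DE", de_lengths)]:
    q50, q75, q90 = np.percentile(lengths, [50, 75, 90])
    print(f"{name}: median={q50:.0f}, 75th={q75:.0f}, 90th={q90:.0f}, max={lengths.max()}")
    print(f"{name}: fraction with length < 17 = {(lengths < 17).mean():.2f}")
```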
%% Cell type:code id: tags:
```
# Split into 2 groups so that inputs are not too sparse
# Check whether the proposed splits have the same distribution of scores
sns.distplot(scores)
plt.title("Full Distribution of scores")
plt.show()
sns.distplot(scores[en_lengths < 17])
plt.title("EN_SENT, len < 17")
plt.show()
sns.distplot(scores[en_lengths >= 17])
plt.title("EN_SENT, len >= 17")
plt.show()
# sns.distplot(scores[de_lengths < 17])
# plt.title("DE_SENT, len < 17")
# plt.show()
# sns.distplot(scores[de_lengths >= 17])
# plt.title("DE_SENT, len >= 17")
# plt.show()
```
%% Output
%% Cell type:code id: tags:
```
# Split up into two separate sets and pad
pad = [0]
scores_set_1 = scores[en_lengths < 17]
scores_set_2 = scores[en_lengths >= 17]
en_set_1 = []
en_set_2 = []
idx_set_1 = []
idx_set_2 = []
max_len_en = max(len(sent) for sent in en_sentences_vectors)
for i, en_sentence in enumerate(en_sentences_vectors):
    sent_len = len(en_sentence)
    if sent_len < 17:
        en_set_1.append(en_sentence + pad * (16 - sent_len))
        idx_set_1.append(i)
    else:
        en_set_2.append(en_sentence + pad * (max_len_en - sent_len))
        idx_set_2.append(i)
de_set_1 = []
de_set_2 = []
max_len_de_set_1 = max(de_lengths[idx_set_1])
max_len_de_set_2 = max(len(sent) for sent in de_sentences_vectors)
for idx in idx_set_1:
    de_set_1.append(de_sentences_vectors[idx] + pad * (max_len_de_set_1 - len(de_sentences_vectors[idx])))
for idx in idx_set_2:
    de_set_2.append(de_sentences_vectors[idx] + pad * (max_len_de_set_2 - len(de_sentences_vectors[idx])))
```
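%% Cell type:markdown id: tags:
A quick check (illustrative only, using the lists built above) confirms that every sequence within a set now has a uniform length, which is what the fixed-size tensors in the Dataset below rely on.
%% Cell type:code id: tags:
```
# Sketch: verify that each padded set is rectangular before tensorising it.
for name, padded in [("en_set_1", en_set_1), ("en_set_2", en_set_2),
                     ("de_set_1", de_set_1), ("de_set_2", de_set_2)]:
    lengths = {len(sent) for sent in padded}
    print(f"{name}: {len(padded)} sentences, padded length(s) {sorted(lengths)}")
```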
%% Cell type:code id: tags:
```
class Sentences(torch.utils.data.Dataset):
    def __init__(self, en_sentences, de_sentences, scores):
        super(Sentences, self).__init__()
        self.en_sentences_vectors = torch.tensor(en_sentences, device=device)
        self.de_sentences_vectors = torch.tensor(de_sentences, device=device)
        self.scores = torch.tensor(scores, device=device)

    def __len__(self):
        return len(self.scores)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        en, de, scores = self.en_sentences_vectors[idx], self.de_sentences_vectors[idx], self.scores[idx]
        return en, de, scores
```
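%% Cell type:markdown id: tags:
For illustration only, the Sentences dataset can be exercised on a tiny hand-made example; the index sequences and scores below are made up and are not part of the real data.
%% Cell type:code id: tags:
```
# Sketch: exercise the Sentences dataset on a toy example (hypothetical values).
toy_en = [[5, 9, 2, 0], [7, 3, 1, 4]]        # two padded "English" index sequences
toy_de = [[11, 6, 0, 0], [8, 2, 5, 9]]       # two padded "German" index sequences
toy_scores = [0.25, -0.4]
toy_ds = Sentences(toy_en, toy_de, toy_scores)
print(len(toy_ds))                           # 2
en0, de0, s0 = toy_ds[0]
print(en0.shape, de0.shape, s0)              # torch.Size([4]) torch.Size([4]) tensor(0.2500, ...)
```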
%% Cell type:code id: tags:
```
# Create train and test sets
train_pct = 0.8
first_set_train_len = int(train_pct * len(en_set_1))
second_set_train_len = int(train_pct * len(en_set_2))
first_set = Sentences(en_set_1, de_set_1, scores_set_1)
second_set = Sentences(en_set_2, de_set_2, scores_set_2)
first_set_train, first_set_test = torch.utils.data.random_split(first_set, [first_set_train_len, len(en_set_1) - first_set_train_len])
second_set_train, second_set_test = torch.utils.data.random_split(second_set, [second_set_train_len, len(en_set_2) - second_set_train_len])
```
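%% Cell type:markdown id: tags:
torch.utils.data.random_split draws a fresh random permutation each run. If a reproducible split were wanted, a seeded generator could be passed in, as in the optional sketch below (this is a variant, not what the notebook actually does).
%% Cell type:code id: tags:
```
# Sketch: a seeded variant of the split above, for reproducibility (optional).
gen = torch.Generator().manual_seed(42)
first_set_train, first_set_test = torch.utils.data.random_split(
    first_set, [first_set_train_len, len(en_set_1) - first_set_train_len], generator=gen)
second_set_train, second_set_test = torch.utils.data.random_split(
    second_set, [second_set_train_len, len(en_set_2) - second_set_train_len], generator=gen)
```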
%% Cell type:code id: tags:
```
# Create Dataloaders
batch_size = 32
loader_first_train = torch.utils.data.DataLoader(first_set_train, batch_size=batch_size, shuffle=True)
loader_first_test = torch.utils.data.DataLoader(first_set_test, batch_size=batch_size, shuffle=False)
loader_second_train = torch.utils.data.DataLoader(second_set_train, batch_size=batch_size, shuffle=True)
loader_second_test = torch.utils.data.DataLoader(second_set_test, batch_size=batch_size, shuffle=False)
```
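%% Cell type:markdown id: tags:
One batch from each loader can be pulled out to confirm the shapes the model will see; this is purely a sketch over the loaders defined above.
%% Cell type:code id: tags:
```
# Sketch: peek at one batch per loader to check tensor shapes.
en_b, de_b, y_b = next(iter(loader_first_train))
print("set 1:", en_b.shape, de_b.shape, y_b.shape)   # (batch, 16), (batch, max_len_de_set_1), (batch,)
en_b, de_b, y_b = next(iter(loader_second_train))
print("set 2:", en_b.shape, de_b.shape, y_b.shape)
```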
%% Cell type:code id: tags:
```
# https://stackoverflow.com/questions/53010465/bidirectional-lstm-output-question-in-pytorch
# Hyperparameters
en_vocab_size = 25237
de_vocab_size = 27854
embed_dim = 300
lstm_hidden_dim = 256
num_layers = 2
reg_inputs = 4 * lstm_hidden_dim
reg_hid_dim1 = 2 * lstm_hidden_dim
reg_hid_dim2 = lstm_hidden_dim
learning_rate = 1e-4
class LSTM_REG(torch.nn.Module):
    def __init__(self):
        super(LSTM_REG, self).__init__()
        self.en_emb = torch.nn.Embedding(en_vocab_size, embed_dim, padding_idx=0)
        self.de_emb = torch.nn.Embedding(de_vocab_size, embed_dim, padding_idx=0)
        # Note: GRU layers are used here despite the "lstm" naming
        self.en_lstm = torch.nn.GRU(embed_dim, lstm_hidden_dim, num_layers, bidirectional=True, batch_first=True)
        self.de_lstm = torch.nn.GRU(embed_dim, lstm_hidden_dim, num_layers, bidirectional=True, batch_first=True)
        self.reg = torch.nn.Sequential(
            nn.Dropout(),
            nn.Linear(reg_inputs, reg_hid_dim1),
            nn.ReLU(),
            nn.Linear(reg_hid_dim1, reg_hid_dim2),
            nn.ReLU(),
            nn.Linear(reg_hid_dim2, 1)
        )

    def forward(self, en_batch, de_batch):
        # en_batch/de_batch are 2D index tensors (BATCH, NUM_WORDS);
        # after embedding they become 3D: (BATCH, NUM_WORDS, embed_dim)
        en_emb = self.en_emb(en_batch)
        de_emb = self.de_emb(de_batch)
        en_all_hids, en_last_hid = self.en_lstm(en_emb)
        de_all_hids, de_last_hid = self.de_lstm(de_emb)
        # using the last state of the last layer in each direction
        en_last_hid_resh = en_last_hid.view(num_layers, 2, en_batch.size(0), lstm_hidden_dim)
        en_reg_input_1 = en_last_hid_resh[-1, 0, :, :]
        en_reg_input_2 = en_last_hid_resh[-1, 1, :, :]
        de_last_hid_resh = de_last_hid.view(num_layers, 2, de_batch.size(0), lstm_hidden_dim)
        de_reg_input_1 = de_last_hid_resh[-1, 0, :, :]
        de_reg_input_2 = de_last_hid_resh[-1, 1, :, :]
        reg_input = torch.cat((en_reg_input_1, en_reg_input_2, de_reg_input_1, de_reg_input_2), dim=-1)
        out = self.reg(reg_input)
        return out
```
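%% Cell type:markdown id: tags:
The reshaping in forward relies on how PyTorch lays out the final hidden state of a bidirectional, multi-layer GRU: h_n has shape (num_layers * 2, batch, hidden), and the documented way to separate it is view(num_layers, num_directions, batch, hidden), so [-1, 0] and [-1, 1] pick out the last layer's forward and backward states. The standalone sketch below (toy sizes, not the notebook's hyperparameters) illustrates that layout.
%% Cell type:code id: tags:
```
# Sketch: layout of h_n for a 2-layer bidirectional GRU (toy sizes for illustration).
toy_gru = torch.nn.GRU(input_size=8, hidden_size=5, num_layers=2,
                       bidirectional=True, batch_first=True)
x = torch.randn(3, 7, 8)                      # (batch=3, seq_len=7, input=8)
out, h_n = toy_gru(x)
print(out.shape)                              # torch.Size([3, 7, 10])  (hidden * 2 directions)
print(h_n.shape)                              # torch.Size([4, 3, 5])   (layers * dirs, batch, hidden)
h = h_n.view(2, 2, 3, 5)                      # (layers, directions, batch, hidden)
fwd_last, bwd_last = h[-1, 0], h[-1, 1]       # final layer, each direction
# The forward state of the last layer equals the forward half of `out` at the last time step;
# the backward state corresponds to the backward half of `out` at t=0.
print(torch.allclose(fwd_last, out[:, -1, :5]))   # True
print(torch.allclose(bwd_last, out[:, 0, 5:]))    # True
```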
%% Cell type:code id: tags:
```
# Create model
model = LSTM_REG()
model = model.to(device)
```
%% Cell type:code id: tags:
```
# Optimiser
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
```
%% Cell type:code id: tags:
```
# Loss function (MSE on the regression output)
def calc_loss(output, target):
    loss = torch.nn.functional.mse_loss(output.squeeze(), target, reduction="mean")
    return loss

# Evaluation metric: mean absolute error (L1), logged as "Test Acc" below
def eval_target(output, target):
    acc = torch.nn.functional.l1_loss(output.squeeze(), target, reduction="mean")
    return acc
```
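%% Cell type:markdown id: tags:
For clarity, the two metrics behave as follows on a tiny hand-made example (values are hypothetical): calc_loss is the mean squared error used for optimisation, while eval_target reports the mean absolute error that the training loop below logs as "Test Acc".
%% Cell type:code id: tags:
```
# Sketch: calc_loss (MSE) vs eval_target (MAE) on toy predictions and targets.
pred = torch.tensor([[0.1], [0.4], [-0.2]])   # model outputs, shape (batch, 1)
true = torch.tensor([0.0, 0.5, -0.5])         # targets, shape (batch,)
print(calc_loss(pred, true).item())           # (0.1**2 + 0.1**2 + 0.3**2) / 3 ≈ 0.0367
print(eval_target(pred, true).item())         # (0.1 + 0.1 + 0.3) / 3 ≈ 0.1667
```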
%% Cell type:code id: tags:
```
num_epochs = 100
verbose_print = False
train_losses = []
test_losses = []
test_acc = []
test_pearson = []
for epoch in range(num_epochs):
    model.train()
    epoch_loss = []
    total_steps = len(loader_first_train) + len(loader_second_train)
    for idx_1, (en, de, labels) in enumerate(loader_first_train):
        model.zero_grad()
        en = en.to(device)
        de = de.to(device)
        labels = labels.to(device)
        output = model(en, de)
        loss = calc_loss(output, labels)
        loss.backward()
        avg_batch_loss = loss.item()
        optimizer.step()
        epoch_loss.append(avg_batch_loss)
        if verbose_print:
            print(f"Epoch {epoch}, Batch {idx_1 + 1} Train Loss: {avg_batch_loss:.4f}")
    for idx_2, (en, de, labels) in enumerate(loader_second_train):
        model.zero_grad()
        en = en.to(device)
        de = de.to(device)
        labels = labels.to(device)
        output = model(en, de)
        loss = calc_loss(output, labels)
        loss.backward()
        avg_batch_loss = loss.item()
        optimizer.step()
        epoch_loss.append(avg_batch_loss)
        if verbose_print:
            print(f"Epoch {epoch}, Batch {idx_1 + idx_2 + 2} Train Loss: {avg_batch_loss:.4f}")
    avg_epoch_loss = sum(epoch_loss) / total_steps
    train_losses.append(avg_epoch_loss)
    print(f"Average Train Loss in Epoch {epoch}: {avg_epoch_loss:.4f}")

    # Test on test set
    model.eval()
    epoch_loss = []
    epoch_acc = []
    all_outputs = []
    all_labels = []
    total_steps = len(loader_first_test) + len(loader_second_test)
    with torch.no_grad():
        for idx_1, (en, de, labels) in enumerate(loader_first_test):
            # Record for Pearson
            all_labels.extend(labels.tolist())
            en = en.to(device)
            de = de.to(device)
            labels = labels.to(device)
            output = model(en, de)
            # Record for Pearson
            all_outputs.extend(output.squeeze().tolist())
            loss = calc_loss(output, labels)
            avg_batch_loss = loss.item()
            acc = eval_target(output, labels)
            avg_batch_acc = acc.item()
            epoch_loss.append(avg_batch_loss)
            epoch_acc.append(avg_batch_acc)
            if verbose_print:
                print(f"Epoch {epoch}, Batch {idx_1 + 1} Test Loss: {avg_batch_loss:.4f}, Test Acc: {avg_batch_acc:.4f}")
        # Note: this second test loop does not record outputs/labels, so the Pearson
        # coefficient below is computed over the first test split only.
        for idx_2, (en, de, labels) in enumerate(loader_second_test):
            en = en.to(device)
            de = de.to(device)
            labels = labels.to(device)
            output = model(en, de)
            loss = calc_loss(output, labels)
            avg_batch_loss = loss.item()
            acc = eval_target(output, labels)
            avg_batch_acc = acc.item()
            epoch_loss.append(avg_batch_loss)
            epoch_acc.append(avg_batch_acc)
            if verbose_print:
                print(f"Epoch {epoch}, Batch {idx_1 + idx_2 + 2} Test Loss: {avg_batch_loss:.4f}, Test Acc: {avg_batch_acc:.4f}")
    avg_epoch_loss = sum(epoch_loss) / total_steps
    avg_epoch_acc = sum(epoch_acc) / total_steps
    test_losses.append(avg_epoch_loss)
    test_acc.append(avg_epoch_acc)
    print(f"Average Test Loss in Epoch {epoch}: {avg_epoch_loss:.4f}")
    print(f"Average Test Acc in Epoch {epoch}: {avg_epoch_acc:.4f}")
    # Calc Pearson
    pearson = np.corrcoef(all_outputs, all_labels)[0, 1]
    test_pearson.append(pearson)
    print(f"Pearson Coeff in Epoch {epoch}: {pearson}")
```
%% Output
Average Train Loss in Epoch 0: 0.0064
Average Test Loss in Epoch 0: 0.7037
Average Test Acc in Epoch 0: 0.5604
Pearson Coeff in Epoch 0: -0.00640668765047935
Average Train Loss in Epoch 1: 0.0070
Average Test Loss in Epoch 1: 0.7356
Average Test Acc in Epoch 1: 0.5793
Pearson Coeff in Epoch 1: -0.006979045487867788
Average Train Loss in Epoch 2: 0.0070
Average Test Loss in Epoch 2: 0.7183
Average Test Acc in Epoch 2: 0.5663
Pearson Coeff in Epoch 2: -0.008282282412511641
Average Train Loss in Epoch 3: 0.0070
Average Test Loss in Epoch 3: 0.7177
Average Test Acc in Epoch 3: 0.5695
Pearson Coeff in Epoch 3: -0.0060183350042013065
Average Train Loss in Epoch 4: 0.0069
Average Test Loss in Epoch 4: 0.7513
Average Test Acc in Epoch 4: 0.5851
Pearson Coeff in Epoch 4: -0.01662392594265492
Average Train Loss in Epoch 5: 0.0084
Average Test Loss in Epoch 5: 0.7393
Average Test Acc in Epoch 5: 0.5792
Pearson Coeff in Epoch 5: -0.010780362453402855
Average Train Loss in Epoch 6: 0.0062
Average Test Loss in Epoch 6: 0.7309
Average Test Acc in Epoch 6: 0.5760
Pearson Coeff in Epoch 6: -0.007079517514670253
Average Train Loss in Epoch 7: 0.0061
Average Test Loss in Epoch 7: 0.7319
Average Test Acc in Epoch 7: 0.5740
Pearson Coeff in Epoch 7: -0.013774120205408132
Average Train Loss in Epoch 8: 0.0065
Average Test Loss in Epoch 8: 0.7202
Average Test Acc in Epoch 8: 0.5689
Pearson Coeff in Epoch 8: -0.012118685158348044
Average Train Loss in Epoch 9: 0.0066
Average Test Loss in Epoch 9: 0.7192
Average Test Acc in Epoch 9: 0.5707
Pearson Coeff in Epoch 9: -0.0071289457160448495
Average Train Loss in Epoch 10: 0.0069
Average Test Loss in Epoch 10: 0.7193
Average Test Acc in Epoch 10: 0.5656
Pearson Coeff in Epoch 10: -0.011515248369711595
Average Train Loss in Epoch 11: 0.0060
Average Test Loss in Epoch 11: 0.7236
Average Test Acc in Epoch 11: 0.5703
Pearson Coeff in Epoch 11: -0.005069177373407349
Average Train Loss in Epoch 12: 0.0062
Average Test Loss in Epoch 12: 0.7340
Average Test Acc in Epoch 12: 0.5758
Pearson Coeff in Epoch 12: -0.011562655037911231
Average Train Loss in Epoch 13: 0.0079
Average Test Loss in Epoch 13: 0.7214
Average Test Acc in Epoch 13: 0.5684
Pearson Coeff in Epoch 13: -0.008449650566705465
Average Train Loss in Epoch 14: 0.0072
Average Test Loss in Epoch 14: 0.7141
Average Test Acc in Epoch 14: 0.5676
Pearson Coeff in Epoch 14: -0.006097211910686325
Average Train Loss in Epoch 15: 0.0059
Average Test Loss in Epoch 15: 0.7171
Average Test Acc in Epoch 15: 0.5663
Pearson Coeff in Epoch 15: -0.00868391500578421
Average Train Loss in Epoch 16: 0.0063
Average Test Loss in Epoch 16: 0.7132
Average Test Acc in Epoch 16: 0.5626
Pearson Coeff in Epoch 16: -0.0068288151125823265
Average Train Loss in Epoch 17: 0.0073
Average Test Loss in Epoch 17: 0.7285
Average Test Acc in Epoch 17: 0.5689
Pearson Coeff in Epoch 17: -0.015800308472356018
Average Train Loss in Epoch 18: 0.0079
Average Test Loss in Epoch 18: 0.7174
Average Test Acc in Epoch 18: 0.5664
Pearson Coeff in Epoch 18: -0.006678423310916171
Average Train Loss in Epoch 19: 0.0070
Average Test Loss in Epoch 19: 0.7112
Average Test Acc in Epoch 19: 0.5584
Pearson Coeff in Epoch 19: -0.012301081725153455
Average Train Loss in Epoch 20: 0.0066
Average Test Loss in Epoch 20: 0.7375
Average Test Acc in Epoch 20: 0.5748
Pearson Coeff in Epoch 20: -0.011193149754333446
Average Train Loss in Epoch 21: 0.0058
Average Test Loss in Epoch 21: 0.7168
Average Test Acc in Epoch 21: 0.5616
Pearson Coeff in Epoch 21: -0.016230629153410902
Average Train Loss in Epoch 22: 0.0061
Average Test Loss in Epoch 22: 0.7361
Average Test Acc in Epoch 22: 0.5745
Pearson Coeff in Epoch 22: -0.008928321861264556
Average Train Loss in Epoch 23: 0.0061
Average Test Loss in Epoch 23: 0.7311
Average Test Acc in Epoch 23: 0.5724
Pearson Coeff in Epoch 23: -0.010055173007708168
Average Train Loss in Epoch 24: 0.0055
Average Test Loss in Epoch 24: 0.7179
Average Test Acc in Epoch 24: 0.5661
Pearson Coeff in Epoch 24: -0.010154973152838248
Average Train Loss in Epoch 25: 0.0058
Average Test Loss in Epoch 25: 0.7151
Average Test Acc in Epoch 25: 0.5665
Pearson Coeff in Epoch 25: -0.014390283954534358
Average Train Loss in Epoch 26: 0.0065
Average Test Loss in Epoch 26: 0.6966
Average Test Acc in Epoch 26: 0.5547
Pearson Coeff in Epoch 26: -0.006887657576059487
Average Train Loss in Epoch 27: 0.0074
Average Test Loss in Epoch 27: 0.7502
Average Test Acc in Epoch 27: 0.5811
Pearson Coeff in Epoch 27: -0.013991437861377506
Average Train Loss in Epoch 28: 0.0083
Average Test Loss in Epoch 28: 0.7345
Average Test Acc in Epoch 28: 0.5756
Pearson Coeff in Epoch 28: -0.01275095297448143
Average Train Loss in Epoch 29: 0.0070
Average Test Loss in Epoch 29: 0.7228
Average Test Acc in Epoch 29: 0.5699
Pearson Coeff in Epoch 29: -0.010738972002671059
Average Train Loss in Epoch 30: 0.0058
Average Test Loss in Epoch 30: 0.7384
Average Test Acc in Epoch 30: 0.5746
Pearson Coeff in Epoch 30: -0.014365453459400542
Average Train Loss in Epoch 31: 0.0048
Average Test Loss in Epoch 31: 0.7270
Average Test Acc in Epoch 31: 0.5708
Pearson Coeff in Epoch 31: -0.01160351295848988
Average Train Loss in Epoch 32: 0.0044
Average Test Loss in Epoch 32: 0.7284
Average Test Acc in Epoch 32: 0.5734
Pearson Coeff in Epoch 32: -0.013472051296738327
Average Train Loss in Epoch 33: 0.0056
Average Test Loss in Epoch 33: 0.7188
Average Test Acc in Epoch 33: 0.5686
Pearson Coeff in Epoch 33: -0.00952966769478103
Average Train Loss in Epoch 34: 0.0051
Average Test Loss in Epoch 34: 0.7328
Average Test Acc in Epoch 34: 0.5728
Pearson Coeff in Epoch 34: -0.014258350328160824
Average Train Loss in Epoch 35: 0.0052
Average Test Loss in Epoch 35: 0.7503
Average Test Acc in Epoch 35: 0.5829
Pearson Coeff in Epoch 35: -0.013481927715847445
Average Train Loss in Epoch 36: 0.0056
Average Test Loss in Epoch 36: 0.7332
Average Test Acc in Epoch 36: 0.5736
Pearson Coeff in Epoch 36: -0.011264977647855326
Average Train Loss in Epoch 37: 0.0046
Average Test Loss in Epoch 37: 0.7137
Average Test Acc in Epoch 37: 0.5610
Pearson Coeff in Epoch 37: -0.0145935856302308
Average Train Loss in Epoch 38: 0.0048
Average Test Loss in Epoch 38: 0.7094
Average Test Acc in Epoch 38: 0.5559
Pearson Coeff in Epoch 38: -0.015018139630774197
Average Train Loss in Epoch 39: 0.0050
Average Test Loss in Epoch 39: 0.7175
Average Test Acc in Epoch 39: 0.5611
Pearson Coeff in Epoch 39: -0.01683060454278484
Average Train Loss in Epoch 40: 0.0056
Average Test Loss in Epoch 40: 0.7006
Average Test Acc in Epoch 40: 0.5604
Pearson Coeff in Epoch 40: -0.014162116495078265
Average Train Loss in Epoch 41: 0.0071
Average Test Loss in Epoch 41: 0.7132
Average Test Acc in Epoch 41: 0.5651
Pearson Coeff in Epoch 41: -0.011043106471982304
Average Train Loss in Epoch 42: 0.0057
Average Test Loss in Epoch 42: 0.7186
Average Test Acc in Epoch 42: 0.5623
Pearson Coeff in Epoch 42: -0.01730310071553439
Average Train Loss in Epoch 43: 0.0046
Average Test Loss in Epoch 43: 0.7365
Average Test Acc in Epoch 43: 0.5735
Pearson Coeff in Epoch 43: -0.015166682443291847
Average Train Loss in Epoch 44: 0.0053
Average Test Loss in Epoch 44: 0.7442
Average Test Acc in Epoch 44: 0.5783
Pearson Coeff in Epoch 44: -0.016477045219500264
Average Train Loss in Epoch 45: 0.0049
Average Test Loss in Epoch 45: 0.7381
Average Test Acc in Epoch 45: 0.5715
Pearson Coeff in Epoch 45: -0.014855397963026226
Average Train Loss in Epoch 46: 0.0045
Average Test Loss in Epoch 46: 0.7301
Average Test Acc in Epoch 46: 0.5700
Pearson Coeff in Epoch 46: -0.014198007048672636
Average Train Loss in Epoch 47: 0.0046
Average Test Loss in Epoch 47: 0.7421
Average Test Acc in Epoch 47: 0.5746
Pearson Coeff in Epoch 47: -0.014543215716162435
Average Train Loss in Epoch 48: 0.0051
Average Test Loss in Epoch 48: 0.7382
Average Test Acc in Epoch 48: 0.5737
Pearson Coeff in Epoch 48: -0.018195214360679147
Average Train Loss in Epoch 49: 0.0058
Average Test Loss in Epoch 49: 0.7369
Average Test Acc in Epoch 49: 0.5736
Pearson Coeff in Epoch 49: -0.0187835210579788
Average Train Loss in Epoch 50: 0.0060
Average Test Loss in Epoch 50: 0.7327
Average Test Acc in Epoch 50: 0.5713
Pearson Coeff in Epoch 50: -0.016376628604220102
Average Train Loss in Epoch 51: 0.0047
Average Test Loss in Epoch 51: 0.7333
Average Test Acc in Epoch 51: 0.5698
Pearson Coeff in Epoch 51: -0.021823389604178886
Average Train Loss in Epoch 52: 0.0039
Average Test Loss in Epoch 52: 0.7330
Average Test Acc in Epoch 52: 0.5661
Pearson Coeff in Epoch 52: -0.021463282019656534
Average Train Loss in Epoch 53: 0.0048
Average Test Loss in Epoch 53: 0.7222
Average Test Acc in Epoch 53: 0.5625
Pearson Coeff in Epoch 53: -0.018696313913313105
Average Train Loss in Epoch 54: 0.0056
Average Test Loss in Epoch 54: 0.7278
Average Test Acc in Epoch 54: 0.5663
Pearson Coeff in Epoch 54: -0.017137352732088478
Average Train Loss in Epoch 55: 0.0051
Average Test Loss in Epoch 55: 0.7280
Average Test Acc in Epoch 55: 0.5682
Pearson Coeff in Epoch 55: -0.018891591467467665
Average Train Loss in Epoch 56: 0.0040
Average Test Loss in Epoch 56: 0.7340
Average Test Acc in Epoch 56: 0.5694
Pearson Coeff in Epoch 56: -0.019951237621963408
Average Train Loss in Epoch 57: 0.0041
Average Test Loss in Epoch 57: 0.7418
Average Test Acc in Epoch 57: 0.5753
Pearson Coeff in Epoch 57: -0.019676796008937927
Average Train Loss in Epoch 58: 0.0041
Average Test Loss in Epoch 58: 0.7390
Average Test Acc in Epoch 58: 0.5707
Pearson Coeff in Epoch 58: -0.020598112045259128
Average Train Loss in Epoch 59: 0.0043
Average Test Loss in Epoch 59: 0.7237
Average Test Acc in Epoch 59: 0.5626
Pearson Coeff in Epoch 59: -0.018634062292781854
Average Train Loss in Epoch 60: 0.0043
Average Test Loss in Epoch 60: 0.7294
Average Test Acc in Epoch 60: 0.5690
Pearson Coeff in Epoch 60: -0.019200184562833688
Average Train Loss in Epoch 61: 0.0048
Average Test Loss in Epoch 61: 0.7250
Average Test Acc in Epoch 61: 0.5645
Pearson Coeff in Epoch 61: -0.017169600118991837
Average Train Loss in Epoch 62: 0.0046
Average Test Loss in Epoch 62: 0.7429
Average Test Acc in Epoch 62: 0.5717
Pearson Coeff in Epoch 62: -0.02105909092450887
Average Train Loss in Epoch 63: 0.0040
Average Test Loss in Epoch 63: 0.7397
Average Test Acc in Epoch 63: 0.5708
Pearson Coeff in Epoch 63: -0.020242108494791737
Average Train Loss in Epoch 64: 0.0042
Average Test Loss in Epoch 64: 0.7207
Average Test Acc in Epoch 64: 0.5620
Pearson Coeff in Epoch 64: -0.02208794678246374
Average Train Loss in Epoch 65: 0.0042
Average Test Loss in Epoch 65: 0.7232
Average Test Acc in Epoch 65: 0.5612
Pearson Coeff in Epoch 65: -0.019729349599685995
Average Train Loss in Epoch 66: 0.0048
Average Test Loss in Epoch 66: 0.7218
Average Test Acc in Epoch 66: 0.5619
Pearson Coeff in Epoch 66: -0.01763264710866364
Average Train Loss in Epoch 67: 0.0045
Average Test Loss in Epoch 67: 0.7217
Average Test Acc in Epoch 67: 0.5601
Pearson Coeff in Epoch 67: -0.019551360501773796
Average Train Loss in Epoch 68: 0.0050
Average Test Loss in Epoch 68: 0.7119
Average Test Acc in Epoch 68: 0.5567
Pearson Coeff in Epoch 68: -0.018149361915137767
Average Train Loss in Epoch 69: 0.0042
Average Test Loss in Epoch 69: 0.7361
Average Test Acc in Epoch 69: 0.5679
Pearson Coeff in Epoch 69: -0.019791509404197764
Average Train Loss in Epoch 70: 0.0047
Average Test Loss in Epoch 70: 0.7192
Average Test Acc in Epoch 70: 0.5590
Pearson Coeff in Epoch 70: -0.02263321301328681
Average Train Loss in Epoch 71: 0.0050
Average Test Loss in Epoch 71: 0.7246
Average Test Acc in Epoch 71: 0.5600
Pearson Coeff in Epoch 71: -0.01887427602274251
Average Train Loss in Epoch 72: 0.0043
Average Test Loss in Epoch 72: 0.7290
Average Test Acc in Epoch 72: 0.5655
Pearson Coeff in Epoch 72: -0.01975682382762401
Average Train Loss in Epoch 73: 0.0045
Average Test Loss in Epoch 73: 0.7275
Average Test Acc in Epoch 73: 0.5611
Pearson Coeff in Epoch 73: -0.022303661384655567
Average Train Loss in Epoch 74: 0.0044
Average Test Loss in Epoch 74: 0.7345
Average Test Acc in Epoch 74: 0.5697
Pearson Coeff in Epoch 74: -0.013561320865527042
Average Train Loss in Epoch 75: 0.0046
Average Test Loss in Epoch 75: 0.7555
Average Test Acc in Epoch 75: 0.5764
Pearson Coeff in Epoch 75: -0.02202401637066137
Average Train Loss in Epoch 76: 0.0033
Average Test Loss in Epoch 76: 0.7241
Average Test Acc in Epoch 76: 0.5617
Pearson Coeff in Epoch 76: -0.019334440020621756
Average Train Loss in Epoch 77: 0.0037
Average Test Loss in Epoch 77: 0.7241
Average Test Acc in Epoch 77: 0.5612
Pearson Coeff in Epoch 77: -0.01860456967621103
Average Train Loss in Epoch 78: 0.0041
Average Test Loss in Epoch 78: 0.7257
Average Test Acc in Epoch 78: 0.5617
Pearson Coeff in Epoch 78: -0.020251074135047984
Average Train Loss in Epoch 79: 0.0039
Average Test Loss in Epoch 79: 0.7314
Average Test Acc in Epoch 79: 0.5637
Pearson Coeff in Epoch 79: -0.01813485602271905
Average Train Loss in Epoch 80: 0.0036
Average Test Loss in Epoch 80: 0.7216
Average Test Acc in Epoch 80: 0.5600
Pearson Coeff in Epoch 80: -0.02119288567892971
Average Train Loss in Epoch 81: 0.0040
Average Test Loss in Epoch 81: 0.7204
Average Test Acc in Epoch 81: 0.5569
Pearson Coeff in Epoch 81: -0.01891174788038295
Average Train Loss in Epoch 82: 0.0040
Average Test Loss in Epoch 82: 0.7121
Average Test Acc in Epoch 82: 0.5535
Pearson Coeff in Epoch 82: -0.025232018269285464
Average Train Loss in Epoch 83: 0.0041
Average Test Loss in Epoch 83: 0.7225
Average Test Acc in Epoch 83: 0.5599
Pearson Coeff in Epoch 83: -0.017284696851279386
Average Train Loss in Epoch 84: 0.0038
Average Test Loss in Epoch 84: 0.7079
Average Test Acc in Epoch 84: 0.5509
Pearson Coeff in Epoch 84: -0.01465603118938496
Average Train Loss in Epoch 85: 0.0041
Average Test Loss in Epoch 85: 0.7082
Average Test Acc in Epoch 85: 0.5533
Pearson Coeff in Epoch 85: -0.020592868840659445
Average Train Loss in Epoch 86: 0.0037
Average Test Loss in Epoch 86: 0.7177
Average Test Acc in Epoch 86: 0.5548
Pearson Coeff in Epoch 86: -0.013848488095631058
Average Train Loss in Epoch 87: 0.0046
Average Test Loss in Epoch 87: 0.7191
Average Test Acc in Epoch 87: 0.5614
Pearson Coeff in Epoch 87: -0.020433791133147864
Average Train Loss in Epoch 88: 0.0039
Average Test Loss in Epoch 88: 0.7168
Average Test Acc in Epoch 88: 0.5562
Pearson Coeff in Epoch 88: -0.0209650485342702
Average Train Loss in Epoch 89: 0.0039
Average Test Loss in Epoch 89: 0.7073
Average Test Acc in Epoch 89: 0.5538
Pearson Coeff in Epoch 89: -0.01930070775427866
Average Train Loss in Epoch 90: 0.0033
Average Test Loss in Epoch 90: 0.7176
Average Test Acc in Epoch 90: 0.5562
Pearson Coeff in Epoch 90: -0.018268362264179574
Average Train Loss in Epoch 91: 0.0036
Average Test Loss in Epoch 91: 0.7092
Average Test Acc in Epoch 91: 0.5495
Pearson Coeff in Epoch 91: -0.01695721266439781
Average Train Loss in Epoch 92: 0.0035
Average Test Loss in Epoch 92: 0.7220
Average Test Acc in Epoch 92: 0.5597
Pearson Coeff in Epoch 92: -0.020730753494440576
Average Train Loss in Epoch 93: 0.0032
Average Test Loss in Epoch 93: 0.7041
Average Test Acc in Epoch 93: 0.5497
Pearson Coeff in Epoch 93: -0.023294490975846783
Average Train Loss in Epoch 94: 0.0034
Average Test Loss in Epoch 94: 0.7133
Average Test Acc in Epoch 94: 0.5538
Pearson Coeff in Epoch 94: -0.021937212324890532
Average Train Loss in Epoch 95: 0.0036
Average Test Loss in Epoch 95: 0.7207
Average Test Acc in Epoch 95: 0.5584
Pearson Coeff in Epoch 95: -0.023256931584865477
Average Train Loss in Epoch 96: 0.0038
Average Test Loss in Epoch 96: 0.7115
Average Test Acc in Epoch 96: 0.5523
Pearson Coeff in Epoch 96: -0.023386304406968088
Average Train Loss in Epoch 97: 0.0044
Average Test Loss in Epoch 97: 0.7186
Average Test Acc in Epoch 97: 0.5548
Pearson Coeff in Epoch 97: -0.017680577356088172
Average Train Loss in Epoch 98: 0.0044
Average Test Loss in Epoch 98: 0.7276
Average Test Acc in Epoch 98: 0.5588
Pearson Coeff in Epoch 98: -0.027775749690502043
Average Train Loss in Epoch 99: 0.0043
Average Test Loss in Epoch 99: 0.7179
Average Test Acc in Epoch 99: 0.5514
Pearson Coeff in Epoch 99: -0.024401396445606256
%% Cell type:code id: tags:
```
# plot stats
plt.plot(range(len(train_losses)), train_losses)
plt.xlabel("Epochs")
plt.title('Train losses')
plt.show()
plt.plot(range(len(test_losses)), test_losses)
plt.xlabel("Epochs")
plt.title('Test losses')
plt.show()
plt.plot(range(len(test_acc)), test_acc)
plt.xlabel("Epochs")
plt.title('Test acc')
plt.show()
plt.plot(range(len(test_pearson)), test_pearson)
plt.xlabel("Epochs")
plt.title('Test pearson')
plt.show()
```
%% Output