From 79c7877c76e4680a37e74a97cd0d005104c71011 Mon Sep 17 00:00:00 2001
From: "Park, Se" <se.park19@imperial.ac.uk>
Date: Fri, 28 Feb 2020 20:10:04 +0000
Subject: [PATCH] Delete model.py

---
 model.py | 36 ------------------------------------
 1 file changed, 36 deletions(-)
 delete mode 100644 model.py

diff --git a/model.py b/model.py
deleted file mode 100644
index 58f1cc0..0000000
--- a/model.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import torch
-import torch.nn as nn
-from transformers import BertModel
-
-class QualityEstimation(nn.Module):
-
-    def __init__(self, hidden_dim):
-        super(QualityEstimation, self).__init__()
-        self.hidden_dim = hidden_dim
-
-        # Load the pretrained multilingual BERT encoder (from_pretrained is a
-        # classmethod, so constructing BertModel(config) first only built an
-        # instance that was immediately discarded)
-        self.bert = BertModel.from_pretrained('bert-base-multilingual-cased')
-        self.dropout = nn.Dropout(0.25)
-
-        # LSTM and classification layers
-        self.lstm = nn.LSTM(input_size=768, hidden_size=self.hidden_dim,
-                            num_layers=1, batch_first=True,
-                            dropout=0, bidirectional=False)
-        self.fc1 = nn.Linear(self.hidden_dim, 1)
-        nn.init.kaiming_normal_(self.fc1.weight)
-        # self.fc2 = nn.Linear(self.hidden_dim, 1)
-        # nn.init.kaiming_normal_(self.fc2.weight)
-
-    def forward(self, token_ids, segment_ids=None, attention_mask=None):
-
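-        # transformers 2.x-era API: BertModel returns a tuple whose first
-        # element is the per-token hidden states of shape (batch, seq_len, 768)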
-        encoded_layers, _ = self.bert(input_ids=token_ids, token_type_ids=segment_ids, attention_mask=attention_mask)
-        encoded_layers = self.dropout(encoded_layers)
-        output, _ = self.lstm(encoded_layers)
-        # output = torch.tanh(self.fc1(output[:,-1,:]))
-        qe_scores = self.fc1(output[:, -1, :])  # regress a score from the last LSTM timestep
-        # qe_scores = torch.tanh(qe_scores)
-
-        return qe_scores
-- 
GitLab
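
For reference, a minimal sketch of how the QualityEstimation module removed
above would have been driven, assuming the transformers 2.x-era API the file
was written against; the sentence pair and hidden size below are illustrative
assumptions, not values taken from the repository:

    import torch
    from transformers import BertTokenizer

    from model import QualityEstimation  # the module as it stood before this patch

    tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased')
    model = QualityEstimation(hidden_dim=128)  # hidden_dim chosen for illustration
    model.eval()

    # Encode a (source, machine translation) pair as one BERT sequence pair
    inputs = tokenizer.encode_plus('the house is small', 'das Haus ist klein',
                                   return_tensors='pt')

    with torch.no_grad():
        qe_scores = model(token_ids=inputs['input_ids'],
                          segment_ids=inputs['token_type_ids'],
                          attention_mask=inputs['attention_mask'])

    print(qe_scores.shape)  # torch.Size([1, 1]): one regression score per pair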