ex_ttt_sentence_space.py
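"""
Sentence-space test-then-train evaluation on the Fakeddit post stream.

Post titles are encoded word-by-word with a SentenceTransformer, the
embedding matrices are resized into fixed-size "images", and a ResNet18
(optionally initialised with ImageNet transfer weights) classifies them.
Each stream chunk is first evaluated and then used for training; per-chunk
metric scores are saved to disk.
"""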
import numpy as np
from math import ceil
from tqdm import tqdm
from strlearn.metrics import balanced_accuracy_score as bac, recall, precision, specificity, f1_score, geometric_mean_score_1, geometric_mean_score_2
from sklearn.metrics import recall_score, precision_score, balanced_accuracy_score
from sentence_transformers import SentenceTransformer
import matplotlib.pyplot as plt
from cv2 import resize
from torchvision.models import resnet18, ResNet18_Weights
from torch.utils.data import DataLoader, TensorDataset
import torch
import torch.nn as nn
import torch.optim as optim
X = np.load("fakeddit_stream/fakeddit_posts.npy", allow_pickle=True)
bias = np.load("fakeddit_stream/fakeddit_posts_y.npy")

# Which label column to use (0 selects the binary problem)
bias_id = 0

print(X.shape)
print(bias.shape)

# Only titles, without timestamp
stream = X[:, 0]
# Binary problem -- invert the 2-way labels; other label columns are used as-is
y = np.array([1, 0])[bias[:, bias_id]] if bias_id == 0 else bias[:, bias_id]

chunk_size = 250
# Number of chunks covering the whole stream
n_chunks = ceil(stream.shape[0] / chunk_size)

# Select one dummy sample per class to inject into single-class chunks
classes = np.unique(y)
n_classes = len(classes)
dummies = stream[[np.where(y == label)[0][0] for label in classes]]

metrics = (recall, recall_score, precision, precision_score, specificity, f1_score, geometric_mean_score_1, geometric_mean_score_2, bac, balanced_accuracy_score)
"""
Model
"""
num_classes = 2
batch_size = 8
num_epochs = 1

# To transfer or not to transfer?
weights = ResNet18_Weights.IMAGENET1K_V1
# weights = None

model = resnet18(weights=weights)
# Replace the classification head with a two-class output layer
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, num_classes)

device = torch.device("mps")
model = model.to(device)

optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# imb_weight = torch.from_numpy(np.array(imb_weights)).float().to(device)
criterion = nn.CrossEntropyLoss()

transformer = SentenceTransformer('all-MiniLM-L6-v2', device=device).to(device)

# Accumulates per-chunk scores -- CHUNKS x METRICS
results = []
for chunk_id in tqdm(range(n_chunks)):
    # Slice the current chunk from the stream
    chunk_X = stream[chunk_id*chunk_size:chunk_id*chunk_size+chunk_size]
    chunk_y = y[chunk_id*chunk_size:chunk_id*chunk_size+chunk_size]

    # If any class is missing from the chunk, inject the dummy samples
    if len(np.unique(chunk_y)) != n_classes:
        chunk_X[:n_classes] = dummies
        chunk_y[:n_classes] = classes

    # Encode each title word-by-word and resize the embedding matrix into a
    # fixed-size single-channel "image", stacked to three channels for ResNet
    chunk_images = []
    for text_id, text in enumerate(tqdm(chunk_X, disable=True)):
        words = text.split(" ")
        img = resize(transformer.encode(words), (384, 200))
        rgb = np.stack((img, img, img), axis=0)
        chunk_images.append(rgb)

        # print(text)
        # plt.imshow(rgb[:, :, 0])
        # plt.title(text)
        # plt.tight_layout()
        # plt.savefig("bar.png")
        # exit()

    chunk_images = np.array(chunk_images)

    chunk_X = torch.from_numpy(chunk_images).float()
    chunk_y = torch.from_numpy(chunk_y).long()

    stml_dataset = TensorDataset(chunk_X, chunk_y)
    data_loader = DataLoader(stml_dataset, batch_size=batch_size, shuffle=True)

    if chunk_id == 0:
        # First chunk is used only for training
        model.train()
        for epoch in range(num_epochs):
            for i, batch in enumerate(data_loader, 0):
                inputs, labels = batch
                optimizer.zero_grad()
                outputs = model(inputs.to(device))
                loss = criterion(outputs, labels.to(device))
                loss.backward()
                optimizer.step()
    else:
        # Test-then-train: evaluate on the unseen chunk first...
        model.eval()
        with torch.no_grad():
            logits = model(chunk_X.to(device))
        probs = torch.nn.functional.softmax(logits, dim=1).cpu().numpy()
        preds = np.argmax(probs, 1)

        scores = [metric(chunk_y.numpy(), preds) for metric in metrics]
        results.append(scores)

        # ...then train on the same chunk
        model.train()
        for epoch in range(num_epochs):
            for i, batch in enumerate(data_loader, 0):
                inputs, labels = batch
                optimizer.zero_grad()
                outputs = model(inputs.to(device))
                loss = criterion(outputs, labels.to(device))
                loss.backward()
                optimizer.step()

results = np.array(results)
np.save("results/scores_sentence_space_2c_transfer", results)