-
Notifications
You must be signed in to change notification settings - Fork 0
/
training_cnn.py
131 lines (98 loc) · 4.35 KB
/
training_cnn.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 16 15:45:46 2019
@author: rjn
"""
#importing all neccessary packages
import numpy as np
from deeplearning.Layers import Conv2D,Dense,MaxPooling2D,Dropout
from deeplearning.Layers import Flatten, NeuralNetwork, Activation
import numpy as np
from deeplearning.activat import ReLU
from deeplearning.utils import train_test_split
from deeplearning.loss import SquareLoss
from deeplearning.optimizer import RMSprop
import pickle
class createCNNModel:
    """
    Build a small VGG-style CNN from the project's ``deeplearning`` layer
    classes (Conv2D, MaxPooling2D, Dropout, Flatten, Dense) — note this is
    NOT Keras, despite the similar layer names.

    Parameters
    ----------
    input_size : tuple
        Shape of one input sample; forwarded to the first Conv2D layer as
        ``input_shape``.
    output_size : int
        Number of units in the final Dense (output) layer.
    epochs : int
        Number of training epochs used by :meth:`compile`.
    batch_size : int
        Mini-batch size used by :meth:`compile`.
    """

    def __init__(self, input_size, output_size, epochs, batch_size):
        # BUG FIX: the original assigned the undefined name `input_shape`
        # (the constructor parameter is `input_size`), raising NameError.
        self.input_size = input_size
        self.output_size = output_size
        self.epochs = epochs
        self.batch_size = batch_size
        # Build the network eagerly so `self.clf` exists right after init.
        self.createmodel()

    def createmodel(self):
        """Assemble the layer stack, store it on ``self.clf`` and return it."""
        clf = NeuralNetwork(optimizer=RMSprop(), loss=SquareLoss)

        # Block 1: two 32-filter 3x3 convolutions.
        # n_filters : number of feature detectors
        # filter_shape : shape of each feature detector ((3,3) = 3x3 kernel)
        # input_shape : standardises the size of the input image
        clf.add(Conv2D(n_filters=32, filter_shape=(3, 3), stride=1,
                       input_shape=self.input_size, biase=False, padding='valid'))
        clf.add(Activation(ReLU))
        clf.add(Conv2D(n_filters=32, filter_shape=(3, 3), biase=False,
                       stride=1, padding='same'))
        clf.add(Activation(ReLU))
        # pool_shape : shape of the pooling window.
        clf.add(MaxPooling2D(pool_shape=(2, 2), stride=2))
        clf.add(Dropout(0.2))

        # Block 2: two 64-filter convolutions.
        clf.add(Conv2D(n_filters=64, filter_shape=(3, 3), biase=False,
                       stride=1, padding='same'))
        clf.add(Activation(ReLU))
        clf.add(Conv2D(n_filters=64, filter_shape=(3, 3), biase=False,
                       stride=1, padding='same'))
        clf.add(Activation(ReLU))
        clf.add(MaxPooling2D(pool_shape=(2, 2), stride=2))

        # Block 3: two 128-filter convolutions.
        clf.add(Conv2D(n_filters=128, filter_shape=(3, 3), stride=1, padding='same'))
        clf.add(Activation(ReLU))
        clf.add(Conv2D(n_filters=128, filter_shape=(3, 3), stride=1, padding='same'))
        clf.add(Activation(ReLU))
        clf.add(MaxPooling2D(pool_shape=(2, 2), stride=2))
        clf.add(Dropout(0.2))

        # Block 4: two 256-filter convolutions.
        clf.add(Conv2D(n_filters=256, filter_shape=(3, 3), stride=1, padding='same'))
        clf.add(Activation(ReLU))
        clf.add(Conv2D(n_filters=256, filter_shape=(3, 3), stride=1, padding='same'))
        clf.add(Activation(ReLU))
        clf.add(MaxPooling2D(pool_shape=(2, 2), stride=2))

        # Classifier head: flatten, then a 512-unit hidden layer and the
        # output layer sized to `output_size`.
        clf.add(Flatten())
        clf.add(Dropout(0.2))
        clf.add(Dense(512))
        clf.add(Activation(ReLU))
        clf.add(Dropout(0.2))
        clf.add(Dense(self.output_size))

        self.clf = clf
        return self.clf

    def summary(self, name="ConvNet"):
        """Return the network summary, titled with `name`.

        BUG FIX: the original ignored the `name` argument and always
        passed the hardcoded string "ConvNet".
        """
        return self.clf.summary(name=name)

    def compile(self, X_train, X_test, y_train, y_test, save=True):
        """Train the network and store the loss history on ``self.hist``.

        NOTE(review): the `save` flag is accepted but unused — kept for
        backward compatibility with existing callers.
        """
        # BUG FIX: the original passed `batch_size=self.epochs`, training
        # with the epoch count as the mini-batch size.
        self.hist = self.clf.fit(X=X_train, y=y_train, n_epochs=self.epochs,
                                 batch_size=self.batch_size,
                                 val_set=(X_test, y_test))

    def save_history(self):
        """Persist training/validation loss curves to disk.

        Assumes ``self.hist`` (as returned by ``fit``) is indexable, with
        training losses at [0] and validation losses at [1] — TODO confirm
        against deeplearning.NeuralNetwork.fit.
        """
        train_loss = self.hist[0]
        val_loss = self.hist[1]
        epoch_axis = range(self.epochs)
        # Filename typo ("histroy") preserved deliberately: downstream
        # visualisation code may load this exact path.
        # NOTE(review): saving a heterogeneous list creates an object array;
        # newer numpy may require allow_pickle=True on reload — verify.
        model_history = [train_loss, val_loss, epoch_axis]
        np.save('model_histroy.npy', model_history)
#%%
# Script cell: build the model, print its summary, train it, and save the
# loss history.
# NOTE(review): `input_shape`, `output_size`, `train_data`, `vtrain_data`,
# `land_data` and `vland_data` are never defined in this file — presumably
# they come from an earlier editor cell or interactive session; running
# this cell standalone raises NameError. TODO: define/load them here.
epochs = 5
batch_size = 32
model = createCNNModel(input_shape , output_size ,epochs, batch_size )
model.summary()
# Argument order maps to compile(X_train, X_test, y_train, y_test):
# (train images, validation images, train labels, validation labels).
model.compile(train_data, vtrain_data , land_data, vland_data)
model.save_history()