Network.py
import numpy as np
class Network:
    def __init__(self, learningRate, inputNodes, activation, activationDerivative, costFunction, costDerivative) -> None:
        self.layers = []                 # one weight matrix per layer, shape (nodes, prevNodes)
        self.bias = []                   # one bias vector per layer, shape (nodes, 1)
        self.activations = []            # activation function per layer
        self.activationDerivatives = []  # derivative of each layer's activation
        self.learningRate = learningRate
        self.inputNodes = inputNodes
        self.activation = activation                        # stored on the instance but unused;
        self.activationDerivative = activationDerivative    # addLayer supplies per-layer functions
        self.costFunction = costFunction
        self.costDerivative = costDerivative
    def addLayer(self, nodes, activationFunction, activationDerivative):
        # Weights are initialised uniformly in [-1, 1); the first layer connects
        # to the network inputs, every later layer to the previous layer.
        if len(self.layers) == 0:
            self.layers.append(np.random.rand(nodes, self.inputNodes) * 2 - 1)
        else:
            prev = self.layers[-1].shape[0]
            self.layers.append(np.random.rand(nodes, prev) * 2 - 1)
        self.bias.append(np.random.rand(nodes, 1) * 2 - 1)  # one bias per neuron
        self.activations.append(activationFunction)
        self.activationDerivatives.append(activationDerivative)
    def forwardPropagate(self, input):
        # Feed the column vector `input` through every layer in turn.
        for W, B, A in zip(self.layers, self.bias, self.activations):
            weightedOutput = np.matmul(W, input) + B
            input = A(weightedOutput)
        return input
    def backPropagate(self, input, costGradient):
        # Forward pass again, this time remembering every layer's weighted
        # output (pre-activation) and activation for the gradient computation.
        outputs = []
        activation = input
        activations = [input]
        for W, B, A in zip(self.layers, self.bias, self.activations):
            weightedOutput = np.dot(W, activation) + B
            outputs.append(weightedOutput)
            activation = A(weightedOutput)
            activations.append(activation)
        # Output-layer error: dC/da multiplied elementwise by da/dz.
        error = np.multiply(costGradient, self.activationDerivatives[-1](outputs[-1]))
        # Walk backwards through the layers, updating weights and biases in place.
        for i in range(1, len(self.layers) + 1):
            w = self.layers[-i]  # pre-update weights, needed to propagate the error backwards
            delta_w = np.dot(error, activations[-i - 1].transpose())
            self.layers[-i] = self.layers[-i] - (self.learningRate * delta_w)
            self.bias[-i] = self.bias[-i] - (self.learningRate * error)
            if i < len(self.layers):
                error = np.multiply(np.dot(w.transpose(), error),
                                    self.activationDerivatives[-i - 1](outputs[-i - 1]))
    def fit(self, trainingData, epochs):
        # Plain stochastic gradient descent: one weight update per sample.
        for e in range(epochs):
            for sample in trainingData:
                actual = self.forwardPropagate(sample.input)
                costGradient = self.costDerivative(actual, sample.expected)
                self.backPropagate(sample.input, costGradient)
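
if __name__ == "__main__":
    # Minimal usage sketch, assuming a sigmoid activation and the derivative
    # of a squared-error cost. TrainingSample is only a convenience container
    # introduced here to match the .input/.expected attributes that fit()
    # expects; the class itself does not define one.
    from collections import namedtuple

    TrainingSample = namedtuple("TrainingSample", ["input", "expected"])

    def sigmoid(z):
        return 1.0 / (1.0 + np.exp(-z))

    def sigmoidDerivative(z):
        s = sigmoid(z)
        return s * (1 - s)

    def mseDerivative(actual, expected):
        # Gradient of 0.5 * ||actual - expected||^2 with respect to `actual`.
        return actual - expected

    # Learn XOR with a 2 -> 3 -> 1 network; inputs are column vectors.
    net = Network(0.5, 2, sigmoid, sigmoidDerivative, None, mseDerivative)
    net.addLayer(3, sigmoid, sigmoidDerivative)
    net.addLayer(1, sigmoid, sigmoidDerivative)

    data = [
        TrainingSample(np.array([[0.0], [0.0]]), np.array([[0.0]])),
        TrainingSample(np.array([[0.0], [1.0]]), np.array([[1.0]])),
        TrainingSample(np.array([[1.0], [0.0]]), np.array([[1.0]])),
        TrainingSample(np.array([[1.0], [1.0]]), np.array([[0.0]])),
    ]
    net.fit(data, 5000)
    for sample in data:
        print(sample.input.ravel(), "->", net.forwardPropagate(sample.input).ravel())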