"""
try:
%tensorflow_version 1.x
%matplotlib inline
except Exception:
pass
"""
print(tf.__version__)
# we want TF 1.x
assert tf.__version__ < "2.0"
import tensorflow as tf
import tensorflow.keras as K
# Each section below rebinds (activation_function, deriv_activation_function);
# keep only the pair you need.

## Softplus : softplus(x) = log(1 + exp(x)), whose derivative is the sigmoid
activation_function, deriv_activation_function = K.activations.softplus, K.activations.sigmoid
## ReLU : ReLU(x) = max(0, x); the derivative is 1 for x > 0 and 0 otherwise
activation_function, deriv_activation_function = K.activations.relu, lambda x: tf.cast(x > 0, dtype = x.dtype)
## GELU (tanh approximation) : GELU(x) ~ 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
import math
a = math.sqrt(2 / math.pi)
b = 0.044715
part = lambda x: 1 + K.activations.tanh(a * (x + b * (x ** 3)))
# Derivative of part(x), using d/dx tanh(u) = (1 - tanh(u)^2) * u'
dpart = lambda x: (a * (1 + 3 * b * x ** 2)) * (1 - K.activations.tanh(a * (x + b * (x ** 3))) ** 2)
g = lambda x: x * part(x) / 2
dg = lambda x: part(x) / 2 + x * dpart(x) / 2
activation_function, deriv_activation_function = g, dg
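# Sketch of a quick comparison (assumption: tf.math.erf is available in this
# TF 1.x build): the exact GELU is x * Phi(x), with Phi the standard normal CDF,
# so the tanh approximation above should stay close to it on sample points.
exact_gelu = lambda x: 0.5 * x * (1.0 + tf.math.erf(x / math.sqrt(2.0)))
sample = tf.constant([-2.0, -0.5, 0.0, 0.5, 2.0])
with tf.Session() as sess:
    print(sess.run([g(sample), exact_gelu(sample)]))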
## Tanh : derivative is 1 - tanh(x)^2
activation_function, deriv_activation_function = K.activations.tanh, lambda x: 1 - K.activations.tanh(x) ** 2
## Sigmoid : derivative is sigmoid(x) * (1 - sigmoid(x))
g = K.activations.sigmoid
activation_function, deriv_activation_function = g, lambda x: g(x) * (1 - g(x))
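# Sketch of a sanity check (assumes a TF 1.x session): TensorFlow's autodiff of
# the sigmoid should match the hand-written g(x) * (1 - g(x)) derivative above.
xs = tf.constant([-2.0, -0.5, 0.5, 2.0])
auto_grad = tf.gradients(tf.reduce_sum(g(xs)), xs)[0]
with tf.Session() as sess:
    print(sess.run([deriv_activation_function(xs), auto_grad]))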
## Parameterised ReLU : PReLU(x) = x for x > 0, a * x otherwise
def prelu(a = 0.01):
    @tf.function
    def f(x):
        # relu(x) covers x > 0; (x - |x|) / 2 is x for x < 0 and 0 otherwise
        return tf.nn.relu(x) + a * (x - abs(x)) * 0.5
    return f

def prelu_deriv(a = 0.01):
    @tf.function
    def f(x):
        """
        # Index-by-index version; generates graph errors in TF (tensors do not
        # support item assignment):
        y = tf.zeros_like(x)
        for index in zip(*tf.where(tf.greater(x, 0))):      # x > 0
            y[index] = 1.
        for index in zip(*tf.where(tf.less_equal(x, 0))):   # 0 >= x
            y[index] = a
        return y
        """
        # Vectorised version: 1 where x > 0, a where x < 0, and a at x == 0.
        for_zero = tf.cast(tf.math.equal(x, 0), dtype = x.dtype) * a
        y = tf.cast(x > 0, dtype = x.dtype) + a * (tf.cast(tf.math.not_equal(x, 0), dtype = x.dtype) - tf.math.sign(x)) * 0.5
        return y + for_zero
    return f

a = 0.01
activation_function, deriv_activation_function = prelu(a), prelu_deriv(a)
## Leaky ReLU : Parameterised ReLU with a = 0.01
activation_function, deriv_activation_function = prelu(a = 0.01), prelu_deriv(a = 0.01)
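# Quick usage sketch (assumes a TF 1.x session, and TF >= 1.14 so the
# tf.function-decorated prelu can be called in graph mode): with a = 0.1,
# negative inputs are scaled by a and positive inputs pass through unchanged.
with tf.Session() as sess:
    print(sess.run(prelu(a = 0.1)(tf.constant([-2.0, 0.0, 3.0]))))  # ~ [-0.2, 0.0, 3.0]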
## Parameterised ELU : ELU(x) = x for x > 0, a * (exp(x) - 1) otherwise
def elu(a = 1.):
    @tf.function
    def f(x):
        # return K.activations.elu(x, alpha = a)
        # This explicit formulation is preferred because the derivative below
        # can be written with the same sign-based masks.
        return tf.nn.relu(x) + a * (1 - tf.math.sign(x)) * 0.5 * (tf.exp(x) - 1)
    return f

def elu_deriv(a = 1.):
    @tf.function
    def f(x):
        """
        # Index-by-index version; generates graph errors in TF (tensors do not
        # support item assignment):
        y = tf.zeros_like(x)
        for index in zip(*tf.where(tf.greater(x, 0))):      # x > 0
            y[index] = 1.
        for index in zip(*tf.where(tf.less_equal(x, 0))):   # 0 >= x
            y[index] = a * tf.exp(x[index])
        return y
        """
        # Vectorised version: 1 where x > 0, a * exp(x) where x < 0, and a at x == 0.
        for_zero = tf.cast(tf.math.equal(x, 0), dtype = x.dtype) * a
        y = tf.cast(x > 0, dtype = x.dtype) + a * (tf.cast(tf.math.not_equal(x, 0), dtype = x.dtype) - tf.math.sign(x)) * 0.5 * tf.exp(x)
        return y + for_zero
    return f

a = 1.
activation_function, deriv_activation_function = elu(a), elu_deriv(a)

## ELU : Parameterised ELU with a = 1
activation_function, deriv_activation_function = elu(a = 1.), elu_deriv(a = 1.)
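# Quick value sketch (assumes a TF 1.x session, and TF >= 1.14 so the
# tf.function-decorated elu can be called in graph mode): with a = 1, negative
# inputs map to exp(x) - 1 and positive inputs pass through unchanged.
with tf.Session() as sess:
    print(sess.run(elu(a = 1.)(tf.constant([-1.0, 0.0, 2.0]))))  # ~ [exp(-1) - 1, 0.0, 2.0]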