temp.py (forked from comp-cogneuro-lang/EARShot_TF2)
import tensorflow as tf
from Feeder import Feeder
import Modules

# Build the feeder and the model graph: acoustic input -> Network -> semantic logits.
feeder = Feeder(0, True)
acoustics = tf.keras.layers.Input(
    shape= [None, 256],
    dtype= tf.float32
    )
net = Modules.Network()
lo = Modules.Loss()    # instantiated but not used in this debug script
logits, _, _ = net([acoustics, net.get_initial_state()])
model = tf.keras.Model(inputs= acoustics, outputs= logits)

# Grab a single pattern batch to train on repeatedly while inspecting gradients.
patterns = feeder.Get_Pattern()[2]
optimizer = tf.keras.optimizers.Adam(
    learning_rate= 0.001,
    beta_1= 0.9,
    beta_2= 0.999,
    epsilon= 1e-7
    )

# Debugging loop (runs until interrupted): train on the same batch and print the
# loss and the mean absolute gradient of every trainable variable.
while True:
    with tf.GradientTape() as tape:
        logit = model(patterns['acoustics'])
        # Broadcast the per-word semantic target across every timestep of the output.
        label = tf.expand_dims(patterns['semantics'], axis= 1)
        label = tf.tile(label, [1, tf.shape(logit)[1], 1])
        loss = tf.nn.sigmoid_cross_entropy_with_logits(labels= label, logits= logit)
        loss = tf.reduce_mean(loss)
    print(loss)
    gradients = tape.gradient(
        loss,
        model.trainable_variables
        )
    for gradient, variable in zip(gradients, model.trainable_variables):
        print(variable.name, '\t', tf.reduce_mean(tf.abs(gradient)))
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
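
For reference, below is a minimal, self-contained sketch of the same debugging pattern: an eager GradientTape loop that prints the loss and the mean absolute gradient of each trainable variable before applying the update. It uses a hypothetical toy GRU model and random data in place of the repo's Feeder and Modules classes, so it can run without the rest of the project; the shapes (256 spectral channels, 300 semantic units) are assumptions for illustration only.

import tensorflow as tf

# Hypothetical stand-ins for the repo's acoustic batch and semantic targets.
inputs = tf.random.normal([8, 20, 256])                              # [batch, time, channels]
targets = tf.cast(tf.random.uniform([8, 300]) < 0.5, tf.float32)     # [batch, semantic units]

# A small recurrent model that emits semantic logits at every timestep.
model = tf.keras.Sequential([
    tf.keras.layers.GRU(64, return_sequences= True),
    tf.keras.layers.Dense(300)
    ])
optimizer = tf.keras.optimizers.Adam(learning_rate= 0.001)

for step in range(5):    # bounded loop instead of `while True`
    with tf.GradientTape() as tape:
        logits = model(inputs, training= True)
        labels = tf.tile(tf.expand_dims(targets, axis= 1), [1, tf.shape(logits)[1], 1])
        loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels= labels, logits= logits))
    gradients = tape.gradient(loss, model.trainable_variables)
    # Inspect per-variable gradient magnitudes before applying the update.
    for gradient, variable in zip(gradients, model.trainable_variables):
        print(step, variable.name, '\t', float(tf.reduce_mean(tf.abs(gradient))))
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))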