Skip to content

Commit

Permalink
Remove some unhelpful logs that were cluttering the output and hindering clear visualization.
Browse files Browse the repository at this point in the history
  • Loading branch information
gugarosa committed Apr 20, 2021
1 parent 0ef1369 commit 3d2e401
Show file tree
Hide file tree
Showing 6 changed files with 0 additions and 26 deletions.
6 changes: 0 additions & 6 deletions nalp/core/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,8 +97,6 @@ def generate_greedy_search(self, start, max_length=100):
"""

logger.debug('Greedy search generation with maximum length: %d', max_length)

# Encoding the start string into tokens, while expanding its first dimension
start_tokens = self.encoder.encode(start)
start_tokens = tf.expand_dims(start_tokens, 0)
Expand Down Expand Up @@ -145,8 +143,6 @@ def generate_temperature_sampling(self, start, max_length=100, temperature=1.0):
"""

logger.debug('Temperature sampling generation with maximum length: %d', max_length)

# Encoding the start string into tokens, while expanding its first dimension
start_tokens = self.encoder.encode(start)
start_tokens = tf.expand_dims(start_tokens, 0)
Expand Down Expand Up @@ -198,8 +194,6 @@ def generate_top_sampling(self, start, max_length=100, k=0, p=0.0):
"""

logger.debug('Top-based sampling generation with maximum length: %d', max_length)

# Encoding the start string into tokens, while expanding its first dimension
start_tokens = self.encoder.encode(start)
start_tokens = tf.expand_dims(start_tokens, 0)
Expand Down
4 changes: 0 additions & 4 deletions nalp/datasets/language_modeling.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,8 +55,6 @@ def _create_sequences(self, encoded_tokens, rank, max_contiguous_pad_length):
"""

logger.debug('Creating sequences ...')

# Slices the tensors into sequences
sequences = data.Dataset.from_tensor_slices(encoded_tokens)

Expand All @@ -66,8 +64,6 @@ def _create_sequences(self, encoded_tokens, rank, max_contiguous_pad_length):
# Creates the sequences
sequences = sequences.batch(max_contiguous_pad_length + 1, drop_remainder=True)

logger.debug('Maximum contiguous pad length: %d.', max_contiguous_pad_length)

return sequences

def _create_input_target(self, sequence):
Expand Down
2 changes: 0 additions & 2 deletions nalp/encoders/integer.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,6 @@ def learn(self, dictionary, reverse_dictionary):
"""

logger.debug('Learning how to encode ...')

# Creates the encoder property
self.encoder = dictionary

Expand Down
2 changes: 0 additions & 2 deletions nalp/encoders/word2vec.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,6 @@ def learn(self, tokens, max_features=128, window_size=5, min_count=1,
"""

logger.debug('Learning how to encode ...')

# Creates a Word2Vec model
self.encoder = W2V(sentences=[tokens], size=max_features, window=window_size, min_count=min_count,
sg=algorithm, alpha=learning_rate, iter=iterations,
Expand Down
6 changes: 0 additions & 6 deletions nalp/models/generators/gumbel_lstm.py
Original file line number Diff line number Diff line change
Expand Up @@ -93,8 +93,6 @@ def generate_greedy_search(self, start, max_length=100):
"""

logger.debug('Greedy search generation with maximum length: %d', max_length)

# Encoding the start string into tokens and expanding its first dimension
start_tokens = self.encoder.encode(start)
start_tokens = tf.expand_dims(start_tokens, 0)
Expand Down Expand Up @@ -141,8 +139,6 @@ def generate_temperature_sampling(self, start, max_length=100, temperature=1.0):
"""

logger.debug('Temperature sampling generation with maximum length: %d', max_length)

# Applying Gumbel-Softmax temperature as argument
self.tau = temperature

Expand Down Expand Up @@ -197,8 +193,6 @@ def generate_top_sampling(self, start, max_length=100, k=0, p=0.0):
"""

logger.debug('Top-based sampling generation with maximum length: %d', max_length)

# Encoding the start string into tokens and expanding its first dimension
start_tokens = self.encoder.encode(start)
start_tokens = tf.expand_dims(start_tokens, 0)
Expand Down
6 changes: 0 additions & 6 deletions nalp/models/generators/gumbel_rmc.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,8 +99,6 @@ def generate_greedy_search(self, start, max_length=100):
"""

logger.debug('Greedy search generation with maximum length: %d', max_length)

# Encoding the start string into tokens and expanding its first dimension
start_tokens = self.encoder.encode(start)
start_tokens = tf.expand_dims(start_tokens, 0)
Expand Down Expand Up @@ -147,8 +145,6 @@ def generate_temperature_sampling(self, start, max_length=100, temperature=1.0):
"""

logger.debug('Temperature sampling generation with maximum length: %d', max_length)

# Applying Gumbel-Softmax temperature as argument
self.tau = temperature

Expand Down Expand Up @@ -203,8 +199,6 @@ def generate_top_sampling(self, start, max_length=100, k=0, p=0.0):
"""

logger.debug('Top-based sampling generation with maximum length: %d', max_length)

# Encoding the start string into tokens and expanding its first dimension
start_tokens = self.encoder.encode(start)
start_tokens = tf.expand_dims(start_tokens, 0)
Expand Down

0 comments on commit 3d2e401

Please sign in to comment.