Log training metrics to visualize them via tensorboard #5422

Merged: 23 commits, Mar 18, 2020
12 changes: 12 additions & 0 deletions changelog/5422.feature.rst
@@ -0,0 +1,12 @@
Add options ``tensorboard_log_directory`` and ``tensorboard_log_level`` to ``EmbeddingIntentClassifier``,
``DIETClassifier``, ``ResponseSelector``, ``EmbeddingPolicy`` and ``TEDPolicy``.

By default ``tensorboard_log_directory`` is ``None``. If a valid directory is provided,
metrics are written during training. After the model is trained, you can inspect the
training metrics in Tensorboard by running ``tensorboard --logdir <path-to-given-directory>``.

Metrics can either be written after every epoch (default) or for every training step.
You can specify when to write metrics using the option ``tensorboard_log_level``.
Valid values are 'epoch' and 'minibatch'.

We also write a model summary, i.e. the layers with their inputs and types, to the given directory.
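
As a rough usage illustration (not part of this PR's diff): assuming the standard component constructor accepts a ``component_config`` dict, the new options could be passed as in the hypothetical sketch below; the directory path is only a placeholder.

from rasa.nlu.classifiers.diet_classifier import DIETClassifier

# Hypothetical sketch: the option names come from this PR, the path is a placeholder.
# In practice these options would normally be set in the pipeline configuration.
classifier = DIETClassifier(
    component_config={
        "tensorboard_log_directory": "./tensorboard",  # enables metric logging
        "tensorboard_log_level": "epoch",              # or "minibatch"
    }
)

# After training, inspect the logged metrics with:
#   tensorboard --logdir ./tensorboard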
9 changes: 9 additions & 0 deletions docs/core/policies.rst
@@ -425,6 +425,15 @@ It is recommended to use ``state_featurizer=LabelTokenizerSingleStateFeaturizer(
# How many examples to use for hold out validation set
# Large values may hurt performance, e.g. model accuracy.
"evaluate_on_number_of_examples": 0
# If you want to use tensorboard to visualize training metrics,
# set this option to a valid output directory.
# You can view the training metrics after training in tensorboard via
# ``tensorboard --logdir <path-to-given-directory>``
"tensorboard_log_directory": None
# Define when training metrics for tensorboard should be logged.
# Either after every epoch or for every training step.
# Valid values: 'epoch' and 'minibatch'
"tensorboard_log_level": "epoch"

.. note::

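For the Core policies shown above (``EmbeddingPolicy`` and ``TEDPolicy``), a comparable hypothetical sketch, again not part of the diff: it assumes that, as with the other defaults, config values can be overridden via keyword arguments when constructing the policy; the directory is a placeholder.

from rasa.core.policies.ted_policy import TEDPolicy

# Hypothetical sketch: assumes config overrides can be passed as keyword arguments.
policy = TEDPolicy(
    tensorboard_log_directory="./tensorboard/ted",  # placeholder directory
    tensorboard_log_level="minibatch",              # log after every training step
)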
29 changes: 28 additions & 1 deletion docs/nlu/components.rst
@@ -919,6 +919,15 @@ EmbeddingIntentClassifier
# How many examples to use for hold out validation set
# Large values may hurt performance, e.g. model accuracy.
"evaluate_on_number_of_examples": 0
# If you want to use tensorboard to visualize training metrics,
# set this option to a valid output directory.
# You can view the training metrics after training in tensorboard via
# ``tensorboard --logdir <path-to-given-directory>``
"tensorboard_log_directory": None
# Define when training metrics for tensorboard should be logged.
# Either after every epoch or for every training step.
# Valid values: 'epoch' and 'minibatch'
"tensorboard_log_level": "epoch"

.. _keyword_intent_classifier:

@@ -1099,7 +1108,16 @@ ResponseSelector
# should predict those tokens.
"use_masked_language_model": False
# Name of the intent for which this response selector is to be trained
"retrieval_intent: None
"retrieval_intent": None
# If you want to use tensorboard to visualize training metrics,
# set this option to a valid output directory.
# You can view the training metrics after training in tensorboard via
# ``tensorboard --logdir <path-to-given-directory>``
"tensorboard_log_directory": None
# Define when training metrics for tensorboard should be logged.
# Either after every epoch or for every training step.
# Valid values: 'epoch' and 'minibatch'
"tensorboard_log_level": "epoch"


Entity Extractors
@@ -1659,3 +1677,12 @@ DIETClassifier
# examples per entity are required.
# Rule of thumb: you should have more than 100 examples per entity.
"BILOU_flag": True
# If you want to use tensorboard to visualize training metrics,
# set this option to a valid output directory.
# You can view the training metrics after training in tensorboard via
# ``tensorboard --logdir <path-to-given-directory>``
"tensorboard_log_directory": None
# Define when training metrics for tensorboard should be logged.
# Either after every epoch or for every training step.
# Valid values: 'epoch' and 'minibatch'
"tensorboard_log_level": "epoch"
9 changes: 9 additions & 0 deletions rasa/core/policies/embedding_policy.py
@@ -38,6 +38,8 @@
SOFTMAX,
AUTO,
BALANCED,
TENSORBOARD_LOG_DIR,
TENSORBOARD_LOG_LEVEL,
)
from rasa.utils.tensorflow.models import RasaModel
import rasa.utils.common as common_utils
@@ -140,6 +142,13 @@ class EmbeddingPolicy(TEDPolicy):
# How many examples to use for hold out validation set
# Large values may hurt performance, e.g. model accuracy.
EVAL_NUM_EXAMPLES: 0,
# If you want to use tensorboard to visualize training and validation metrics,
# set this option to a valid output directory.
TENSORBOARD_LOG_DIR: None,
# Define when training metrics for tensorboard should be logged.
# Either after every epoch or for every training step.
# Valid values: 'epoch' and 'minibatch'
TENSORBOARD_LOG_LEVEL: "epoch",
}

def __init__(
16 changes: 15 additions & 1 deletion rasa/core/policies/ted_policy.py
@@ -58,6 +58,8 @@
SOFTMAX,
AUTO,
BALANCED,
TENSORBOARD_LOG_DIR,
TENSORBOARD_LOG_LEVEL,
)


@@ -169,6 +171,13 @@ class TEDPolicy(Policy):
# How many examples to use for hold out validation set
# Large values may hurt performance, e.g. model accuracy.
EVAL_NUM_EXAMPLES: 0,
# If you want to use tensorboard to visualize training and validation metrics,
# set this option to a valid output directory.
TENSORBOARD_LOG_DIR: None,
# Define when training metrics for tensorboard should be logged.
# Either after every epoch or for every training step.
# Valid values: 'epoch' and 'minibatch'
TENSORBOARD_LOG_LEVEL: "epoch",
}

@staticmethod
@@ -447,7 +456,12 @@ def __init__(
max_history_tracker_featurizer_used: bool,
label_data: RasaModelData,
) -> None:
super().__init__(name="TED", random_seed=config[RANDOM_SEED])
super().__init__(
name="TED",
random_seed=config[RANDOM_SEED],
tensorboard_log_dir=config[TENSORBOARD_LOG_DIR],
tensorboard_log_level=config[TENSORBOARD_LOG_LEVEL],
)

self.config = config
self.max_history_tracker_featurizer_used = max_history_tracker_featurizer_used
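The ``RasaModel`` base class that receives ``tensorboard_log_dir`` and ``tensorboard_log_level`` in the call above is not shown in this excerpt. Purely as a hypothetical sketch of the mechanism, a constructor taking these arguments might set up a ``tf.summary`` writer roughly as follows; every name other than the two arguments is invented for illustration.

import tensorflow as tf

class RasaModelSketch(tf.keras.Model):
    """Hypothetical sketch only, not the actual RasaModel implementation."""

    def __init__(self, name, random_seed=None,
                 tensorboard_log_dir=None, tensorboard_log_level="epoch"):
        super().__init__(name=name)
        self.random_seed = random_seed
        self.tensorboard_log_level = tensorboard_log_level
        self.train_summary_writer = None
        if tensorboard_log_dir is not None:
            # One sub-directory per model name so several runs can be compared.
            self.train_summary_writer = tf.summary.create_file_writer(
                f"{tensorboard_log_dir}/{name}/train"
            )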
16 changes: 15 additions & 1 deletion rasa/nlu/classifiers/diet_classifier.py
@@ -56,6 +56,7 @@
SPARSE_INPUT_DROPOUT,
MASKED_LM,
ENTITY_RECOGNITION,
TENSORBOARD_LOG_DIR,
INTENT_CLASSIFICATION,
EVAL_NUM_EXAMPLES,
EVAL_NUM_EPOCHS,
@@ -77,6 +78,7 @@
SOFTMAX,
AUTO,
BALANCED,
TENSORBOARD_LOG_LEVEL,
)


@@ -207,6 +209,13 @@ def required_components(cls) -> List[Type[Component]]:
# examples per entity are required.
# Rule of thumb: you should have more than 100 examples per entity.
BILOU_FLAG: True,
# If you want to use tensorboard to visualize training and validation metrics,
# set this option to a valid output directory.
TENSORBOARD_LOG_DIR: None,
# Define when training metrics for tensorboard should be logged.
# Either after every epoch or for every training step.
# Valid values: 'epoch' and 'minibatch'
TENSORBOARD_LOG_LEVEL: "epoch",
}

# init helpers
@@ -937,7 +946,12 @@ def __init__(
index_tag_id_mapping: Optional[Dict[int, Text]],
config: Dict[Text, Any],
) -> None:
super().__init__(name="DIET", random_seed=config[RANDOM_SEED])
super().__init__(
name="DIET",
random_seed=config[RANDOM_SEED],
tensorboard_log_dir=config[TENSORBOARD_LOG_DIR],
tensorboard_log_level=config[TENSORBOARD_LOG_LEVEL],
)

self.config = config

9 changes: 9 additions & 0 deletions rasa/nlu/classifiers/embedding_intent_classifier.py
@@ -40,6 +40,8 @@
SOFTMAX,
AUTO,
BALANCED,
TENSORBOARD_LOG_DIR,
TENSORBOARD_LOG_LEVEL,
)
import rasa.utils.common as common_utils
from rasa.utils.tensorflow.models import RasaModel
@@ -132,6 +134,13 @@ def required_components(cls) -> List[Type[Component]]:
# How many examples to use for hold out validation set
# Large values may hurt performance, e.g. model accuracy.
EVAL_NUM_EXAMPLES: 0,
# If you want to use tensorboard to visualize training and validation metrics,
# set this option to a valid output directory.
TENSORBOARD_LOG_DIR: None,
# Define when training metrics for tensorboard should be logged.
# Either after every epoch or for every training step.
# Valid values: 'epoch' and 'minibatch'
TENSORBOARD_LOG_LEVEL: "epoch",
}

def __init__(
9 changes: 9 additions & 0 deletions rasa/nlu/selectors/response_selector.py
@@ -60,6 +60,8 @@
SOFTMAX,
AUTO,
BALANCED,
TENSORBOARD_LOG_DIR,
TENSORBOARD_LOG_LEVEL,
)
from rasa.nlu.constants import (
RESPONSE,
@@ -186,6 +188,13 @@ def required_components(cls) -> List[Type[Component]]:
MASKED_LM: False,
# Name of the intent for which this response selector is to be trained
RETRIEVAL_INTENT: None,
# If you want to use tensorboard to visualize training and validation metrics,
# set this option to a valid output directory.
TENSORBOARD_LOG_DIR: None,
# Define when training metrics for tensorboard should be logged.
# Either after every epoch or for every training step.
# Valid values: 'epoch' and 'minibatch'
TENSORBOARD_LOG_LEVEL: "epoch",
}

def __init__(
3 changes: 3 additions & 0 deletions rasa/utils/tensorflow/constants.py
@@ -65,3 +65,6 @@
POOLING = "pooling"
MAX_POOLING = "max"
MEAN_POOLING = "mean"

TENSORBOARD_LOG_DIR = "tensorboard_log_directory"
TENSORBOARD_LOG_LEVEL = "tensorboard_log_level"
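
To illustrate what the two log levels mean in practice, here is a hedged sketch (not the PR's actual training loop) of how a fit loop could branch on these constants when writing metrics with ``tf.summary``; the function and argument names are invented for illustration.

import tensorflow as tf

from rasa.utils.tensorflow.constants import TENSORBOARD_LOG_DIR, TENSORBOARD_LOG_LEVEL

def write_loss_sketch(config, epoch, step_losses):
    """Hypothetical illustration of the 'epoch' vs 'minibatch' log levels."""
    if config[TENSORBOARD_LOG_DIR] is None:
        return  # tensorboard logging is disabled by default
    # A real implementation would create the writer once and reuse it.
    writer = tf.summary.create_file_writer(config[TENSORBOARD_LOG_DIR])
    with writer.as_default():
        if config[TENSORBOARD_LOG_LEVEL] == "minibatch":
            # one data point per training step
            for step, loss in enumerate(step_losses):
                tf.summary.scalar("loss", loss, step=epoch * len(step_losses) + step)
        else:  # "epoch" (the default): one data point per epoch
            mean_loss = sum(step_losses) / len(step_losses)
            tf.summary.scalar("loss", mean_loss, step=epoch)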