Skip to content

Commit

Permalink
update configs for tts models to match the field types with the expected
Browse files Browse the repository at this point in the history
values
  • Loading branch information
erogol committed May 11, 2021
1 parent 8058aaa commit 0213e1c
Show file tree
Hide file tree
Showing 4 changed files with 5 additions and 5 deletions.
4 changes: 2 additions & 2 deletions TTS/config/shared_configs.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from dataclasses import asdict, dataclass

from typing import List, Union
from coqpit import MISSING, Coqpit, check_argument


Expand Down Expand Up @@ -137,7 +137,7 @@ def check_values(
class BaseDatasetConfig(Coqpit):
name: str = None
path: str = None
meta_file_train: str = None
meta_file_train: Union[str, List] = None # TODO: don't take ignored speakers for multi-speaker datasets over this. This is Union for SC-Glow compat.
meta_file_val: str = None
meta_file_attn_mask: str = None

Expand Down
2 changes: 1 addition & 1 deletion TTS/tts/configs/tacotron_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ class TacotronConfig(BaseTTSConfig):
gst_style_input: str = None
# model specific params
r: int = 2
gradual_training: List = None
gradual_training: List[List] = None
memory_size: int = -1
prenet_type: str = "original"
prenet_dropout: bool = True
Expand Down
2 changes: 1 addition & 1 deletion tests/inputs/test_tacotron2_config.json
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@
"batch_size": 1, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
"eval_batch_size":1,
"r": 7, // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
"gradual_training": [[0, 7, 4]], //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceeed.
"gradual_training": [[0, 7, 4], [1, 5, 2]], //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceeed.
"loss_masking": true, // enable / disable loss masking against the sequence padding.
"ga_alpha": 10.0, // weight for guided attention loss. If > 0, guided attention is enabled.
"mixed_precision": false,
Expand Down
2 changes: 1 addition & 1 deletion tests/inputs/test_tacotron_config.json
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@
"batch_size": 1, // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
"eval_batch_size":1,
"r": 7, // Number of decoder frames to predict per iteration. Set the initial values if gradual training is enabled.
"gradual_training": [[0, 7, 4]], //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceeed.
"gradual_training": [[0, 7, 4], [1, 5, 2]], //set gradual training steps [first_step, r, batch_size]. If it is null, gradual training is disabled. For Tacotron, you might need to reduce the 'batch_size' as you proceeed.
"loss_masking": true, // enable / disable loss masking against the sequence padding.
"ga_alpha": 10.0, // weight for guided attention loss. If > 0, guided attention is enabled.
"mixed_precision": false,
Expand Down

0 comments on commit 0213e1c

Please sign in to comment.