We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 1a6e737 · commit e99daaf · Copy full SHA for e99daaf
1 file changed
onmt/utils/parse.py
@@ -121,8 +121,8 @@ def validate_train_opts(cls, opt):
121
"Number of attention_dropout values must match accum_steps values"
122
123
assert not(opt.max_generator_batches > 0 and opt.lambda_cosine != 0), \
124
- "-lambda_cosine loss is not implemented for max_generator_batches > 0."
125
-
+ "-lambda_cosine loss is not implemented " \
+ "for max_generator_batches > 0."
126
127
@classmethod
128
def validate_translate_opts(cls, opt):
0 commit comments