diff --git a/crossfit/op/tokenize.py b/crossfit/op/tokenize.py
index 3f549e3..dee2ac4 100644
--- a/crossfit/op/tokenize.py
+++ b/crossfit/op/tokenize.py
@@ -159,7 +159,7 @@ def from_pretrained(cls, name, cache_dir=None):
 
         # Save vocabulary to disk
         # `save_vocabulary()` automatically appends `-vocab.txt` suffix.
-        vocab_path = tokenizer.save_vocabulary(cache_dir, "{tokenizer_class}")[0]
+        vocab_path = tokenizer.save_vocabulary(cache_dir, f"{tokenizer_class}")[0]
 
         # Hash the vocabulary and save it
         hash_vocab(vocab_path, hashed_vocab_path)