From 0f2e61431a958184534751beab39f9d7adadb041 Mon Sep 17 00:00:00 2001
From: DrownFish19
Date: Thu, 31 Oct 2024 07:37:50 +0000
Subject: [PATCH 1/3] update qwen tokenizer

---
 paddlenlp/transformers/qwen/tokenizer.py | 32 +++++++++++++++++++++---
 1 file changed, 29 insertions(+), 3 deletions(-)

diff --git a/paddlenlp/transformers/qwen/tokenizer.py b/paddlenlp/transformers/qwen/tokenizer.py
index ca682b40f17c..23443d8883f2 100644
--- a/paddlenlp/transformers/qwen/tokenizer.py
+++ b/paddlenlp/transformers/qwen/tokenizer.py
@@ -125,14 +125,40 @@ def convert_tokens_to_ids(self, tokens: Union[bytes, str, List[Union[bytes, str]
                 ids.append(self.mergeable_ranks.get(token))
         return ids
 
+    def _update_tiktoken(self, tokens: List[str], special_tokens: bool = False) -> int:
+        if special_tokens:
+            added_tokens = []
+            for token in tokens:
+                if token in self.special_tokens:
+                    continue
+
+                token_id = len(self.mergeable_ranks) + len(self.special_tokens)
+                self.special_tokens[token] = token_id
+                self.decoder[token_id] = token
+
+                added_tokens.append(token)
+
+            import tiktoken
+            self.tokenizer = tiktoken.Encoding(
+                "Qwen",
+                pat_str=PAT_STR,
+                mergeable_ranks=self.mergeable_ranks,
+                special_tokens=self.special_tokens,
+            )
+
+            return len(added_tokens)
+        else:
+            raise ValueError("Adding regular tokens is not supported")
+
     def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
         if not special_tokens and new_tokens:
             raise ValueError("Adding regular tokens is not supported")
+        new_tokens_str = []
         for token in new_tokens:
             surface_form = token.content if isinstance(token, AddedToken) else token
-            if surface_form not in SPECIAL_TOKENS:
-                raise ValueError("Adding unknown special tokens is not supported")
-        return 0
+            new_tokens_str.append(surface_form)
+
+        return self._update_tiktoken(new_tokens_str, special_tokens)
 
     def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
         """

From f99fe98104710be00456277e2f882c1134bd2d7a Mon Sep 17 00:00:00 2001
From: DrownFish19
Date: Thu, 31 Oct 2024 08:30:00 +0000
Subject: [PATCH 2/3] fix lint

---
 paddlenlp/transformers/qwen/tokenizer.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/paddlenlp/transformers/qwen/tokenizer.py b/paddlenlp/transformers/qwen/tokenizer.py
index 23443d8883f2..02fe5926d5fc 100644
--- a/paddlenlp/transformers/qwen/tokenizer.py
+++ b/paddlenlp/transformers/qwen/tokenizer.py
@@ -139,6 +139,7 @@ def _update_tiktoken(self, tokens: List[str], special_tokens: bool = False) -> i
                 added_tokens.append(token)
 
             import tiktoken
+
             self.tokenizer = tiktoken.Encoding(
                 "Qwen",
                 pat_str=PAT_STR,

From 0ea84ca888a78620f8033a0f3317daa951ef2fb8 Mon Sep 17 00:00:00 2001
From: DrownFish19
Date: Thu, 31 Oct 2024 12:29:21 +0000
Subject: [PATCH 3/3] add test case

---
 paddlenlp/utils/import_utils.py           |  2 --
 tests/transformers/qwen/test_tokenizer.py | 46 +++++++++++++++++++++++
 2 files changed, 46 insertions(+), 2 deletions(-)
 create mode 100644 tests/transformers/qwen/test_tokenizer.py

diff --git a/paddlenlp/utils/import_utils.py b/paddlenlp/utils/import_utils.py
index 2c3796214a7f..33d4ee831002 100644
--- a/paddlenlp/utils/import_utils.py
+++ b/paddlenlp/utils/import_utils.py
@@ -52,7 +52,6 @@ def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[
             else:
                 # For packages other than "torch", don't attempt the fallback and set as not available
                 package_exists = False
-        logger.debug(f"Detected {pkg_name} version: {package_version}")
     if return_version:
         return package_exists, package_version
     else:
@@ -96,7 +95,6 @@ def _is_package_available(pkg_name: str, return_version: bool = False) -> Union[
            else:
                # For packages other than "torch", don't attempt the fallback and set as not available
                package_exists = False
-        logger.debug(f"Detected {pkg_name} version: {package_version}")
     if return_version:
         return package_exists, package_version
     else:
diff --git a/tests/transformers/qwen/test_tokenizer.py b/tests/transformers/qwen/test_tokenizer.py
new file mode 100644
index 000000000000..35412a2757db
--- /dev/null
+++ b/tests/transformers/qwen/test_tokenizer.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import unittest
+
+from paddlenlp.transformers import QWenTokenizer
+
+
+class Qwen2TokenizationTest(unittest.TestCase):
+    from_pretrained_id = "qwen/qwen-7b"
+    tokenizer_class = QWenTokenizer
+    test_slow_tokenizer = True
+    space_between_special_tokens = False
+    from_pretrained_kwargs = None
+    test_seq2seq = False
+
+    def setUp(self):
+        super().setUp()
+
+    def get_tokenizer(self, **kwargs):
+        return QWenTokenizer.from_pretrained(self.from_pretrained_id, **kwargs)
+
+    def test_add_special_tokens(self):
+        tokenizer = self.get_tokenizer()
+        origin_tokens_len = len(tokenizer)
+
+        add_tokens_num = tokenizer.add_special_tokens({"additional_special_tokens": ["<|test_token|>"]})
+        assert add_tokens_num == 1
+        assert len(tokenizer) == origin_tokens_len + 1
+
+        add_tokens_num = tokenizer.add_special_tokens({"unk_token": "<unk>"})
+        assert add_tokens_num == 1
+        assert len(tokenizer) == origin_tokens_len + 2
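
Taken together, the series makes `add_special_tokens` actually extend the vocabulary: `_add_tokens` now collects the new surface forms and hands them to `_update_tiktoken`, which assigns each unseen token the next free id, records it in `self.special_tokens` and `self.decoder`, and rebuilds the tiktoken `Encoding`. A minimal sketch of the resulting behavior follows; it assumes a build with this series applied and access to the `qwen/qwen-7b` checkpoint used in the test, and the surface form `<|demo_token|>` is hypothetical, chosen only for illustration.

# Usage sketch (not part of the patch); see assumptions above.
from paddlenlp.transformers import QWenTokenizer

tokenizer = QWenTokenizer.from_pretrained("qwen/qwen-7b")
before = len(tokenizer)

# add_special_tokens() funnels into the patched _add_tokens(), which now
# registers unseen surface forms via _update_tiktoken() instead of
# rejecting anything outside the built-in SPECIAL_TOKENS set.
added = tokenizer.add_special_tokens({"additional_special_tokens": ["<|demo_token|>"]})
assert added == 1
assert len(tokenizer) == before + 1

# The new token should resolve through self.special_tokens, and its id maps
# back to the surface form via the decoder entry the patch maintains.
new_id = tokenizer.convert_tokens_to_ids(["<|demo_token|>"])[0]
assert tokenizer.decoder[new_id] == "<|demo_token|>"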