diff --git a/app.py b/app.py
index 1f2bdf2e3..8b9cfb8a3 100644
--- a/app.py
+++ b/app.py
@@ -211,6 +211,7 @@ def get_min_max_step(data):
'special_characters_filter': [StatsKeys.special_char_ratio],
'stopwords_filter': [StatsKeys.stopwords_ratio],
'text_length_filter': [StatsKeys.text_len],
+ 'token_num_filter': [StatsKeys.num_token],
'words_num_filter': [StatsKeys.num_words],
'word_repetition_filter': [StatsKeys.word_rep_ratio],
}
diff --git a/configs/config_all.yaml b/configs/config_all.yaml
index 635155e8d..1e86c6fb7 100644
--- a/configs/config_all.yaml
+++ b/configs/config_all.yaml
@@ -13,8 +13,8 @@ np: 4 # number of subproce
text_keys: 'content' # the key name of field where the sample texts to be processed, e.g., `text`, `instruction`, `output`, ...
# Note: currently, we support specify only ONE key for each op, for cases requiring multiple keys, users can specify the op multiple times. We will only use the first key of `text_keys` when you set multiple keys.
suffixes: [] # the suffix of files that will be read. For example: '.txt', 'txt' or ['txt', '.pdf', 'docx']
-use_cache: true # whether to use the cache management of hugging face datasets. It might take up lots of disk space when using cache
-ds_cache_dir: '~/.cache/huggingface/datasets' # cache dir for hugging face datasets. In default it's the default cache dir "~/.cache/huggingface/datasets". If this argument is reset by users, it will override the default cache dir
+use_cache: true # whether to use the cache management of Hugging Face datasets. It might take up lots of disk space when using cache
+ds_cache_dir: '~/.cache/huggingface/datasets' # cache dir for Hugging Face datasets. It defaults to "~/.cache/huggingface/datasets". If this argument is set by users, it will override the default cache dir
use_checkpoint: false # whether to use the checkpoint management to save the latest version of dataset to work dir when processing. Rerun the same config will reload the checkpoint and skip ops before it. Cache will be disabled when using checkpoint. If args of ops before the checkpoint are changed, all ops will be rerun from the beginning.
temp_dir: null # the path to the temp directory to store intermediate caches when cache is disabled, these cache files will be removed on-the-fly. In default, it's None, so the temp dir will be specified by system. NOTICE: you should be caution when setting this argument because it might cause unexpected program behaviors when this path is set to an unsafe directory.
open_tracer: false # whether to open the tracer to trace the changes during process. It might take more time when opening tracer
@@ -127,6 +127,10 @@ process:
- text_length_filter: # filter text with length out of specific range
min_len: 10 # the min length of filter range
max_len: 10000 # the max length of filter range
+ - token_num_filter: # filter text with total token number out of specific range
+    hf_tokenizer: EleutherAI/pythia-6.9b-deduped  # name of the Hugging Face tokenizer to use
+ min_num: 10 # the min number of filter range
+ max_num: 10000 # the max number of filter range
- words_num_filter: # filter text with number of words out of specific range
lang: en # sample in which language
tokenization: false # whether to use model to tokenize documents
diff --git a/data_juicer/ops/filter/__init__.py b/data_juicer/ops/filter/__init__.py
index 24228326e..b6e48ac5a 100644
--- a/data_juicer/ops/filter/__init__.py
+++ b/data_juicer/ops/filter/__init__.py
@@ -4,4 +4,4 @@
perplexity_filter, special_characters_filter,
specified_field_filter, specified_numeric_field_filter,
stopwords_filter, suffix_filter, text_length_filter,
- word_num_filter, word_repetition_filter)
+ token_num_filter, word_num_filter, word_repetition_filter)
diff --git a/data_juicer/ops/filter/token_num_filter.py b/data_juicer/ops/filter/token_num_filter.py
new file mode 100644
index 000000000..21066382a
--- /dev/null
+++ b/data_juicer/ops/filter/token_num_filter.py
@@ -0,0 +1,61 @@
+import sys
+
+from jsonargparse.typing import PositiveInt
+
+from data_juicer.utils.constant import Fields, StatsKeys
+from data_juicer.utils.model_utils import get_model, prepare_model
+
+from ..base_op import OPERATORS, Filter
+from ..common import get_words_from_document
+
+
+@OPERATORS.register_module('token_num_filter')
+class TokenNumFilter(Filter):
+ """Filter to keep samples with total token number within a specific
+ range."""
+
+ def __init__(self,
+ hf_tokenizer: str = 'EleutherAI/pythia-6.9b-deduped',
+ min_num: PositiveInt = 10,
+ max_num: PositiveInt = sys.maxsize,
+ *args,
+ **kwargs):
+ """
+ Initialization method.
+
+        :param hf_tokenizer: the name of the Hugging Face tokenizer to use.
+        :param min_num: the minimum token number of this filter range.
+            Samples whose token number is below this value will be
+            filtered out.
+        :param max_num: the maximum token number of this filter range.
+            Samples whose token number exceeds this value will be
+            filtered out.
+ :param args: extra args
+ :param kwargs: extra args
+ """
+ super().__init__(*args, **kwargs)
+ self.min_num = min_num
+ self.max_num = max_num
+ self.hf_tokenizer = hf_tokenizer
+ self.model_key = prepare_model(model_type='huggingface',
+ model_key=hf_tokenizer)
+
+ def compute_stats(self, sample):
+ # check if it's computed already
+ if StatsKeys.num_token in sample[Fields.stats]:
+ return sample
+
+ tokenizer = get_model(self.model_key, model_type='huggingface')
+ tokens = get_words_from_document(
+ sample[self.text_key],
+ token_func=tokenizer.tokenize if tokenizer else None
+ )
+ sample[Fields.stats][StatsKeys.num_token] = len(tokens)
+ return sample
+
+ def process(self, sample):
+ if self.min_num <= sample[Fields.stats][
+ StatsKeys.num_token] <= self.max_num:
+ return True
+ else:
+ return False
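
The new op can be tried on a single sample without a full pipeline. Below is a minimal sketch, assuming the `Filter` base class defaults the text field to `text` (as the unit test later in this patch does) and that the stats field is pre-initialized; the tokenizer is downloaded from the Hugging Face Hub on first use.

```python
from data_juicer.ops.filter.token_num_filter import TokenNumFilter
from data_juicer.utils.constant import Fields, StatsKeys

op = TokenNumFilter(min_num=10, max_num=20)
# a sample needs an (initially empty) stats field, as the unit test sets up
sample = {'text': "Today is Sunday and it's a happy day!", Fields.stats: {}}
sample = op.compute_stats(sample)                 # fills StatsKeys.num_token
print(sample[Fields.stats][StatsKeys.num_token])  # token count of the text
print(op.process(sample))                         # True if the count is in [10, 20]
```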
diff --git a/data_juicer/utils/constant.py b/data_juicer/utils/constant.py
index 37876e2c1..31bc9f0f8 100644
--- a/data_juicer/utils/constant.py
+++ b/data_juicer/utils/constant.py
@@ -21,6 +21,7 @@ class StatsKeys(object):
special_char_ratio = 'special_char_ratio'
stopwords_ratio = 'stopwords_ratio'
text_len = 'text_len'
+ num_token = 'num_token'
num_words = 'num_words'
word_rep_ratio = 'word_rep_ratio'
diff --git a/data_juicer/utils/model_utils.py b/data_juicer/utils/model_utils.py
index 761d6a813..8d416d9f2 100644
--- a/data_juicer/utils/model_utils.py
+++ b/data_juicer/utils/model_utils.py
@@ -61,12 +61,12 @@ def check_model(model_name, args=(), force=False):
try:
model_link = os.path.join(MODEL_LINKS, true_model_name)
- wget.download(model_link, mdp)
+ wget.download(model_link, mdp, bar=None)
except: # noqa: E722
try:
backup_model_link = os.path.join(
BACKUP_MODEL_LINKS[model_name], true_model_name)
- wget.download(backup_model_link, mdp)
+ wget.download(backup_model_link, mdp, bar=None)
except: # noqa: E722
logger.error(
f'Downloading model [{true_model_name}] error. '
@@ -165,7 +165,8 @@ def prepare_huggingface_tokenizer(tokenizer_name):
"""
from transformers import AutoTokenizer
logger.info('Loading tokenizer from HuggingFace...')
- tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
+ tokenizer = AutoTokenizer.from_pretrained(tokenizer_name,
+ trust_remote_code=True)
return tokenizer
def prepare_diversity_model(model_name, lang):
@@ -178,7 +179,6 @@ def prepare_diversity_model(model_name, lang):
:return: corresponding diversity model
"""
import spacy
- print(lang)
assert lang in ['zh', 'en'], 'Diversity only support zh and en'
model_name = model_name % lang
logger.info(f'Loading spacy model [{model_name}]...')
diff --git a/demos/data_visualization_op_effect/app.py b/demos/data_visualization_op_effect/app.py
index 2d15ee9f8..0e6f60fc8 100644
--- a/demos/data_visualization_op_effect/app.py
+++ b/demos/data_visualization_op_effect/app.py
@@ -147,6 +147,7 @@ def get_min_max_step(data):
'special_characters_filter': [StatsKeys.special_char_ratio],
'stopwords_filter': [StatsKeys.stopwords_ratio],
'text_length_filter': [StatsKeys.text_len],
+ 'token_num_filter': [StatsKeys.num_token],
'words_num_filter': [StatsKeys.num_words],
'word_repetition_filter': [StatsKeys.word_rep_ratio],
}
diff --git a/demos/tool_quality_classifier/quality_classifier/qc_utils.py b/demos/tool_quality_classifier/quality_classifier/qc_utils.py
index e989a9ccc..862e6f1bd 100644
--- a/demos/tool_quality_classifier/quality_classifier/qc_utils.py
+++ b/demos/tool_quality_classifier/quality_classifier/qc_utils.py
@@ -54,7 +54,8 @@ def prepare_model(model_name, model_path=DATA_JUICER_MODELS_CACHE):
# No specific models in local file systems. Download them from remote.
os.makedirs(model_path, exist_ok=True)
wget.download(os.path.join(MODEL_LINKS, f'{model_name}.zip'),
- os.path.join(model_path, f'{model_name}.zip'))
+ os.path.join(model_path, f'{model_name}.zip'),
+ bar=None)
with zipfile.ZipFile(os.path.join(model_path, f'{model_name}.zip')) as zip:
zip.extractall(os.path.join(model_path))
return PipelineModel.load(real_model_path)
diff --git a/docs/Operators.md b/docs/Operators.md
index 9501fe4d7..8521a4722 100644
--- a/docs/Operators.md
+++ b/docs/Operators.md
@@ -11,7 +11,7 @@ The operators in Data-Juicer are categorized into 5 types.
|-----------------------------------|:------:|-------------------------------------------------|
| [ Formatter ]( #formatter ) | 7 | Discovers, loads, and canonicalizes source data |
| [ Mapper ]( #mapper ) | 19 | Edits and transforms samples |
-| [ Filter ]( #filter ) | 15 | Filters out low-quality samples |
+| [ Filter ]( #filter ) | 16 | Filters out low-quality samples |
| [ Deduplicator ]( #deduplicator ) | 3 | Detects and removes duplicate samples |
| [ Selector ]( #selector ) | 2 | Selects top samples based on ranking |
@@ -83,6 +83,7 @@ All the specific operators are listed below, each featured with several capabili
| stopwords_filter | General | en, zh | Keeps samples with stopword ratio above the specified threshold |
| suffix_filter | General | en, zh | Keeps samples with specified suffixes |
| text_length_filter | General | en, zh | Keeps samples with total text length within the specified range |
+| token_num_filter | General | en, zh | Keeps samples with token count within the specified range |
| word_num_filter | General | en, zh | Keeps samples with word count within the specified range |
| word_repetition_filter | General | en, zh | Keeps samples with word-level n-gram repetition ratio within the specified range |
diff --git a/docs/Operators_ZH.md b/docs/Operators_ZH.md
index bb52d71a9..a2abfeadc 100644
--- a/docs/Operators_ZH.md
+++ b/docs/Operators_ZH.md
@@ -6,13 +6,13 @@
Data-Juicer 中的算子分为以下 5 种类型。
-| 类型 | 数量 | 描述 |
-|------------------------------------|:---:|---------------|
-| [ Formatter ]( #formatter ) | 7 | 发现、加载、规范化原始数据 |
-| [ Mapper ]( #mapper ) | 19 | 对数据样本进行编辑和转换 |
-| [ Filter ]( #filter ) | 15 | 过滤低质量样本 |
-| [ Deduplicator ]( #deduplicator ) | 3 | 识别、删除重复样本 |
-| [ Selector ]( #selector ) | 2 | 基于排序选取高质量样本 |
+| 类型 | 数量 | 描述 |
+|------------------------------------|:--:|---------------|
+| [ Formatter ]( #formatter ) | 7 | 发现、加载、规范化原始数据 |
+| [ Mapper ]( #mapper ) | 19 | 对数据样本进行编辑和转换 |
+| [ Filter ]( #filter ) | 16 | 过滤低质量样本 |
+| [ Deduplicator ]( #deduplicator ) | 3 | 识别、删除重复样本 |
+| [ Selector ]( #selector ) | 2 | 基于排序选取高质量样本 |
下面列出所有具体算子,每种算子都通过多个标签来注明其主要功能。
@@ -64,23 +64,24 @@ Data-Juicer 中的算子分为以下 5 种类型。
## Filter
-| 算子 | 场景 | 语言 | 描述 |
-|---------------------------------|----------|---------|----------------------------------------------------------|
-| alphanumeric_filter | General | en, zh | 保留字母数字比例在指定范围内的样本 |
-| average_line_length_filter | Code | en, zh | 保留平均行长度在指定范围内的样本 |
-| character_repetition_filter | General | en, zh | 保留 char-level n-gram 重复比率在指定范围内的样本 |
-| flagged_words_filter | General | en, zh | 保留使标记字比率保持在指定阈值以下的样本 |
-| language_id_score_filter | General | en, zh | 保留特定语言的样本,通过预测的置信度得分来判断 |
-| maximum_line_length_filter | Code | en, zh | 保留最大行长度在指定范围内的样本 |
-| perplexity_filter | General | en, zh | 保留困惑度低于指定阈值的样本 |
-| special_characters_filter | General | en, zh | 保留 special-char 比率的在指定范围内的样本 |
-| specified_field_filter | General | en, zh | 根据字段过滤样本,要求字段的值处于指定目标中 |
-| specified_numeric_field_filter | General | en, zh | 根据字段过滤样本,要求字段的值处于指定范围(针对数字类型) |
-| stopwords_filter | General | en, zh | 保留停用词比率高于指定阈值的样本 |
-| suffix_filter | General | en, zh | 保留包含特定后缀的样本 |
-| text_length_filter | General | en, zh | 保留总文本长度在指定范围内的样本 |
-| word_num_filter | General | en, zh | 保留字数在指定范围内的样本 |
-| word_repetition_filter | General | en, zh | 保留 word-level n-gram 重复比率在指定范围内的样本 |
+| 算子 | 场景 | 语言 | 描述 |
+|--------------------------------|----------|---------|------------------------------------|
+| alphanumeric_filter | General | en, zh | 保留字母数字比例在指定范围内的样本 |
+| average_line_length_filter | Code | en, zh | 保留平均行长度在指定范围内的样本 |
+| character_repetition_filter | General | en, zh | 保留 char-level n-gram 重复比率在指定范围内的样本 |
+| flagged_words_filter | General | en, zh | 保留使标记字比率保持在指定阈值以下的样本 |
+| language_id_score_filter | General | en, zh | 保留特定语言的样本,通过预测的置信度得分来判断 |
+| maximum_line_length_filter | Code | en, zh | 保留最大行长度在指定范围内的样本 |
+| perplexity_filter | General | en, zh | 保留困惑度低于指定阈值的样本 |
+| special_characters_filter      | General  | en, zh  | 保留 special-char 比率在指定范围内的样本            |
+| specified_field_filter | General | en, zh | 根据字段过滤样本,要求字段的值处于指定目标中 |
+| specified_numeric_field_filter | General | en, zh | 根据字段过滤样本,要求字段的值处于指定范围(针对数字类型) |
+| stopwords_filter | General | en, zh | 保留停用词比率高于指定阈值的样本 |
+| suffix_filter | General | en, zh | 保留包含特定后缀的样本 |
+| text_length_filter | General | en, zh | 保留总文本长度在指定范围内的样本 |
+| token_num_filter               | General  | en, zh  | 保留 token 数在指定范围内的样本             |
+| word_num_filter | General | en, zh | 保留字数在指定范围内的样本 |
+| word_repetition_filter | General | en, zh | 保留 word-level n-gram 重复比率在指定范围内的样本 |
## Deduplicator
diff --git a/tests/ops/filter/test_token_num_filter.py b/tests/ops/filter/test_token_num_filter.py
new file mode 100644
index 000000000..ab1efaeb6
--- /dev/null
+++ b/tests/ops/filter/test_token_num_filter.py
@@ -0,0 +1,56 @@
+import unittest
+
+from datasets import Dataset
+
+from data_juicer.ops.filter.token_num_filter import TokenNumFilter
+from data_juicer.utils.constant import Fields, StatsKeys
+
+
+class TokenNumFilterTest(unittest.TestCase):
+
+ def test_token_num(self):
+ src = [
+ {"text": "Today is Sunday and it's a happy day!"},
+ {"text": "Do you need a cup of coffee?"},
+ {"text": "你好,请问你是谁"},
+ {"text": "Sur la plateforme MT4, plusieurs manières d'accéder à "
+ "ces fonctionnalités sont conçues simultanément."},
+ {"text": "欢迎来到阿里巴巴!"},
+ {"text": "This paper proposed a novel method on LLM pretraining."},
+ ]
+ tgt = [
+ 10, 8, 9, 31, 14, 12,
+ ]
+ ds = Dataset.from_list(src)
+ op = TokenNumFilter()
+ ds = ds.add_column(Fields.stats, [{}] * len(ds))
+ ds = ds.map(op.compute_stats)
+ stats = ds[Fields.stats]
+ self.assertEqual([item[StatsKeys.num_token] for item in stats], tgt)
+
+ def test_token_num_filter(self):
+ src = [
+ {"text": "Today is Sunday and it's a happy day!"},
+ {"text": "Do you need a cup of coffee?"},
+ {"text": "你好,请问你是谁"},
+ {"text": "Sur la plateforme MT4, plusieurs manières d'accéder à "
+ "ces fonctionnalités sont conçues simultanément."},
+ {"text": "欢迎来到阿里巴巴!"},
+ {"text": "This paper proposed a novel method on LLM pretraining."},
+ ]
+ tgt = [
+ {"text": "Today is Sunday and it's a happy day!"},
+ {"text": "欢迎来到阿里巴巴!"},
+ {"text": "This paper proposed a novel method on LLM pretraining."},
+ ]
+ ds = Dataset.from_list(src)
+ tgt = Dataset.from_list(tgt)
+ op = TokenNumFilter(min_num=10, max_num=20)
+ ds = ds.add_column(Fields.stats, [{}] * len(ds))
+ ds = ds.map(op.compute_stats)
+ ds = ds.filter(op.process)
+ self.assertEqual(ds['text'], tgt['text'])
+
+
+if __name__ == '__main__':
+ unittest.main()
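
The expected counts in `tgt` come directly from the Hugging Face tokenizer. A quick way to reproduce the first one (a sketch, assuming network access to the Hub):

```python
from transformers import AutoTokenizer

# same tokenizer as TokenNumFilter's default; the test above expects
# this sentence to tokenize into 10 tokens
tokenizer = AutoTokenizer.from_pretrained('EleutherAI/pythia-6.9b-deduped',
                                          trust_remote_code=True)
print(len(tokenizer.tokenize("Today is Sunday and it's a happy day!")))  # 10
```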
diff --git a/tools/postprocess/README.md b/tools/postprocess/README.md
index 9f1c7641a..de2dfe4bb 100644
--- a/tools/postprocess/README.md
+++ b/tools/postprocess/README.md
@@ -4,6 +4,26 @@ This folder contains some postprocess scripts for additional processing of your
## Usage
+### Count tokens for datasets
+
+Use [count_token.py](count_token.py) to count tokens for datasets.
+
+```shell
+python tools/postprocess/count_token.py \
+ --data_path \
+ --text_keys \
+ --tokenizer_method \
+ --num_proc
+
+# get help
+python tools/postprocess/count_token.py --help
+```
+
+- `data_path`: path to the input dataset. Only the `jsonl` format is supported for now.
+- `text_keys`: field keys whose text is included in the token count.
+- `tokenizer_method`: name of the Hugging Face tokenizer.
+- `num_proc`: number of processes used to count tokens.
+
### Mix multiple datasets with optional weights
Use [data_mixture.py](data_mixture.py) to mix multiple datasets.
diff --git a/tools/postprocess/README_ZH.md b/tools/postprocess/README_ZH.md
index f9fb5bc18..99b5579c9 100644
--- a/tools/postprocess/README_ZH.md
+++ b/tools/postprocess/README_ZH.md
@@ -4,6 +4,26 @@
## 用法
+### 为数据集计算 token 数目
+
+使用 [count_token.py](count_token.py) 计算数据集包含的 token 数目。
+
+```shell
+python tools/postprocess/count_token.py \
+ --data_path \
+ --text_keys \
+ --tokenizer_method \
+ --num_proc
+
+# get help
+python tools/postprocess/count_token.py --help
+```
+
+- `data_path`: 输入数据集的路径。目前只支持 `jsonl` 格式。
+- `text_keys`: 单个样本中会被算入 token 数目的字段名称。
+- `tokenizer_method`: 使用的 Hugging Face tokenizer 的名称。
+- `num_proc`: 计算 token 数目时所用的进程数。
+
### 将多个数据集以可选的权重混合
使用 [data_mixture.py](data_mixture.py) 将多个数据集混合。
diff --git a/tools/postprocess/count_token.py b/tools/postprocess/count_token.py
new file mode 100644
index 000000000..8edfee2dd
--- /dev/null
+++ b/tools/postprocess/count_token.py
@@ -0,0 +1,57 @@
+from multiprocessing import Pool
+
+import fire
+import jsonlines as jl
+from loguru import logger
+from tqdm import tqdm
+from transformers import AutoTokenizer
+
+
+TOKENIZER = None
+
+def count_token_single(sample, text_keys):
+ global TOKENIZER
+ num = 0
+ for key in text_keys:
+ num += len(TOKENIZER.tokenize(sample[key]))
+ return num
+
+
+def prepare_tokenizer(tokenizer_method):
+ global TOKENIZER
+ logger.info('Loading tokenizer from HuggingFace...')
+ TOKENIZER = AutoTokenizer.from_pretrained(tokenizer_method,
+ trust_remote_code=True)
+
+def main(data_path,
+ text_keys='text',
+ tokenizer_method='EleutherAI/pythia-6.9b-deduped',
+ num_proc=1):
+ """
+ Count the number of tokens for given dataset and tokenizer.
+
+    :param data_path: path to the input dataset. Only 'jsonl' is supported.
+    :param text_keys: field keys whose text is included in the token count.
+    :param tokenizer_method: name of the Hugging Face tokenizer.
+    :param num_proc: number of processes used to count tokens.
+ """
+ prepare_tokenizer(tokenizer_method)
+
+ if isinstance(text_keys, str):
+ text_keys = [text_keys]
+
+ with jl.open(data_path) as reader:
+ token_count = 0
+ result_list = []
+ with Pool(num_proc) as p:
+ for sample in tqdm(reader):
+ result_list.append(p.apply_async(count_token_single,
+ args=(sample, text_keys,)))
+ for res in tqdm(result_list):
+ token_count += res.get()
+
+ logger.info(f'Total num of tokens: {token_count}')
+
+
+if __name__ == '__main__':
+ fire.Fire(main)
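
For reference, a concrete invocation of the script might look like the following; the dataset path and process count are placeholders, and the tokenizer matches the script's default:

```shell
python tools/postprocess/count_token.py \
    --data_path ./my-dataset.jsonl \
    --text_keys text \
    --tokenizer_method EleutherAI/pythia-6.9b-deduped \
    --num_proc 4
```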
diff --git a/tools/quality_classifier/qc_utils.py b/tools/quality_classifier/qc_utils.py
index f7baed5c6..af6b776cd 100644
--- a/tools/quality_classifier/qc_utils.py
+++ b/tools/quality_classifier/qc_utils.py
@@ -60,7 +60,8 @@ def prepare_model(model_name, model_path=DATA_JUICER_MODELS_CACHE):
# No specific models in local file systems. Download them from remote.
os.makedirs(model_path, exist_ok=True)
wget.download(os.path.join(MODEL_LINKS, f'{model_name}.zip'),
- os.path.join(model_path, f'{model_name}.zip'))
+ os.path.join(model_path, f'{model_name}.zip'),
+ bar=None)
# extract the compressed model file into a model directory
with zipfile.ZipFile(os.path.join(model_path, f'{model_name}.zip')) as zp:
zp.extractall(os.path.join(model_path))