Robinbg add megatron bert config #4924

Closed
230 changes: 230 additions & 0 deletions paddlenlp/transformers/artist/configuration.py
@@ -0,0 +1,230 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT model configuration"""
from __future__ import annotations

from typing import Dict

from paddlenlp.transformers.configuration_utils import PretrainedConfig

__all__ = ["ARTIST_PRETRAINED_INIT_CONFIGURATION", "ArtistConfig", "ARTIST_PRETRAINED_RESOURCE_FILES_MAP"]

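# NOTE: Artist (PAI-Painter) models use a joint image+text vocabulary. The
# first 16384 ids appear to be reserved for discrete image codes, so the text
# special tokens below are BERT vocabulary ids shifted by 16384:
# [PAD] 0 -> 16384, [CLS] 101 -> 16485, [SEP] 102 -> 16486.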
ARTIST_PRETRAINED_INIT_CONFIGURATION = {
    "pai-painter-base-zh": {
        "vocab_size": 37512,
        "hidden_size": 768,
        "num_hidden_layers": 12,
        "num_attention_heads": 12,
        "intermediate_size": 3072,
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.0,
        "attention_probs_dropout_prob": 0.0,
        "max_position_embeddings": 288,
        "type_vocab_size": 1,  # unused
        "initializer_range": 0.02,
        "pad_token_id": 16384,  # 0 + 16384
        "eos_token_id": 16486,  # 102 + 16384
        "bos_token_id": 16485,  # 101 + 16384
        "eol_token_id": 16486,  # 102 + 16384
    },
    "pai-painter-painting-base-zh": {
        "vocab_size": 37512,
        "hidden_size": 768,
        "num_hidden_layers": 12,
        "num_attention_heads": 12,
        "intermediate_size": 3072,
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.0,
        "attention_probs_dropout_prob": 0.0,
        "max_position_embeddings": 288,
        "type_vocab_size": 1,  # unused
        "initializer_range": 0.02,
        "pad_token_id": 16384,  # 0 + 16384
        "eos_token_id": 16486,  # 102 + 16384
        "bos_token_id": 16485,  # 101 + 16384
        "eol_token_id": 16486,  # 102 + 16384
    },
    "pai-painter-scenery-base-zh": {
        "vocab_size": 37512,
        "hidden_size": 768,
        "num_hidden_layers": 12,
        "num_attention_heads": 12,
        "intermediate_size": 3072,
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.0,
        "attention_probs_dropout_prob": 0.0,
        "max_position_embeddings": 288,
        "type_vocab_size": 1,  # unused
        "initializer_range": 0.02,
        "pad_token_id": 16384,  # 0 + 16384
        "eos_token_id": 16486,  # 102 + 16384
        "bos_token_id": 16485,  # 101 + 16384
        "eol_token_id": 16486,  # 102 + 16384
    },
    "pai-painter-commercial-base-zh": {
        "vocab_size": 37512,
        "hidden_size": 768,
        "num_hidden_layers": 12,
        "num_attention_heads": 12,
        "intermediate_size": 3072,
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.0,
        "attention_probs_dropout_prob": 0.0,
        "max_position_embeddings": 288,
        "type_vocab_size": 1,  # unused
        "initializer_range": 0.02,
        "pad_token_id": 16384,  # 0 + 16384
        "eos_token_id": 16486,  # 102 + 16384
        "bos_token_id": 16485,  # 101 + 16384
        "eol_token_id": 16486,  # 102 + 16384
    },
    "pai-painter-large-zh": {
        "vocab_size": 37512,
        "hidden_size": 1024,
        "num_hidden_layers": 24,
        "num_attention_heads": 16,
        "intermediate_size": 4096,
        "hidden_act": "gelu",
        "hidden_dropout_prob": 0.0,
        "attention_probs_dropout_prob": 0.0,
        "max_position_embeddings": 288,
        "type_vocab_size": 1,
        "initializer_range": 0.02,
        "pad_token_id": 16384,  # 0 + 16384
        "eos_token_id": 16486,  # 102 + 16384
        "bos_token_id": 16485,  # 101 + 16384
        "eol_token_id": 16486,  # 102 + 16384
    },
}

ARTIST_PRETRAINED_RESOURCE_FILES_MAP = {
    "model_state": {
        "pai-painter-base-zh": "https://bj.bcebos.com/paddlenlp/models/transformers/artist/pai-painter-base-zh/model_state.pdparams",
        "pai-painter-painting-base-zh": "https://bj.bcebos.com/paddlenlp/models/transformers/artist/pai-painter-painting-base-zh/model_state.pdparams",
        "pai-painter-scenery-base-zh": "https://bj.bcebos.com/paddlenlp/models/transformers/artist/pai-painter-scenery-base-zh/model_state.pdparams",
        "pai-painter-commercial-base-zh": "https://bj.bcebos.com/paddlenlp/models/transformers/artist/pai-painter-commercial-base-zh/model_state.pdparams",
        "pai-painter-large-zh": "https://bj.bcebos.com/paddlenlp/models/transformers/artist/pai-painter-large-zh/model_state.pdparams",
    }
}
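
# The URLs above are consumed by `from_pretrained`: passing a preset name such
# as "pai-painter-base-zh" downloads and caches the matching
# model_state.pdparams (standard paddlenlp PretrainedModel behavior, nothing
# Artist-specific).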


class ArtistConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`BertModel`] or a [`TFBertModel`]. It is used to
instantiate a BERT model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the BERT
bert-base-uncased architecture.

Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.


Args:
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`BertModel`] or [`TFBertModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, *optional*, defaults to 2):
The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`].
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
classifier_dropout (`float`, *optional*):
The dropout ratio for the classification head.

Examples:

```python
>>> from paddlenlp.transformers import BertModel, BertConfig

>>> # Initializing a BERT bert-base-uncased style configuration
>>> configuration = BertConfig()

>>> # Initializing a model from the bert-base-uncased style configuration
>>> model = BertModel(configuration)

>>> # Accessing the model configuration
>>> configuration = model.config
```"""
    model_type = "artist"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
    pretrained_init_configuration = ARTIST_PRETRAINED_INIT_CONFIGURATION

    def __init__(
        self,
        vocab_size: int = 37512,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.0,
        attention_probs_dropout_prob: float = 0.0,
        max_position_embeddings: int = 288,
        type_vocab_size: int = 1,
        initializer_range: float = 0.02,
        pad_token_id: int = 16384,
        eos_token_id: int = 16486,
        bos_token_id: int = 16485,
        eol_token_id: int = 16486,
        pool_act: str = "tanh",
        fuse: bool = False,
        layer_norm_eps: float = 1e-12,
        use_cache: bool = False,
        **kwargs
    ):
        # Forward the special token ids to PretrainedConfig so they are
        # registered (and serialized) like any other config attribute.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.eol_token_id = eol_token_id
        self.pool_act = pool_act
        self.fuse = fuse

        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
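
A minimal usage sketch for the new configuration class (imports use the fully qualified module path from this diff, since whether `ArtistConfig` is also re-exported from `paddlenlp.transformers` depends on the package `__init__`). The preset names and field values come straight from the dicts above:

```python
from paddlenlp.transformers.artist.configuration import (
    ARTIST_PRETRAINED_INIT_CONFIGURATION,
    ArtistConfig,
)

# The constructor defaults mirror the pai-painter-base-zh preset.
config = ArtistConfig()
assert config.hidden_size == 768
assert config.max_position_embeddings == 288

# Any registered preset dict can be splatted into the constructor.
large = ArtistConfig(**ARTIST_PRETRAINED_INIT_CONFIGURATION["pai-painter-large-zh"])
assert large.num_hidden_layers == 24
assert large.eos_token_id == 16486

# attribute_map should alias the legacy names: `num_classes` -> `num_labels`
# and `dropout` -> `classifier_dropout`.
large.num_classes = 5
assert large.num_labels == 5
```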
116 changes: 11 additions & 105 deletions paddlenlp/transformers/artist/modeling.py
@@ -27,110 +27,16 @@
"ArtistForConditionalGeneration",
]

# Map "gelu_python" activations onto paddle's gelu implementation.
F.gelu_python = F.gelu

-pretrained_init_configuration = {
-    "pai-painter-base-zh": {
-        "vocab_size": 37512,
-        "hidden_size": 768,
-        "num_hidden_layers": 12,
-        "num_attention_heads": 12,
-        "intermediate_size": 3072,
-        "hidden_act": "gelu_python",
-        "hidden_dropout_prob": 0.0,
-        "attention_probs_dropout_prob": 0.0,
-        "max_position_embeddings": 288,
-        "type_vocab_size": 1, # no use
-        "initializer_range": 0.02,
-        "pad_token_id": 16384, # 0 + 16384
-        "eos_token_id": 16486, # 102 + 16384
-        "bos_token_id": 16485, # 101 + 16384
-        "eol_token_id": 16486, # 102 + 16384
-    },
-    "pai-painter-painting-base-zh": {
-        "vocab_size": 37512,
-        "hidden_size": 768,
-        "num_hidden_layers": 12,
-        "num_attention_heads": 12,
-        "intermediate_size": 3072,
-        "hidden_act": "gelu_python",
-        "hidden_dropout_prob": 0.0,
-        "attention_probs_dropout_prob": 0.0,
-        "max_position_embeddings": 288,
-        "type_vocab_size": 1, # no use
-        "initializer_range": 0.02,
-        "pad_token_id": 16384, # 0 + 16384
-        "eos_token_id": 16486, # 102 + 16384
-        "bos_token_id": 16485, # 101 + 16384
-        "eol_token_id": 16486, # 102 + 16384
-    },
-    "pai-painter-scenery-base-zh": {
-        "vocab_size": 37512,
-        "hidden_size": 768,
-        "num_hidden_layers": 12,
-        "num_attention_heads": 12,
-        "intermediate_size": 3072,
-        "hidden_act": "gelu_python",
-        "hidden_dropout_prob": 0.0,
-        "attention_probs_dropout_prob": 0.0,
-        "max_position_embeddings": 288,
-        "type_vocab_size": 1, # no use
-        "initializer_range": 0.02,
-        "pad_token_id": 16384, # 0 + 16384
-        "eos_token_id": 16486, # 102 + 16384
-        "bos_token_id": 16485, # 101 + 16384
-        "eol_token_id": 16486, # 102 + 16384
-    },
-    "pai-painter-commercial-base-zh": {
-        "vocab_size": 37512,
-        "hidden_size": 768,
-        "num_hidden_layers": 12,
-        "num_attention_heads": 12,
-        "intermediate_size": 3072,
-        "hidden_act": "gelu_python",
-        "hidden_dropout_prob": 0.0,
-        "attention_probs_dropout_prob": 0.0,
-        "max_position_embeddings": 288,
-        "type_vocab_size": 1, # no use
-        "initializer_range": 0.02,
-        "pad_token_id": 16384, # 0 + 16384
-        "eos_token_id": 16486, # 102 + 16384
-        "bos_token_id": 16485, # 101 + 16384
-        "eol_token_id": 16486, # 102 + 16384
-    },
-    "pai-painter-large-zh": {
-        "vocab_size": 37512,
-        "hidden_size": 1024,
-        "num_hidden_layers": 24,
-        "num_attention_heads": 16,
-        "intermediate_size": 4096,
-        "hidden_act": "gelu_python",
-        "hidden_dropout_prob": 0.0,
-        "attention_probs_dropout_prob": 0.0,
-        "max_position_embeddings": 288,
-        "type_vocab_size": 1,
-        "initializer_range": 0.02,
-        "pad_token_id": 16384, # 0 + 16384
-        "eos_token_id": 16486, # 102 + 16384
-        "bos_token_id": 16485, # 101 + 16384
-        "eol_token_id": 16486, # 102 + 16384
-    },
-}
-pretrained_resource_files_map = {
-    "model_state": {
-        "pai-painter-base-zh": "https://bj.bcebos.com/paddlenlp/models/transformers/artist/pai-painter-base-zh/model_state.pdparams",
-        "pai-painter-painting-base-zh": "https://bj.bcebos.com/paddlenlp/models/transformers/artist/pai-painter-painting-base-zh/model_state.pdparams",
-        "pai-painter-scenery-base-zh": "https://bj.bcebos.com/paddlenlp/models/transformers/artist/pai-painter-scenery-base-zh/model_state.pdparams",
-        "pai-painter-commercial-base-zh": "https://bj.bcebos.com/paddlenlp/models/transformers/artist/pai-painter-commercial-base-zh/model_state.pdparams",
-        "pai-painter-large-zh": "https://bj.bcebos.com/paddlenlp/models/transformers/artist/pai-painter-large-zh/model_state.pdparams",
-    }
-}
+from .configuration import (
+    ARTIST_PRETRAINED_INIT_CONFIGURATION,
+    ARTIST_PRETRAINED_RESOURCE_FILES_MAP,
+    ArtistConfig,
+)


class ArtistModel(GPTModel):
-    pretrained_init_configuration = pretrained_init_configuration
-    pretrained_resource_files_map = pretrained_resource_files_map
+    pretrained_init_configuration = ARTIST_PRETRAINED_INIT_CONFIGURATION
+    pretrained_resource_files_map = ARTIST_PRETRAINED_RESOURCE_FILES_MAP


class ArtistForConditionalGeneration(GPTLMHeadModel):
@@ -143,8 +49,8 @@ class ArtistForConditionalGeneration(GPTLMHeadModel):

"""

pretrained_init_configuration = pretrained_init_configuration
pretrained_resource_files_map = pretrained_resource_files_map
pretrained_init_configuration = ARTIST_PRETRAINED_INIT_CONFIGURATION
pretrained_resource_files_map = ARTIST_PRETRAINED_RESOURCE_FILES_MAP

    def __init__(self, gpt):
        super().__init__(gpt)
@@ -168,8 +74,8 @@ class ArtistForImageGeneration(ArtistForConditionalGeneration):
            The vocabulary size of image.
            Defaults to `16384`.
    """

-    pretrained_init_configuration = pretrained_init_configuration
-    pretrained_resource_files_map = pretrained_resource_files_map
+    pretrained_init_configuration = ARTIST_PRETRAINED_INIT_CONFIGURATION
+    pretrained_resource_files_map = ARTIST_PRETRAINED_RESOURCE_FILES_MAP

    def __init__(self, gpt, image_vocab_size=16384):
        super().__init__(gpt)
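
The modeling.py change is pure deduplication: all three model classes now share the constants defined in configuration.py. A quick consistency check one could run against this branch (a sketch; module paths follow the diff above):

```python
from paddlenlp.transformers.artist.configuration import (
    ARTIST_PRETRAINED_INIT_CONFIGURATION,
    ARTIST_PRETRAINED_RESOURCE_FILES_MAP,
)
from paddlenlp.transformers.artist.modeling import (
    ArtistForConditionalGeneration,
    ArtistForImageGeneration,
    ArtistModel,
)

presets = set(ARTIST_PRETRAINED_INIT_CONFIGURATION)

# Every model class should advertise exactly the same preset names...
for cls in (ArtistModel, ArtistForConditionalGeneration, ArtistForImageGeneration):
    assert set(cls.pretrained_init_configuration) == presets

# ...and every preset should have a weights file registered for it.
assert set(ARTIST_PRETRAINED_RESOURCE_FILES_MAP["model_state"]) == presets
```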