Upload RWForCausalLM

This commit is contained in:
Daniel Hesslow 2023-05-24 12:17:53 +00:00 committed by huggingface-web
parent faeeeb3a5c
commit e7950c40d6
14 changed files with 1618 additions and 0 deletions

28
config.json Normal file

@ -0,0 +1,28 @@
{
"alibi": false,
"apply_residual_connection_post_layernorm": false,
"architectures": [
"RWForCausalLM"
],
"attention_dropout": 0.0,
"auto_map": {
"AutoConfig": "configuration_RW.RWConfig",
"AutoModelForCausalLM": "modelling_RW.RWForCausalLM"
},
"bias": false,
"bos_token_id": 1,
"eos_token_id": 2,
"hidden_dropout": 0.0,
"hidden_size": 8192,
"initializer_range": 0.02,
"layer_norm_epsilon": 1e-05,
"model_type": "RefinedWeb",
"n_head": 128,
"n_head_kv": 8,
"n_layer": 60,
"parallel_attn": true,
"torch_dtype": "bfloat16",
"transformers_version": "4.27.4",
"use_cache": true,
"vocab_size": 65024
}

75
configuration_RW.py Normal file

@ -0,0 +1,75 @@
# coding=utf-8
# Copyright 2022 the Big Science Workshop and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Bloom configuration"""
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
class RWConfig(PretrainedConfig):
    """Configuration class for RW (RefinedWeb) causal language models.

    Stores the hyperparameters consumed by ``RWForCausalLM`` (see the
    ``auto_map`` in the accompanying ``config.json``). Mirrors the usual
    ``transformers`` config contract: unknown keyword arguments are forwarded
    to :class:`~transformers.PretrainedConfig`.
    """

    model_type = "RefinedWeb"
    # `past_key_values` is runtime state, not a serializable hyperparameter.
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic transformers attribute names onto this config's fields.
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        n_head_kv=None,
        alibi=False,
        **kwargs,
    ):
        """Build a config; ``n_head_kv=None`` falls back to ``n_head`` (no MQA/GQA)."""
        # Legacy checkpoints may supply `n_embed`; when present it wins over
        # `hidden_size` (backward compatibility).
        legacy_n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = legacy_n_embed if legacy_n_embed is not None else hidden_size

        # Model shape.
        self.vocab_size = vocab_size
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_head_kv = n_head_kv if n_head_kv is not None else n_head

        # Numerics and regularization.
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        # Architectural switches.
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.alibi = alibi
        self.use_cache = use_cache

        # Special tokens (also forwarded to the base class below).
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        # Let PretrainedConfig consume the remaining kwargs last, as in the
        # original implementation.
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        """Hidden dimension per attention head."""
        return self.hidden_size // self.n_head

    @property
    def rotary(self):
        """Rotary position embeddings are enabled exactly when alibi is not."""
        return not self.alibi

6
generation_config.json Normal file

@ -0,0 +1,6 @@
{
"_from_model_config": true,
"bos_token_id": 1,
"eos_token_id": 2,
"transformers_version": "4.27.4"
}

1111
modelling_RW.py Normal file

File diff suppressed because it is too large Load Diff

BIN
pytorch_model-00001-of-00009.bin (Stored with Git LFS) Normal file

Binary file not shown.

BIN
pytorch_model-00002-of-00009.bin (Stored with Git LFS) Normal file

Binary file not shown.

BIN
pytorch_model-00003-of-00009.bin (Stored with Git LFS) Normal file

Binary file not shown.

BIN
pytorch_model-00004-of-00009.bin (Stored with Git LFS) Normal file

Binary file not shown.

BIN
pytorch_model-00005-of-00009.bin (Stored with Git LFS) Normal file

Binary file not shown.

BIN
pytorch_model-00006-of-00009.bin (Stored with Git LFS) Normal file

Binary file not shown.

BIN
pytorch_model-00007-of-00009.bin (Stored with Git LFS) Normal file

Binary file not shown.

BIN
pytorch_model-00008-of-00009.bin (Stored with Git LFS) Normal file

Binary file not shown.

BIN
pytorch_model-00009-of-00009.bin (Stored with Git LFS) Normal file

Binary file not shown.

@ -0,0 +1,371 @@
{
"metadata": {
"total_size": 83669975040
},
"weight_map": {
"lm_head.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.0.input_layernorm.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.0.input_layernorm.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.0.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.0.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.0.self_attention.dense.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.0.self_attention.query_key_value.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.1.input_layernorm.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.1.input_layernorm.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.1.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.1.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.1.self_attention.dense.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.1.self_attention.query_key_value.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.10.input_layernorm.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.10.input_layernorm.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.10.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.10.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.10.self_attention.dense.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.10.self_attention.query_key_value.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.11.input_layernorm.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.11.input_layernorm.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.11.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.11.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.11.self_attention.dense.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.11.self_attention.query_key_value.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.12.input_layernorm.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.12.input_layernorm.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.12.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.12.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.12.self_attention.dense.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.12.self_attention.query_key_value.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.13.input_layernorm.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.13.input_layernorm.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.13.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.13.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.13.self_attention.dense.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.13.self_attention.query_key_value.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.14.input_layernorm.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.14.input_layernorm.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.14.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.14.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.14.self_attention.dense.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.14.self_attention.query_key_value.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.15.input_layernorm.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.15.input_layernorm.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.15.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.15.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.15.self_attention.dense.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.15.self_attention.query_key_value.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.16.input_layernorm.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.16.input_layernorm.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.16.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.16.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.16.self_attention.dense.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.16.self_attention.query_key_value.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.17.input_layernorm.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.17.input_layernorm.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.17.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.17.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.17.self_attention.dense.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.17.self_attention.query_key_value.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.18.input_layernorm.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.18.input_layernorm.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.18.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.18.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.18.self_attention.dense.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.18.self_attention.query_key_value.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.19.input_layernorm.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.19.input_layernorm.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.19.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.19.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.19.self_attention.dense.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.19.self_attention.query_key_value.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.2.input_layernorm.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.2.input_layernorm.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.2.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.2.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.2.self_attention.dense.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.2.self_attention.query_key_value.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.20.input_layernorm.bias": "pytorch_model-00003-of-00009.bin",
"transformer.h.20.input_layernorm.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.20.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.20.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.20.self_attention.dense.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.20.self_attention.query_key_value.weight": "pytorch_model-00003-of-00009.bin",
"transformer.h.21.input_layernorm.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.21.input_layernorm.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.21.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.21.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.21.self_attention.dense.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.21.self_attention.query_key_value.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.22.input_layernorm.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.22.input_layernorm.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.22.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.22.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.22.self_attention.dense.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.22.self_attention.query_key_value.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.23.input_layernorm.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.23.input_layernorm.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.23.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.23.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.23.self_attention.dense.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.23.self_attention.query_key_value.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.24.input_layernorm.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.24.input_layernorm.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.24.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.24.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.24.self_attention.dense.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.24.self_attention.query_key_value.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.25.input_layernorm.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.25.input_layernorm.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.25.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.25.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.25.self_attention.dense.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.25.self_attention.query_key_value.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.26.input_layernorm.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.26.input_layernorm.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.26.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.26.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.26.self_attention.dense.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.26.self_attention.query_key_value.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.27.input_layernorm.bias": "pytorch_model-00004-of-00009.bin",
"transformer.h.27.input_layernorm.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.27.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.27.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.27.self_attention.dense.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.27.self_attention.query_key_value.weight": "pytorch_model-00004-of-00009.bin",
"transformer.h.28.input_layernorm.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.28.input_layernorm.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.28.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.28.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.28.self_attention.dense.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.28.self_attention.query_key_value.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.29.input_layernorm.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.29.input_layernorm.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.29.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.29.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.29.self_attention.dense.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.29.self_attention.query_key_value.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.3.input_layernorm.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.3.input_layernorm.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.3.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.3.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.3.self_attention.dense.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.3.self_attention.query_key_value.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.30.input_layernorm.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.30.input_layernorm.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.30.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.30.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.30.self_attention.dense.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.30.self_attention.query_key_value.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.31.input_layernorm.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.31.input_layernorm.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.31.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.31.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.31.self_attention.dense.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.31.self_attention.query_key_value.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.32.input_layernorm.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.32.input_layernorm.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.32.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.32.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.32.self_attention.dense.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.32.self_attention.query_key_value.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.33.input_layernorm.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.33.input_layernorm.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.33.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.33.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.33.self_attention.dense.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.33.self_attention.query_key_value.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.34.input_layernorm.bias": "pytorch_model-00005-of-00009.bin",
"transformer.h.34.input_layernorm.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.34.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.34.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.34.self_attention.dense.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.34.self_attention.query_key_value.weight": "pytorch_model-00005-of-00009.bin",
"transformer.h.35.input_layernorm.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.35.input_layernorm.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.35.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.35.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.35.self_attention.dense.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.35.self_attention.query_key_value.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.36.input_layernorm.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.36.input_layernorm.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.36.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.36.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.36.self_attention.dense.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.36.self_attention.query_key_value.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.37.input_layernorm.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.37.input_layernorm.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.37.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.37.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.37.self_attention.dense.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.37.self_attention.query_key_value.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.38.input_layernorm.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.38.input_layernorm.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.38.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.38.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.38.self_attention.dense.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.38.self_attention.query_key_value.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.39.input_layernorm.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.39.input_layernorm.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.39.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.39.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.39.self_attention.dense.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.39.self_attention.query_key_value.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.4.input_layernorm.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.4.input_layernorm.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.4.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.4.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.4.self_attention.dense.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.4.self_attention.query_key_value.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.40.input_layernorm.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.40.input_layernorm.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.40.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.40.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.40.self_attention.dense.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.40.self_attention.query_key_value.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.41.input_layernorm.bias": "pytorch_model-00006-of-00009.bin",
"transformer.h.41.input_layernorm.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.41.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.41.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.41.self_attention.dense.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.41.self_attention.query_key_value.weight": "pytorch_model-00006-of-00009.bin",
"transformer.h.42.input_layernorm.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.42.input_layernorm.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.42.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.42.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.42.self_attention.dense.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.42.self_attention.query_key_value.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.43.input_layernorm.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.43.input_layernorm.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.43.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.43.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.43.self_attention.dense.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.43.self_attention.query_key_value.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.44.input_layernorm.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.44.input_layernorm.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.44.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.44.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.44.self_attention.dense.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.44.self_attention.query_key_value.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.45.input_layernorm.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.45.input_layernorm.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.45.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.45.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.45.self_attention.dense.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.45.self_attention.query_key_value.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.46.input_layernorm.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.46.input_layernorm.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.46.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.46.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.46.self_attention.dense.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.46.self_attention.query_key_value.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.47.input_layernorm.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.47.input_layernorm.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.47.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.47.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.47.self_attention.dense.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.47.self_attention.query_key_value.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.48.input_layernorm.bias": "pytorch_model-00007-of-00009.bin",
"transformer.h.48.input_layernorm.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.48.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.48.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.48.self_attention.dense.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.48.self_attention.query_key_value.weight": "pytorch_model-00007-of-00009.bin",
"transformer.h.49.input_layernorm.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.49.input_layernorm.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.49.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.49.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.49.self_attention.dense.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.49.self_attention.query_key_value.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.5.input_layernorm.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.5.input_layernorm.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.5.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.5.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.5.self_attention.dense.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.5.self_attention.query_key_value.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.50.input_layernorm.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.50.input_layernorm.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.50.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.50.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.50.self_attention.dense.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.50.self_attention.query_key_value.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.51.input_layernorm.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.51.input_layernorm.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.51.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.51.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.51.self_attention.dense.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.51.self_attention.query_key_value.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.52.input_layernorm.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.52.input_layernorm.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.52.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.52.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.52.self_attention.dense.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.52.self_attention.query_key_value.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.53.input_layernorm.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.53.input_layernorm.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.53.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.53.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.53.self_attention.dense.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.53.self_attention.query_key_value.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.54.input_layernorm.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.54.input_layernorm.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.54.mlp.dense_4h_to_h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.54.mlp.dense_h_to_4h.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.54.self_attention.dense.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.54.self_attention.query_key_value.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.55.input_layernorm.bias": "pytorch_model-00008-of-00009.bin",
"transformer.h.55.input_layernorm.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.55.mlp.dense_4h_to_h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.55.mlp.dense_h_to_4h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.55.self_attention.dense.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.55.self_attention.query_key_value.weight": "pytorch_model-00008-of-00009.bin",
"transformer.h.56.input_layernorm.bias": "pytorch_model-00009-of-00009.bin",
"transformer.h.56.input_layernorm.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.56.mlp.dense_4h_to_h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.56.mlp.dense_h_to_4h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.56.self_attention.dense.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.56.self_attention.query_key_value.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.57.input_layernorm.bias": "pytorch_model-00009-of-00009.bin",
"transformer.h.57.input_layernorm.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.57.mlp.dense_4h_to_h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.57.mlp.dense_h_to_4h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.57.self_attention.dense.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.57.self_attention.query_key_value.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.58.input_layernorm.bias": "pytorch_model-00009-of-00009.bin",
"transformer.h.58.input_layernorm.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.58.mlp.dense_4h_to_h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.58.mlp.dense_h_to_4h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.58.self_attention.dense.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.58.self_attention.query_key_value.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.59.input_layernorm.bias": "pytorch_model-00009-of-00009.bin",
"transformer.h.59.input_layernorm.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.59.mlp.dense_4h_to_h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.59.mlp.dense_h_to_4h.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.59.self_attention.dense.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.59.self_attention.query_key_value.weight": "pytorch_model-00009-of-00009.bin",
"transformer.h.6.input_layernorm.bias": "pytorch_model-00001-of-00009.bin",
"transformer.h.6.input_layernorm.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.6.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.6.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.6.self_attention.dense.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.6.self_attention.query_key_value.weight": "pytorch_model-00001-of-00009.bin",
"transformer.h.7.input_layernorm.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.7.input_layernorm.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.7.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.7.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.7.self_attention.dense.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.7.self_attention.query_key_value.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.8.input_layernorm.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.8.input_layernorm.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.8.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.8.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.8.self_attention.dense.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.8.self_attention.query_key_value.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.9.input_layernorm.bias": "pytorch_model-00002-of-00009.bin",
"transformer.h.9.input_layernorm.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.9.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.9.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.9.self_attention.dense.weight": "pytorch_model-00002-of-00009.bin",
"transformer.h.9.self_attention.query_key_value.weight": "pytorch_model-00002-of-00009.bin",
"transformer.ln_f.bias": "pytorch_model-00009-of-00009.bin",
"transformer.ln_f.weight": "pytorch_model-00009-of-00009.bin",
"transformer.word_embeddings.weight": "pytorch_model-00001-of-00009.bin"
}
}