upload
This commit is contained in: parent f0e720b441, commit acbb28c8aa
7  1_Pooling/config.json  Executable file
@@ -0,0 +1,7 @@
{
    "word_embedding_dimension": 384,
    "pooling_mode_cls_token": false,
    "pooling_mode_mean_tokens": true,
    "pooling_mode_max_tokens": false,
    "pooling_mode_mean_sqrt_len_tokens": false
}
153  README.md  Executable file
@@ -0,0 +1,153 @@
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
language: en
license: apache-2.0
---


# all-MiniLM-L6-v2
This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for tasks like clustering or semantic search.

## Usage (Sentence-Transformers)
Using this model is easy once you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:
```python
from sentence_transformers import SentenceTransformer
sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
embeddings = model.encode(sentences)
print(embeddings)
```
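
Since the model is tuned for sentence similarity, a natural follow-up is to compare the resulting embeddings. A minimal sketch (the example sentences are illustrative; `util.cos_sim` is the cosine-similarity helper shipped with recent sentence-transformers releases):

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

# Illustrative sentences, not part of the original model card
embeddings = model.encode(["A man is eating food.", "A man is eating a piece of bread."])

# Cosine similarity between the two sentence embeddings
print(util.cos_sim(embeddings[0], embeddings[1]))
```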

## Usage (HuggingFace Transformers)
Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first, you pass your input through the transformer model, then you apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch
import torch.nn.functional as F

# Mean Pooling - take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')
model = AutoModel.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

# Normalize embeddings
sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1)

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=sentence-transformers/all-MiniLM-L6-v2)

------

## Background

The project aims to train sentence embedding models on very large sentence-level datasets using a self-supervised
contrastive learning objective. We used the pretrained [`nreimers/MiniLM-L6-H384-uncased`](https://huggingface.co/nreimers/MiniLM-L6-H384-uncased) model and fine-tuned it on a
dataset of 1B sentence pairs. We use a contrastive learning objective: given a sentence from the pair, the model should predict which out of a set of randomly sampled other sentences was actually paired with it in our dataset.

We developed this model during the
[Community week using JAX/Flax for NLP & CV](https://discuss.huggingface.co/t/open-to-the-community-community-week-using-jax-flax-for-nlp-cv/7104),
organized by Hugging Face, as part of the project
[Train the Best Sentence Embedding Model Ever with 1B Training Pairs](https://discuss.huggingface.co/t/train-the-best-sentence-embedding-model-ever-with-1b-training-pairs/7354). We benefited from efficient hardware infrastructure to run the project: 7 TPU v3-8s, as well as guidance from Google's Flax, JAX, and Cloud team members on efficient deep learning frameworks.

## Intended uses

Our model is intended to be used as a sentence and short paragraph encoder. Given an input text, it outputs a vector which captures
the semantic information. The sentence vector may be used for information retrieval, clustering or sentence similarity tasks.

By default, input text longer than 256 word pieces is truncated.
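
If you need to inspect or change this limit when loading the model with sentence-transformers, you can use the `max_seq_length` attribute of the loaded model. A minimal sketch (the value set below is purely illustrative):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
print(model.max_seq_length)  # current word-piece limit used for truncation

# Illustrative only: lower the limit if you only encode short sentences
model.max_seq_length = 128
```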

## Training procedure

### Pre-training

We use the pretrained [`nreimers/MiniLM-L6-H384-uncased`](https://huggingface.co/nreimers/MiniLM-L6-H384-uncased) model. Please refer to the model card for more detailed information about the pre-training procedure.

### Fine-tuning

We fine-tune the model using a contrastive objective. Formally, we compute the cosine similarity between each possible sentence pair in the batch.
We then apply the cross-entropy loss by comparing with the true pairs.
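
A minimal sketch of this in-batch objective (the actual training code is in `train_script.py` below; the helper name here is just for illustration):

```python
import torch
import torch.nn.functional as F

def in_batch_contrastive_loss(emb_a, emb_b, scale=20.0):
    """emb_a, emb_b: (batch, dim) embeddings of anchors and their paired sentences."""
    emb_a = F.normalize(emb_a, p=2, dim=1)
    emb_b = F.normalize(emb_b, p=2, dim=1)
    scores = emb_a @ emb_b.T * scale                             # scaled cosine similarities
    labels = torch.arange(scores.size(0), device=scores.device)  # a[i] is paired with b[i]
    return F.cross_entropy(scores, labels)
```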

#### Hyperparameters

We trained our model on a TPU v3-8. We trained the model for 100k steps using a batch size of 1024 (128 per TPU core).
We used a learning-rate warmup over 500 steps. The sequence length was limited to 128 tokens. We used the AdamW optimizer with
a 2e-5 learning rate. The full training script is accessible in this current repository: `train_script.py`.
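
This corresponds, roughly, to the following optimizer and schedule configuration (a sketch using the `transformers` helpers; `model` and the 100k `total_steps` are assumed to be defined):

```python
from transformers import AdamW, get_linear_schedule_with_warmup

optimizer = AdamW(model.parameters(), lr=2e-5)
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=500,            # learning-rate warmup
    num_training_steps=total_steps,  # 100k steps in our run
)
```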

#### Training data

We use the concatenation of multiple datasets to fine-tune our model. The total number of sentence pairs is above 1 billion.
We sampled each dataset with a weighted probability; the configuration is detailed in the `data_config.json` file.
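
In practice the weighting works roughly as in the sketch below, which mirrors the logic in `train_script.py` (file names and weights here are illustrative):

```python
import json
import random

# data_config.json holds a list of entries such as {"name": ..., "weight": ...}
with open("data_config.json") as f:
    data_config = json.load(f)

# Repeating each dataset index `weight` times means a uniform draw from
# `dataset_indices` samples datasets proportionally to their weight.
dataset_indices = []
for idx, entry in enumerate(data_config):
    dataset_indices.extend([idx] * entry["weight"])

chosen = random.choice(dataset_indices)
print("Next batch is drawn from:", data_config[chosen]["name"])
```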

| Dataset | Paper | Number of training tuples |
|--------------------------------------------------------|:----------------------------------------:|:--------------------------:|
| [Reddit comments (2015-2018)](https://github.com/PolyAI-LDN/conversational-datasets/tree/master/reddit) | [paper](https://arxiv.org/abs/1904.06472) | 726,484,430 |
| [S2ORC](https://github.com/allenai/s2orc) Citation pairs (Abstracts) | [paper](https://aclanthology.org/2020.acl-main.447/) | 116,288,806 |
| [WikiAnswers](https://github.com/afader/oqa#wikianswers-corpus) Duplicate question pairs | [paper](https://doi.org/10.1145/2623330.2623677) | 77,427,422 |
| [PAQ](https://github.com/facebookresearch/PAQ) (Question, Answer) pairs | [paper](https://arxiv.org/abs/2102.07033) | 64,371,441 |
| [S2ORC](https://github.com/allenai/s2orc) Citation pairs (Titles) | [paper](https://aclanthology.org/2020.acl-main.447/) | 52,603,982 |
| [S2ORC](https://github.com/allenai/s2orc) (Title, Abstract) | [paper](https://aclanthology.org/2020.acl-main.447/) | 41,769,185 |
| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) (Title, Body) pairs | - | 25,316,456 |
| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) (Title+Body, Answer) pairs | - | 21,396,559 |
| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) (Title, Answer) pairs | - | 21,396,559 |
| [MS MARCO](https://microsoft.github.io/msmarco/) triplets | [paper](https://doi.org/10.1145/3404835.3462804) | 9,144,553 |
| [GOOAQ: Open Question Answering with Diverse Answer Types](https://github.com/allenai/gooaq) | [paper](https://arxiv.org/pdf/2104.08727.pdf) | 3,012,496 |
| [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) (Title, Answer) | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 1,198,260 |
| [Code Search](https://huggingface.co/datasets/code_search_net) | - | 1,151,414 |
| [COCO](https://cocodataset.org/#home) Image captions | [paper](https://link.springer.com/chapter/10.1007%2F978-3-319-10602-1_48) | 828,395 |
| [SPECTER](https://github.com/allenai/specter) citation triplets | [paper](https://doi.org/10.18653/v1/2020.acl-main.207) | 684,100 |
| [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) (Question, Answer) | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 681,164 |
| [Yahoo Answers](https://www.kaggle.com/soumikrakshit/yahoo-answers-dataset) (Title, Question) | [paper](https://proceedings.neurips.cc/paper/2015/hash/250cf8b51c773f3f8dc8b4be867a9a02-Abstract.html) | 659,896 |
| [SearchQA](https://huggingface.co/datasets/search_qa) | [paper](https://arxiv.org/abs/1704.05179) | 582,261 |
| [Eli5](https://huggingface.co/datasets/eli5) | [paper](https://doi.org/10.18653/v1/p19-1346) | 325,475 |
| [Flickr 30k](https://shannon.cs.illinois.edu/DenotationGraph/) | [paper](https://transacl.org/ojs/index.php/tacl/article/view/229/33) | 317,695 |
| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) Duplicate questions (titles) | - | 304,525 |
| AllNLI ([SNLI](https://nlp.stanford.edu/projects/snli/) and [MultiNLI](https://cims.nyu.edu/~sbowman/multinli/)) | [paper SNLI](https://doi.org/10.18653/v1/d15-1075), [paper MultiNLI](https://doi.org/10.18653/v1/n18-1101) | 277,230 |
| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) Duplicate questions (bodies) | - | 250,519 |
| [Stack Exchange](https://huggingface.co/datasets/flax-sentence-embeddings/stackexchange_xml) Duplicate questions (titles+bodies) | - | 250,460 |
| [Sentence Compression](https://github.com/google-research-datasets/sentence-compression) | [paper](https://www.aclweb.org/anthology/D13-1155/) | 180,000 |
| [Wikihow](https://github.com/pvl/wikihow_pairs_dataset) | [paper](https://arxiv.org/abs/1810.09305) | 128,542 |
| [Altlex](https://github.com/chridey/altlex/) | [paper](https://aclanthology.org/P16-1135.pdf) | 112,696 |
| [Quora Question Triplets](https://quoradata.quora.com/First-Quora-Dataset-Release-Question-Pairs) | - | 103,663 |
| [Simple Wikipedia](https://cs.pomona.edu/~dkauchak/simplification/) | [paper](https://www.aclweb.org/anthology/P11-2117/) | 102,225 |
| [Natural Questions (NQ)](https://ai.google.com/research/NaturalQuestions) | [paper](https://transacl.org/ojs/index.php/tacl/article/view/1455) | 100,231 |
| [SQuAD2.0](https://rajpurkar.github.io/SQuAD-explorer/) | [paper](https://aclanthology.org/P18-2124.pdf) | 87,599 |
| [TriviaQA](https://huggingface.co/datasets/trivia_qa) | - | 73,346 |
| **Total** | | **1,170,060,424** |
24  config.json  Normal file
@@ -0,0 +1,24 @@
{
    "_name_or_path": "nreimers/MiniLM-L6-H384-uncased",
    "architectures": [
        "BertModel"
    ],
    "attention_probs_dropout_prob": 0.1,
    "gradient_checkpointing": false,
    "hidden_act": "gelu",
    "hidden_dropout_prob": 0.1,
    "hidden_size": 384,
    "initializer_range": 0.02,
    "intermediate_size": 1536,
    "layer_norm_eps": 1e-12,
    "max_position_embeddings": 512,
    "model_type": "bert",
    "num_attention_heads": 12,
    "num_hidden_layers": 6,
    "pad_token_id": 0,
    "position_embedding_type": "absolute",
    "transformers_version": "4.8.2",
    "type_vocab_size": 2,
    "use_cache": true,
    "vocab_size": 30522
}
7  config_sentence_transformers.json  Executable file
@@ -0,0 +1,7 @@
{
    "__version__": {
        "sentence_transformers": "2.0.0",
        "transformers": "4.6.1",
        "pytorch": "1.8.1"
    }
}
1452  data_config.json  Executable file
File diff suppressed because it is too large
20  modules.json  Executable file
@@ -0,0 +1,20 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  },
  {
    "idx": 2,
    "name": "2",
    "path": "2_Normalize",
    "type": "sentence_transformers.models.Normalize"
  }
]
BIN  pytorch_model.bin (Stored with Git LFS)  Normal file
Binary file not shown.
4  sentence_bert_config.json  Executable file
@@ -0,0 +1,4 @@
{
    "max_seq_length": 128,
    "do_lower_case": false
}
1  special_tokens_map.json  Normal file
@@ -0,0 +1 @@
{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
1  tokenizer.json  Normal file
File diff suppressed because one or more lines are too long
1  tokenizer_config.json  Normal file
@@ -0,0 +1 @@
{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": "/home/ukp-reimers/.cache/huggingface/transformers/1e5909e4dfaa904617797ed35a6105a23daa56cbefca48fef329f772584699fb.dd8bd9bfd3664b530ea4e645105f557769387b3da9f79bdb55ed556bdd80611d", "name_or_path": "nreimers/MiniLM-L6-H384-uncased", "do_basic_tokenize": true, "never_split": null, "tokenizer_class": "BertTokenizer"}
344  train_script.py  Executable file
@@ -0,0 +1,344 @@
"""
Train script for a single file

Need to set the TPU address first:
export XRT_TPU_CONFIG="localservice;0;localhost:51011"
"""

import torch.multiprocessing as mp
import threading
import time
import random
import sys
import argparse
import gzip
import json
import logging
import tqdm
import torch
from torch import nn
from torch.utils.data import DataLoader
import torch
import torch_xla
import torch_xla.core
import torch_xla.core.functions
import torch_xla.core.xla_model as xm
import torch_xla.distributed.xla_multiprocessing as xmp
import torch_xla.distributed.parallel_loader as pl
import os
from shutil import copyfile


from transformers import (
    AdamW,
    AutoModel,
    AutoTokenizer,
    get_linear_schedule_with_warmup,
    set_seed,
)

class AutoModelForSentenceEmbedding(nn.Module):
    def __init__(self, model_name, tokenizer, normalize=True):
        super(AutoModelForSentenceEmbedding, self).__init__()

        self.model = AutoModel.from_pretrained(model_name)
        self.normalize = normalize
        self.tokenizer = tokenizer

    def forward(self, **kwargs):
        model_output = self.model(**kwargs)
        embeddings = self.mean_pooling(model_output, kwargs['attention_mask'])
        if self.normalize:
            embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)

        return embeddings

    def mean_pooling(self, model_output, attention_mask):
        token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
        input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

    def save_pretrained(self, output_path):
        if xm.is_master_ordinal():
            self.tokenizer.save_pretrained(output_path)
            self.model.config.save_pretrained(output_path)

        xm.save(self.model.state_dict(), os.path.join(output_path, "pytorch_model.bin"))



def train_function(index, args, queue):
    tokenizer = AutoTokenizer.from_pretrained(args.model)
    model = AutoModelForSentenceEmbedding(args.model, tokenizer)


    ### Train Loop
    device = xm.xla_device()
    model = model.to(device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=2e-5, correct_bias=True)

    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=500,
        num_training_steps=args.steps,
    )

    # Now we train the model
    cross_entropy_loss = nn.CrossEntropyLoss()
    max_grad_norm = 1

    model.train()

    for global_step in tqdm.trange(args.steps, disable=not xm.is_master_ordinal()):
        #### Get the batch data
        batch = queue.get()
        #print(index, "batch {}x{}".format(len(batch), ",".join([str(len(b)) for b in batch])))

        if len(batch[0]) == 2:  #(anchor, positive)
            text1 = tokenizer([b[0] for b in batch], return_tensors="pt", max_length=args.max_length, truncation=True, padding="max_length")
            text2 = tokenizer([b[1] for b in batch], return_tensors="pt", max_length=args.max_length, truncation=True, padding="max_length")

            ### Compute embeddings
            embeddings_a = model(**text1.to(device))
            embeddings_b = model(**text2.to(device))

            ### Gather all embeddings
            embeddings_a = torch_xla.core.functions.all_gather(embeddings_a)
            embeddings_b = torch_xla.core.functions.all_gather(embeddings_b)

            ### Compute similarity scores 512 x 512
            scores = torch.mm(embeddings_a, embeddings_b.transpose(0, 1)) * args.scale

            ### Compute cross-entropy loss
            labels = torch.tensor(range(len(scores)), dtype=torch.long, device=embeddings_a.device)  # Example a[i] should match with b[i]

            ## Symmetric loss as in CLIP
            loss = (cross_entropy_loss(scores, labels) + cross_entropy_loss(scores.transpose(0, 1), labels)) / 2

        else:  #(anchor, positive, negative)
            text1 = tokenizer([b[0] for b in batch], return_tensors="pt", max_length=args.max_length, truncation=True, padding="max_length")
            text2 = tokenizer([b[1] for b in batch], return_tensors="pt", max_length=args.max_length, truncation=True, padding="max_length")
            text3 = tokenizer([b[2] for b in batch], return_tensors="pt", max_length=args.max_length, truncation=True, padding="max_length")

            embeddings_a = model(**text1.to(device))
            embeddings_b1 = model(**text2.to(device))
            embeddings_b2 = model(**text3.to(device))

            embeddings_a = torch_xla.core.functions.all_gather(embeddings_a)
            embeddings_b1 = torch_xla.core.functions.all_gather(embeddings_b1)
            embeddings_b2 = torch_xla.core.functions.all_gather(embeddings_b2)

            embeddings_b = torch.cat([embeddings_b1, embeddings_b2])

            ### Compute similarity scores 512 x 1024
            scores = torch.mm(embeddings_a, embeddings_b.transpose(0, 1)) * args.scale

            ### Compute cross-entropy loss
            labels = torch.tensor(range(len(scores)), dtype=torch.long, device=embeddings_a.device)  # Example a[i] should match with b[i]

            ## One-way loss
            loss = cross_entropy_loss(scores, labels)

        # Backward pass
        optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)

        xm.optimizer_step(optimizer, barrier=True)
        lr_scheduler.step()

        #Save model
        if (global_step+1) % args.save_steps == 0:
            output_path = os.path.join(args.output, str(global_step+1))
            xm.master_print("save model: "+output_path)
            model.save_pretrained(output_path)


    output_path = os.path.join(args.output, "final")
    xm.master_print("save model final: "+ output_path)
    model.save_pretrained(output_path)

def produce_data(args, queue, filepaths, dataset_indices):
    global_batch_size = args.batch_size*args.nprocs  #Global batch size
    size_per_dataset = int(global_batch_size / args.datasets_per_batch)  #How many datasets per batch
    num_same_dataset = int(size_per_dataset / args.batch_size)
    print("producer", "global_batch_size", global_batch_size)
    print("producer", "size_per_dataset", size_per_dataset)
    print("producer", "num_same_dataset", num_same_dataset)

    datasets = []
    for filepath in filepaths:
        if "reddit_" in filepath:  #Special dataset class for Reddit files
            data_obj = RedditDataset(filepath)
        else:
            data_obj = Dataset(filepath)
        datasets.append(iter(data_obj))

    # Store if dataset is in a 2 col or 3 col format
    num_cols = {idx: len(next(dataset)) for idx, dataset in enumerate(datasets)}

    while True:
        texts_in_batch = set()
        batch_format = None  #2 vs 3 col format for this batch

        #Add data from several sub datasets
        for _ in range(args.datasets_per_batch):
            valid_dataset = False  #Check that datasets have the same 2/3 col format
            while not valid_dataset:
                data_idx = random.choice(dataset_indices)
                if batch_format is None:
                    batch_format = num_cols[data_idx]
                    valid_dataset = True
                else:  #Check that this dataset has the same format
                    valid_dataset = (batch_format == num_cols[data_idx])

            #Get data from this dataset
            dataset = datasets[data_idx]
            for _ in range(num_same_dataset):
                for _ in range(args.nprocs):
                    batch_device = []  #A batch for one device
                    while len(batch_device) < args.batch_size:
                        sample = next(dataset)
                        in_batch = False
                        for text in sample:
                            if text in texts_in_batch:
                                in_batch = True
                                break

                        if not in_batch:
                            for text in sample:
                                texts_in_batch.add(text)
                            batch_device.append(sample)

                    queue.put(batch_device)


class RedditDataset:
    """
    A class that handles the reddit data files
    """
    def __init__(self, filepath):
        self.filepath = filepath

    def __iter__(self):
        while True:
            with gzip.open(self.filepath, "rt") as fIn:
                for line in fIn:
                    data = json.loads(line)

                    if "response" in data and "context" in data:
                        yield [data["response"], data["context"]]


class Dataset:
    """
    A class that handles one dataset
    """
    def __init__(self, filepath):
        self.filepath = filepath

    def __iter__(self):
        max_dataset_size = 10*1000*1000  #Cache small datasets in memory
        dataset = []
        data_format = None

        while dataset is None or len(dataset) == 0:
            with gzip.open(self.filepath, "rt") as fIn:
                for line in fIn:
                    data = json.loads(line)
                    if isinstance(data, dict):
                        data = data['texts']

                    if data_format is None:
                        data_format = len(data)

                    #Ensure that all entries are of the same 2/3 col format
                    assert len(data) == data_format

                    if dataset is not None:
                        dataset.append(data)
                        if len(dataset) >= max_dataset_size:
                            dataset = None

                    yield data

        # Data loaded. Now stream to the queue
        # Shuffle for each epoch
        while True:
            random.shuffle(dataset)
            for data in dataset:
                yield data


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', default='nreimers/MiniLM-L6-H384-uncased')
    parser.add_argument('--steps', type=int, default=2000)
    parser.add_argument('--save_steps', type=int, default=10000)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--max_length', type=int, default=128)
    parser.add_argument('--nprocs', type=int, default=8)
    parser.add_argument('--datasets_per_batch', type=int, default=2, help="Number of datasets per batch")
    parser.add_argument('--scale', type=float, default=20, help="Use 20 for cossim, and 1 when you work with unnormalized embeddings with dot product")
    parser.add_argument('--data_folder', default="/data", help="Folder with your dataset files")
    parser.add_argument('data_config', help="A data_config.json file")
    parser.add_argument('output')
    args = parser.parse_args()

    # Ensure global batch size is divisible by data_sample_size
    assert (args.batch_size*args.nprocs) % args.datasets_per_batch == 0

    logging.info("Output: "+args.output)
    if os.path.exists(args.output):
        print("Output folder already exists.")
        input("Continue?")

    # Write train script to output path
    os.makedirs(args.output, exist_ok=True)

    data_config_path = os.path.join(args.output, 'data_config.json')
    copyfile(args.data_config, data_config_path)

    train_script_path = os.path.join(args.output, 'train_script.py')
    copyfile(__file__, train_script_path)
    with open(train_script_path, 'a') as fOut:
        fOut.write("\n\n# Script was called via:\n#python " + " ".join(sys.argv))


    #Load data config
    with open(args.data_config) as fIn:
        data_config = json.load(fIn)

    queue = mp.Queue(maxsize=100*args.nprocs)

    filepaths = []
    dataset_indices = []
    for idx, data in enumerate(data_config):
        filepaths.append(os.path.join(os.path.expanduser(args.data_folder), data['name']))
        dataset_indices.extend([idx]*data['weight'])

    # Start producer
    p = mp.Process(target=produce_data, args=(args, queue, filepaths, dataset_indices))
    p.start()

    # Run training
    print("Start processes:", args.nprocs)
    xmp.spawn(train_function, args=(args, queue), nprocs=args.nprocs, start_method='fork')
    print("Training done")
    print("It might be that not all processes exit automatically. In that case you must manually kill this process.")
    print("With 'pkill python' you can kill all remaining python processes")
    p.kill()
    exit()



# Script was called via:
#python train_many_data_files_v2.py --steps 1000000 --batch_size 128 --model nreimers/MiniLM-L6-H384-uncased train_data_configs/all_datasets_v4.json output/all_datasets_v4_MiniLM-L6-H384-uncased-batch128