Compare commits: 10 commits (37e66a5330...18fc720063)
| Author | SHA1 | Date |
|---|---|---|
|  | 18fc720063 |  |
|  | 12dcbc386c |  |
|  | 940b797366 |  |
|  | 8f9f671376 |  |
|  | 2739dcfdcb |  |
|  | 313db1705b |  |
|  | eb6946e047 |  |
|  | d84dc57432 |  |
|  | cb5c300720 |  |
|  | 2e0d7d7875 |  |
.gitattributes
@@ -6,3 +6,4 @@
 *.tar.gz filter=lfs diff=lfs merge=lfs -text
 *.ot filter=lfs diff=lfs merge=lfs -text
 *.onnx filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json
@@ -0,0 +1,7 @@
{
  "word_embedding_dimension": 768,
  "pooling_mode_cls_token": false,
  "pooling_mode_mean_tokens": true,
  "pooling_mode_max_tokens": false,
  "pooling_mode_mean_sqrt_len_tokens": false
}
README.md
@@ -0,0 +1,108 @@
---
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- feature-extraction
- sentence-similarity
- transformers
license: apache-2.0
---

**⚠️ This model is deprecated. Please don't use it, as it produces sentence embeddings of low quality. You can find recommended sentence embedding models here: [SBERT.net - Pretrained Models](https://www.sbert.net/docs/pretrained_models.html)**

# sentence-transformers/bert-base-nli-mean-tokens

This is a [sentence-transformers](https://www.SBERT.net) model: it maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.

## Usage (Sentence-Transformers)

Using this model is easy once you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer

sentences = ["This is an example sentence", "Each sentence is converted"]

model = SentenceTransformer('sentence-transformers/bert-base-nli-mean-tokens')
embeddings = model.encode(sentences)
print(embeddings)
```
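
As a small follow-up sketch (my addition, not part of the original card), the resulting embeddings can be compared with cosine similarity, e.g. for the semantic-search use case mentioned above; the query and corpus sentences below are made up purely for illustration:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('sentence-transformers/bert-base-nli-mean-tokens')

# Hypothetical query and corpus, purely for illustration
query_embedding = model.encode("An example query", convert_to_tensor=True)
corpus_embeddings = model.encode(
    ["This is an example sentence", "Each sentence is converted"],
    convert_to_tensor=True,
)

# util.cos_sim returns a [num_queries x num_corpus] tensor of cosine similarities
scores = util.cos_sim(query_embedding, corpus_embeddings)
print(scores)
```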

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: first pass your input through the transformer model, then apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch


# Mean pooling: take the attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


# Sentences we want sentence embeddings for
sentences = ['This is an example sentence', 'Each sentence is converted']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens')
model = AutoModel.from_pretrained('sentence-transformers/bert-base-nli-mean-tokens')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```
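
As a side note (a sketch of mine, not from the original card): if you intend to compare these embeddings with cosine similarity, you can L2-normalize them first, after which cosine similarity reduces to a plain matrix product:

```python
import torch.nn.functional as F

# Assumes `sentence_embeddings` from the snippet above is in scope
normalized = F.normalize(sentence_embeddings, p=2, dim=1)

# After L2 normalization, pairwise cosine similarity is just a matrix product
cosine_scores = normalized @ normalized.T
print(cosine_scores)
```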

## Evaluation Results

For an automated evaluation of this model, see the *Sentence Embeddings Benchmark*: [https://seb.sbert.net](https://seb.sbert.net?model_name=sentence-transformers/bert-base-nli-mean-tokens)

## Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```
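
For reference, the same two-module stack can also be assembled by hand via the `models` API of sentence-transformers. This is a sketch of mine rather than part of the original card, and it assumes `bert-base-uncased` as the underlying checkpoint:

```python
from sentence_transformers import SentenceTransformer, models

# Transformer module: BERT with the max_seq_length shown in the printout above
word_embedding_model = models.Transformer('bert-base-uncased', max_seq_length=128)

# Pooling module: mean pooling over token embeddings (768-dim for BERT-base)
pooling_model = models.Pooling(
    word_embedding_model.get_word_embedding_dimension(),
    pooling_mode_mean_tokens=True,
    pooling_mode_cls_token=False,
    pooling_mode_max_tokens=False,
)

model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
```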

## Citing & Authors

This model was trained by [sentence-transformers](https://www.sbert.net/).

If you find this model helpful, feel free to cite our publication [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](https://arxiv.org/abs/1908.10084):

```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "http://arxiv.org/abs/1908.10084",
}
```
config.json
@@ -1,4 +1,5 @@
 {
+  "_name_or_path": "old_models/bert-base-nli-mean-tokens/0_BERT",
   "architectures": [
     "BertModel"
   ],
@@ -15,6 +16,9 @@
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
   "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "transformers_version": "4.7.0",
   "type_vocab_size": 2,
+  "use_cache": true,
   "vocab_size": 30522
 }
config_sentence_transformers.json
@@ -0,0 +1,7 @@
{
  "__version__": {
    "sentence_transformers": "2.0.0",
    "transformers": "4.7.0",
    "pytorch": "1.9.0+cu102"
  }
}
Binary file not shown.
modules.json
@@ -0,0 +1,14 @@
[
  {
    "idx": 0,
    "name": "0",
    "path": "",
    "type": "sentence_transformers.models.Transformer"
  },
  {
    "idx": 1,
    "name": "1",
    "path": "1_Pooling",
    "type": "sentence_transformers.models.Pooling"
  }
]
BIN pytorch_model.bin (Stored with Git LFS)
Binary file not shown.
sentence_bert_config.json
@@ -1,3 +1,4 @@
 {
-  "max_seq_length": 128
+  "max_seq_length": 128,
+  "do_lower_case": false
 }
special_tokens_map.json
@@ -0,0 +1 @@
{"unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]"}
Binary file not shown.
File diff suppressed because one or more lines are too long
tokenizer_config.json
@@ -0,0 +1 @@
{"do_lower_case": true, "unk_token": "[UNK]", "sep_token": "[SEP]", "pad_token": "[PAD]", "cls_token": "[CLS]", "mask_token": "[MASK]", "tokenize_chinese_chars": true, "strip_accents": null, "special_tokens_map_file": "old_models/bert-base-nli-mean-tokens/0_BERT/special_tokens_map.json", "name_or_path": "old_models/bert-base-nli-mean-tokens/0_BERT", "do_basic_tokenize": true, "never_split": null}