Repository files: `.gitattributes`, `README.md`, `config.json`, `flax_model.msgpack`, `merges.txt`, `pytorch_model.bin`, `tf_model.h5`, `tokenizer.json`, `vocab.json`
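The weight files above indicate the checkpoint is published for PyTorch (`pytorch_model.bin`), TensorFlow (`tf_model.h5`), and Flax (`flax_model.msgpack`). A minimal sketch of loading each through the `transformers` auto classes, assuming the matching backend (torch, tensorflow, or jax/flax) is installed:

```python
# Minimal sketch, not from the model card: each weight file in the repository
# maps to one framework's auto class. Each import needs its backend installed.
from transformers import AutoModelForSequenceClassification      # reads pytorch_model.bin
from transformers import TFAutoModelForSequenceClassification    # reads tf_model.h5
from transformers import FlaxAutoModelForSequenceClassification  # reads flax_model.msgpack

pt_model = AutoModelForSequenceClassification.from_pretrained("roberta-large-mnli")
tf_model = TFAutoModelForSequenceClassification.from_pretrained("roberta-large-mnli")
fx_model = FlaxAutoModelForSequenceClassification.from_pretrained("roberta-large-mnli")
```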
| license | widget |
|---|---|
| mit | |
# roberta-large-mnli
Trained by Facebook; original source: the fairseq RoBERTa release.
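The MNLI fine-tuning head classifies a premise/hypothesis pair as contradiction, neutral, or entailment. A minimal PyTorch sketch of scoring one pair, assuming `transformers` and `torch` are installed; the example sentences are illustrative and not from the model card:

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("roberta-large-mnli")
model = AutoModelForSequenceClassification.from_pretrained("roberta-large-mnli")

premise = "A soccer game with multiple males playing."  # illustrative input
hypothesis = "Some men are playing a sport."            # illustrative input

# The tokenizer joins the pair into a single sequence with RoBERTa's separators.
inputs = tokenizer(premise, hypothesis, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Label names are read from the checkpoint's config.json, so no order is assumed.
probs = logits.softmax(dim=-1)[0]
for idx, p in enumerate(probs.tolist()):
    print(f"{model.config.id2label[idx]}: {p:.3f}")
```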
```bibtex
@article{liu2019roberta,
  title   = {RoBERTa: A Robustly Optimized BERT Pretraining Approach},
  author  = {Yinhan Liu and Myle Ott and Naman Goyal and Jingfei Du and
             Mandar Joshi and Danqi Chen and Omer Levy and Mike Lewis and
             Luke Zettlemoyer and Veselin Stoyanov},
  journal = {arXiv preprint arXiv:1907.11692},
  year    = {2019},
}
```