Update README.md
This commit is contained in:
parent
699e99a6d9
commit
f7f5b86451
|
@@ -73,19 +73,12 @@ For more code examples, we refer to the [documentation](https://huggingface.co/t

```bibtex
@misc{https://doi.org/10.48550/arxiv.2203.12602,
  doi = {10.48550/ARXIV.2203.12602},
  url = {https://arxiv.org/abs/2203.12602},
  author = {Tong, Zhan and Song, Yibing and Wang, Jue and Wang, Limin},
  keywords = {Computer Vision and Pattern Recognition (cs.CV), FOS: Computer and information sciences, FOS: Computer and information sciences},
  title = {VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training},
  publisher = {arXiv},
  year = {2022},
  copyright = {Creative Commons Attribution 4.0 International}
}
```
Loading…
Reference in New Issue