diff --git a/README.md b/README.md
new file mode 100644
index 0000000..c792cf2
--- /dev/null
+++ b/README.md
@@ -0,0 +1,120 @@
+---
+datasets:
+- squad_v2
+---
+
+# roberta-base for QA
+
+NOTE: This is version 2 of the model. See [this GitHub issue](https://github.com/deepset-ai/FARM/issues/552) from the FARM repository for an explanation of why we updated.
+
+## Overview
+**Language model:** roberta-base
+**Language:** English
+**Downstream-task:** Extractive QA
+**Training data:** SQuAD 2.0
+**Eval data:** SQuAD 2.0
+**Code:** See [example](https://github.com/deepset-ai/FARM/blob/master/examples/question_answering.py) in [FARM](https://github.com/deepset-ai/FARM)
+**Infrastructure:** 4x Tesla V100
+
+## Hyperparameters
+
+```
+batch_size = 96
+n_epochs = 2
+base_LM_model = "roberta-base"
+max_seq_len = 386
+learning_rate = 3e-5
+lr_schedule = LinearWarmup
+warmup_proportion = 0.2
+doc_stride=128
+max_query_length=64
+```
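+
+These hyperparameters map onto FARM's QA training pipeline (the linked `question_answering.py` example). The sketch below shows one way they could be wired together; it is illustrative only, the class and argument names follow FARM's example and may differ between FARM releases, and the SQuAD data paths are placeholders.
+
+```python
+# Sketch only: mirrors FARM's question_answering example; exact signatures can
+# vary between FARM versions. The data_dir/filenames below are placeholders.
+from farm.data_handler.data_silo import DataSilo
+from farm.data_handler.processor import SquadProcessor
+from farm.modeling.adaptive_model import AdaptiveModel
+from farm.modeling.language_model import LanguageModel
+from farm.modeling.optimization import initialize_optimizer
+from farm.modeling.prediction_head import QuestionAnsweringHead
+from farm.modeling.tokenization import Tokenizer
+from farm.train import Trainer
+from farm.utils import initialize_device_settings, set_all_seeds
+
+set_all_seeds(seed=42)
+device, n_gpu = initialize_device_settings(use_cuda=True)
+
+tokenizer = Tokenizer.load(pretrained_model_name_or_path="roberta-base", do_lower_case=False)
+processor = SquadProcessor(
+    tokenizer=tokenizer,
+    max_seq_len=386,
+    doc_stride=128,
+    max_query_length=64,
+    label_list=["start_token", "end_token"],
+    metric="squad",
+    train_filename="train-v2.0.json",
+    dev_filename="dev-v2.0.json",
+    data_dir="data/squad20",  # placeholder path to the SQuAD 2.0 json files
+)
+data_silo = DataSilo(processor=processor, batch_size=96)
+
+model = AdaptiveModel(
+    language_model=LanguageModel.load("roberta-base"),
+    prediction_heads=[QuestionAnsweringHead()],
+    embeds_dropout_prob=0.1,
+    lm_output_types=["per_token"],
+    device=device,
+)
+
+model, optimizer, lr_schedule = initialize_optimizer(
+    model=model,
+    learning_rate=3e-5,
+    schedule_opts={"name": "LinearWarmup", "warmup_proportion": 0.2},
+    n_batches=len(data_silo.loaders["train"]),
+    n_epochs=2,
+    device=device,
+)
+
+trainer = Trainer(
+    model=model,
+    optimizer=optimizer,
+    data_silo=data_silo,
+    epochs=2,
+    n_gpu=n_gpu,
+    lr_schedule=lr_schedule,
+    device=device,
+)
+trainer.train()
+```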
+
+## Performance
+Evaluated on the SQuAD 2.0 dev set with the [official eval script](https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/).
+
+```
+"exact": 79.97136359807968
+"f1": 83.00449234495325
+
+"total": 11873
+"HasAns_exact": 78.03643724696356
+"HasAns_f1": 84.11139298441825
+"HasAns_total": 5928
+"NoAns_exact": 81.90075693860386
+"NoAns_f1": 81.90075693860386
+"NoAns_total": 5945
+```
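+
+The field names above (`exact`, `f1`, `HasAns_*`, `NoAns_*`) are the ones produced by the official SQuAD 2.0 eval script. As a rough alternative for computing comparable numbers directly in Python, the `squad_v2` metric from the Hugging Face `evaluate` library reports the same fields; the snippet below only illustrates the expected input format with a dummy example and is not the procedure used for the numbers in this card.
+
+```python
+# Illustrative sketch: SQuAD v2-style metrics via the `evaluate` library,
+# shown with a single dummy prediction/reference pair.
+import evaluate
+
+squad_v2_metric = evaluate.load("squad_v2")
+
+predictions = [{
+    "id": "56ddde6b9a695914005b9628",   # dummy example id
+    "prediction_text": "Normandy",       # predicted answer span ("" for no-answer)
+    "no_answer_probability": 0.0,        # required for SQuAD 2.0 scoring
+}]
+references = [{
+    "id": "56ddde6b9a695914005b9628",
+    "answers": {"text": ["Normandy"], "answer_start": [159]},
+}]
+
+results = squad_v2_metric.compute(predictions=predictions, references=references)
+print(results["exact"], results["f1"])
+```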
+
+## Usage
+
+### In Transformers
+```python
+from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
+
+model_name = "deepset/roberta-base-squad2-v2"
+
+# a) Get predictions
+nlp = pipeline('question-answering', model=model_name, tokenizer=model_name)
+QA_input = {
+ 'question': 'Why is model conversion important?',
+ 'context': 'The option to convert models between FARM and transformers gives freedom to the user and lets people easily switch between frameworks.'
+}
+res = nlp(QA_input)
+
+# b) Load model & tokenizer
+model = AutoModelForQuestionAnswering.from_pretrained(model_name)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+```
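+
+The pipeline call returns a plain dict containing the extracted span, its confidence score, and the character offsets of the span within the context:
+
+```python
+# Inspect the prediction returned by the pipeline above.
+print(res["answer"])              # extracted answer span
+print(res["score"])               # model confidence
+print(res["start"], res["end"])   # character offsets of the span in the context
+```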
+
+### In FARM
+
+```python
+from farm.modeling.adaptive_model import AdaptiveModel
+from farm.modeling.tokenization import Tokenizer
+from farm.infer import Inferencer
+
+model_name = "deepset/roberta-base-squad2-v2"
+
+# a) Get predictions
+nlp = Inferencer.load(model_name, task_type="question_answering")
+QA_input = [{"questions": ["Why is model conversion important?"],
+             "text": "The option to convert models between FARM and transformers gives freedom to the user and lets people easily switch between frameworks."}]
+res = nlp.inference_from_dicts(dicts=QA_input, rest_api_schema=True)
+
+# b) Load model & tokenizer
+model = AdaptiveModel.convert_from_transformers(model_name, device="cpu", task_type="question_answering")
+tokenizer = Tokenizer.load(model_name)
+```
+
+### In haystack
+For QA at scale (i.e. over many documents instead of a single paragraph), you can also load the model in [haystack](https://github.com/deepset-ai/haystack/):
+```python
+# In haystack >= 1.0 the readers live in `haystack.nodes`; older releases used `haystack.reader.*`
+from haystack.nodes import FARMReader, TransformersReader
+
+reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2")
+# or
+reader = TransformersReader(model_name_or_path="deepset/roberta-base-squad2", tokenizer="deepset/roberta-base-squad2")
+```
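+
+For a full retrieval + reading setup, the reader is typically combined with a document store and a retriever in an extractive QA pipeline. The sketch below assumes haystack >= 1.0 module paths (`haystack.document_stores`, `haystack.nodes`, `haystack.pipelines`); names differ in other versions, and the indexed document is a placeholder.
+
+```python
+# Illustrative end-to-end sketch, assuming haystack >= 1.0.
+from haystack.document_stores import InMemoryDocumentStore
+from haystack.nodes import FARMReader, TfidfRetriever
+from haystack.pipelines import ExtractiveQAPipeline
+
+# Placeholder corpus; in practice you would index your own documents.
+document_store = InMemoryDocumentStore()
+document_store.write_documents([
+    {"content": "The option to convert models between FARM and transformers "
+                "gives freedom to the user and lets people easily switch between frameworks."}
+])
+
+retriever = TfidfRetriever(document_store=document_store)
+reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2")
+
+pipe = ExtractiveQAPipeline(reader=reader, retriever=retriever)
+prediction = pipe.run(
+    query="Why is model conversion important?",
+    params={"Retriever": {"top_k": 10}, "Reader": {"top_k": 3}},
+)
+print(prediction["answers"][0].answer)
+```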
+
+
+## Authors
+Branden Chan: `branden.chan [at] deepset.ai`
+Timo Möller: `timo.moeller [at] deepset.ai`
+Malte Pietsch: `malte.pietsch [at] deepset.ai`
+Tanay Soni: `tanay.soni [at] deepset.ai`
+
+## About us
+
+
+We bring NLP to the industry via open source!
+Our focus: industry-specific language models & large-scale QA systems.
+
+Some of our work:
+- [German BERT (aka "bert-base-german-cased")](https://deepset.ai/german-bert)
+- [FARM](https://github.com/deepset-ai/FARM)
+- [Haystack](https://github.com/deepset-ai/haystack/)
+
+Get in touch:
+[Twitter](https://twitter.com/deepset_ai) | [LinkedIn](https://www.linkedin.com/company/deepset-ai/) | [Website](https://deepset.ai)
+
diff --git a/config.json b/config.json
index fa0c801..a1f66a8 100644
--- a/config.json
+++ b/config.json
@@ -5,6 +5,7 @@
"attention_probs_dropout_prob": 0.1,
"bos_token_id": 0,
"eos_token_id": 2,
+ "gradient_checkpointing": false,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
@@ -17,7 +18,6 @@
"name": "Roberta",
"num_attention_heads": 12,
"num_hidden_layers": 12,
- "output_past": true,
"pad_token_id": 1,
"type_vocab_size": 1,
"vocab_size": 50265
diff --git a/pytorch_model.bin b/pytorch_model.bin
index ccba5f4..fe36f7e 100644
--- a/pytorch_model.bin
+++ b/pytorch_model.bin
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:6f38b2e5443a0aa0aab23c665a8c033f00ba9d85424a0f5c1acd8bbdf36df6ef
-size 498637366
+oid sha256:e0b64ccefc1bcb569b604baea27eb873e5482fdf6eb3ceff1fb5368397db5aed
+size 496313727
diff --git a/special_tokens_map.json b/special_tokens_map.json
index 6cd1d90..e97d199 100644
--- a/special_tokens_map.json
+++ b/special_tokens_map.json
@@ -1 +1 @@
-{"bos_token": "", "eos_token": "", "unk_token": "", "sep_token": "", "pad_token": "", "cls_token": "", "mask_token": ""}
\ No newline at end of file
+{"bos_token": {"content": "", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
\ No newline at end of file
diff --git a/tokenizer_config.json b/tokenizer_config.json
index 32b043b..716c997 100644
--- a/tokenizer_config.json
+++ b/tokenizer_config.json
@@ -1 +1 @@
-{"do_lower_case": true, "max_len": 512, "bos_token": "", "eos_token": "", "unk_token": "", "sep_token": "", "pad_token": "", "cls_token": "", "mask_token": ""}
\ No newline at end of file
+{"do_lower_case": false, "model_max_length": 512, "full_tokenizer_file": null}