Update README.md
This commit is contained in:
parent
d0c106c1ce
commit
7de8833e3f
|
@ -42,16 +42,21 @@ As of December 2021, mDeBERTa-base is the best performing multilingual transform
|
|||
```python
|
||||
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Zero-shot NLI example: score a premise/hypothesis pair with
# mDeBERTa-v3-base-mnli-xnli and print per-label probabilities.

# Define the device explicitly (the original snippet referenced an
# undefined `device`, raising NameError): prefer GPU when available.
device = "cuda:0" if torch.cuda.is_available() else "cpu"

model_name = "MoritzLaurer/mDeBERTa-v3-base-mnli-xnli"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Move the model to the same device the inputs will live on; otherwise a
# CUDA device would fail with a device-mismatch error at forward time.
model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)

premise = "Angela Merkel ist eine Politikerin in Deutschland und Vorsitzende der CDU"
hypothesis = "Emmanuel Macron is the President of France"

# `inputs` (not `input`) avoids shadowing the builtin. Unpack the full
# encoding into the model so the attention_mask is passed along too,
# instead of forwarding only input_ids.
inputs = tokenizer(premise, hypothesis, truncation=True, return_tensors="pt").to(device)
output = model(**inputs)
prediction = torch.softmax(output["logits"][0], -1).tolist()

label_names = ["entailment", "neutral", "contradiction"]
# Express each probability as a percentage rounded to one decimal place.
prediction = {name: round(float(pred) * 100, 1) for pred, name in zip(prediction, label_names)}
print(prediction)
|
||||
```
|
||||
|
||||
|
|
Loading…
Reference in New Issue