Update README.md

This commit is contained in:
Mohammed Rakib 2023-01-18 11:30:56 +00:00 committed by huggingface-web
parent 2adacb01ca
commit 61ed0ae78a
1 changed file with 23 additions and 22 deletions

View File

@ -1,6 +1,14 @@
--- ---
language: language:
- en - en
license: mit
datasets:
- cuad
pipeline_tag: question-answering
tags:
- legal-contract-review
- roberta
- cuad
--- ---
# Model Card for roberta-base-on-cuad # Model Card for roberta-base-on-cuad
@ -121,27 +129,20 @@ More information needed
**BibTeX:** **BibTeX:**
``` ```
@article{DBLP:journals/corr/abs-1907-11692, @inproceedings{nawar-etal-2022-open,
author = {Yinhan Liu and title = "An Open Source Contractual Language Understanding Application Using Machine Learning",
Myle Ott and author = "Nawar, Afra and
Naman Goyal and Rakib, Mohammed and
Jingfei Du and Hai, Salma Abdul and
Mandar Joshi and Haq, Sanaulla",
Danqi Chen and booktitle = "Proceedings of the First Workshop on Language Technology and Resources for a Fair, Inclusive, and Safe Society within the 13th Language Resources and Evaluation Conference",
Omer Levy and month = jun,
Mike Lewis and year = "2022",
Luke Zettlemoyer and address = "Marseille, France",
Veselin Stoyanov}, publisher = "European Language Resources Association",
title = {RoBERTa: {A} Robustly Optimized {BERT} Pretraining Approach}, url = "https://aclanthology.org/2022.lateraisse-1.6",
journal = {CoRR}, pages = "42--50",
volume = {abs/1907.11692}, abstract = "Legal field is characterized by its exclusivity and non-transparency. Despite the frequency and relevance of legal dealings, legal documents like contracts remains elusive to non-legal professionals for the copious usage of legal jargon. There has been little advancement in making legal contracts more comprehensible. This paper presents how Machine Learning and NLP can be applied to solve this problem, further considering the challenges of applying ML to the high length of contract documents and training in a low resource environment. The largest open-source contract dataset so far, the Contract Understanding Atticus Dataset (CUAD) is utilized. Various pre-processing experiments and hyperparameter tuning have been carried out and we successfully managed to eclipse SOTA results presented for models in the CUAD dataset trained on RoBERTa-base. Our model, A-type-RoBERTa-base achieved an AUPR score of 46.6{\%} compared to 42.6{\%} on the original RoBERT-base. This model is utilized in our end to end contract understanding application which is able to take a contract and highlight the clauses a user is looking to find along with it{'}s descriptions to aid due diligence before signing. Alongside digital, i.e. searchable, contracts the system is capable of processing scanned, i.e. non-searchable, contracts using tesseract OCR. This application is aimed to not only make contract review a comprehensible process to non-legal professionals, but also to help lawyers and attorneys more efficiently review contracts.",
year = {2019},
url = {http://arxiv.org/abs/1907.11692},
archivePrefix = {arXiv},
eprint = {1907.11692},
timestamp = {Thu, 01 Aug 2019 08:59:33 +0200},
biburl = {https://dblp.org/rec/journals/corr/abs-1907-11692.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
} }
``` ```
@ -175,4 +176,4 @@ tokenizer = AutoTokenizer.from_pretrained("Rakib/roberta-base-on-cuad")
model = AutoModelForQuestionAnswering.from_pretrained("Rakib/roberta-base-on-cuad") model = AutoModelForQuestionAnswering.from_pretrained("Rakib/roberta-base-on-cuad")
``` ```
</details> </details>