songw 2023-04-11 11:33:09 +08:00
parent 75c65825b6
commit 1a89f1c782
10 changed files with 306 additions and 0 deletions

34
bart-large-mnli/app.py Normal file

@@ -0,0 +1,34 @@
import gradio as gr
from transformers import pipeline
sentimentPipeline = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
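# Note: for a single input string, the zero-shot pipeline returns a dict with
# parallel 'labels' and 'scores' lists sorted by descending score, e.g. (illustrative):
# {'sequence': '...', 'labels': ['urgent', 'phone', ...], 'scores': [0.97, 0.01, ...]}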
def sentiment_analysis(text, labels):
    # Split the comma-separated label string into individual candidate labels
    candidate_labels = [label.strip() for label in labels.split(',')]
    results = sentimentPipeline(text, candidate_labels)
    # Build one "label, score" line per prediction (already sorted by score)
    total_results = ""
    for label, score in zip(results['labels'], results['scores']):
        total_results += f"Sentiment: {label}, Score: {score}"
        total_results += '\r\n'
    return total_results

demo = gr.Interface(fn=sentiment_analysis,
                    inputs=[
                        gr.components.Textbox(label="Text"),
                        gr.components.Textbox(label="Label")
                    ],
                    outputs='text',
                    examples=[['I have a problem with my iphone that needs to be resolved asap!!', 'urgent, not urgent, phone, tablet, computer'], ['Last week I upgraded my iOS version and ever since then my phone has been overheating whenever I use your app.', 'mobile, website, billing, account access']],
                    title="Text Sentiment Analysis")

if __name__ == "__main__":
    demo.queue(concurrency_count=3)
    demo.launch(server_name="0.0.0.0", server_port=7028)

27
bert-base-uncased/app.py Normal file

@@ -0,0 +1,27 @@
import gradio as gr
from transformers import pipeline
unmasker = pipeline('fill-mask', model='bert-base-uncased')
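# Note: the fill-mask pipeline returns a list of candidate fills for the [MASK] token,
# e.g. (illustrative): [{'token_str': 'capital', 'score': 0.42, 'token': ..., 'sequence': '...'}, ...]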
def sentiment_analysis(text):
    results = unmasker(text)
    # Build one "token, score" line per candidate fill for the [MASK] position
    total_results = ""
    for result in results:
        total_results += f"Token: {result.get('token_str')}, Score: {result.get('score')}"
        total_results += '\r\n'
    return total_results

demo = gr.Interface(fn=sentiment_analysis,
                    inputs='text',
                    outputs='text',
                    examples=[['Paris is the [MASK] of France.'], ['The goal of life is [MASK].']],
                    title="Masked Word Prediction")

if __name__ == "__main__":
    demo.queue(concurrency_count=3)
    demo.launch(server_name="0.0.0.0", server_port=7028)


@@ -0,0 +1,21 @@
import gradio as gr
from transformers import pipeline
modelName="finiteautomata/bertweet-base-sentiment-analysis"
sentimentPipeline = pipeline("sentiment-analysis", model=modelName)
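# Note: the sentiment-analysis pipeline returns one dict per input,
# e.g. (illustrative): [{'label': 'POS', 'score': 0.99}]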
def sentiment_analysis(text):
    results = sentimentPipeline(text)
    # Return the top predicted sentiment label and its confidence score
    return f"Sentiment: {results[0].get('label')}, Score: {results[0].get('score'):.2f}"

demo = gr.Interface(fn=sentiment_analysis,
                    inputs='text',
                    outputs='text',
                    title="Text Sentiment Analysis")

if __name__ == "__main__":
    demo.queue(concurrency_count=3)
    demo.launch(server_name="0.0.0.0", server_port=7028)


@@ -0,0 +1,34 @@
import gradio as gr
from transformers import pipeline
sentimentPipeline = pipeline("zero-shot-classification", model="Narsil/deberta-large-mnli-zero-cls")
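# Note: for a single input string, the zero-shot pipeline returns a dict with
# parallel 'labels' and 'scores' lists sorted by descending score, e.g. (illustrative):
# {'sequence': '...', 'labels': ['urgent', 'phone', ...], 'scores': [0.97, 0.01, ...]}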
def sentiment_analysis(text, labels):
    # Split the comma-separated label string into individual candidate labels
    candidate_labels = [label.strip() for label in labels.split(',')]
    results = sentimentPipeline(text, candidate_labels)
    # Build one "label, score" line per prediction (already sorted by score)
    total_results = ""
    for label, score in zip(results['labels'], results['scores']):
        total_results += f"Sentiment: {label}, Score: {score}"
        total_results += '\r\n'
    return total_results

demo = gr.Interface(fn=sentiment_analysis,
                    inputs=[
                        gr.components.Textbox(label="Text"),
                        gr.components.Textbox(label="Label")
                    ],
                    outputs='text',
                    examples=[['I have a problem with my iphone that needs to be resolved asap!!', 'urgent, not urgent, phone, tablet, computer'], ['Last week I upgraded my iOS version and ever since then my phone has been overheating whenever I use your app.', 'mobile, website, billing, account access']],
                    title="Text Sentiment Analysis")

if __name__ == "__main__":
    demo.queue(concurrency_count=3)
    demo.launch(server_name="0.0.0.0", server_port=7028)


@@ -0,0 +1,34 @@
import gradio as gr
from transformers import pipeline
sentimentPipeline = pipeline("zero-shot-classification", model="valhalla/distilbart-mnli-12-1")
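# Note: for a single input string, the zero-shot pipeline returns a dict with
# parallel 'labels' and 'scores' lists sorted by descending score, e.g. (illustrative):
# {'sequence': '...', 'labels': ['urgent', 'phone', ...], 'scores': [0.97, 0.01, ...]}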
def sentiment_analysis(text, labels):
    # Split the comma-separated label string into individual candidate labels
    candidate_labels = [label.strip() for label in labels.split(',')]
    results = sentimentPipeline(text, candidate_labels)
    # Build one "label, score" line per prediction (already sorted by score)
    total_results = ""
    for label, score in zip(results['labels'], results['scores']):
        total_results += f"Sentiment: {label}, Score: {score}"
        total_results += '\r\n'
    return total_results

demo = gr.Interface(fn=sentiment_analysis,
                    inputs=[
                        gr.components.Textbox(label="Text"),
                        gr.components.Textbox(label="Label")
                    ],
                    outputs='text',
                    examples=[['I have a problem with my iphone that needs to be resolved asap!!', 'urgent, not urgent, phone, tablet, computer'], ['Last week I upgraded my iOS version and ever since then my phone has been overheating whenever I use your app.', 'mobile, website, billing, account access']],
                    title="Text Sentiment Analysis")

if __name__ == "__main__":
    demo.queue(concurrency_count=3)
    demo.launch(server_name="0.0.0.0", server_port=7028)


@@ -0,0 +1,34 @@
import gradio as gr
from transformers import pipeline
sentimentPipeline = pipeline("zero-shot-classification", model="valhalla/distilbart-mnli-12-3")
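# Note: for a single input string, the zero-shot pipeline returns a dict with
# parallel 'labels' and 'scores' lists sorted by descending score, e.g. (illustrative):
# {'sequence': '...', 'labels': ['urgent', 'phone', ...], 'scores': [0.97, 0.01, ...]}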
def sentiment_analysis(text, labels):
    # Split the comma-separated label string into individual candidate labels
    candidate_labels = [label.strip() for label in labels.split(',')]
    results = sentimentPipeline(text, candidate_labels)
    # Build one "label, score" line per prediction (already sorted by score)
    total_results = ""
    for label, score in zip(results['labels'], results['scores']):
        total_results += f"Sentiment: {label}, Score: {score}"
        total_results += '\r\n'
    return total_results

demo = gr.Interface(fn=sentiment_analysis,
                    inputs=[
                        gr.components.Textbox(label="Text"),
                        gr.components.Textbox(label="Label")
                    ],
                    outputs='text',
                    examples=[['I have a problem with my iphone that needs to be resolved asap!!', 'urgent, not urgent, phone, tablet, computer'], ['Last week I upgraded my iOS version and ever since then my phone has been overheating whenever I use your app.', 'mobile, website, billing, account access']],
                    title="Text Sentiment Analysis")

if __name__ == "__main__":
    demo.queue(concurrency_count=3)
    demo.launch(server_name="0.0.0.0", server_port=7028)


@@ -0,0 +1,34 @@
import gradio as gr
from transformers import pipeline
sentimentPipeline = pipeline("zero-shot-classification", model="typeform/distilbert-base-uncased-mnli")
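# Note: for a single input string, the zero-shot pipeline returns a dict with
# parallel 'labels' and 'scores' lists sorted by descending score, e.g. (illustrative):
# {'sequence': '...', 'labels': ['urgent', 'phone', ...], 'scores': [0.97, 0.01, ...]}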
def sentiment_analysis(text, labels):
    # Split the comma-separated label string into individual candidate labels
    candidate_labels = [label.strip() for label in labels.split(',')]
    results = sentimentPipeline(text, candidate_labels)
    # Build one "label, score" line per prediction (already sorted by score)
    total_results = ""
    for label, score in zip(results['labels'], results['scores']):
        total_results += f"Sentiment: {label}, Score: {score}"
        total_results += '\r\n'
    return total_results

demo = gr.Interface(fn=sentiment_analysis,
                    inputs=[
                        gr.components.Textbox(label="Text"),
                        gr.components.Textbox(label="Label")
                    ],
                    outputs='text',
                    examples=[['I have a problem with my iphone that needs to be resolved asap!!', 'urgent, not urgent, phone, tablet, computer'], ['Last week I upgraded my iOS version and ever since then my phone has been overheating whenever I use your app.', 'mobile, website, billing, account access']],
                    title="Text Sentiment Analysis")

if __name__ == "__main__":
    demo.queue(concurrency_count=3)
    demo.launch(server_name="0.0.0.0", server_port=7028)


@@ -0,0 +1,34 @@
import gradio as gr
from transformers import pipeline
sentimentPipeline = pipeline("zero-shot-classification", model="cross-encoder/nli-distilroberta-base")
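# Note: for a single input string, the zero-shot pipeline returns a dict with
# parallel 'labels' and 'scores' lists sorted by descending score, e.g. (illustrative):
# {'sequence': '...', 'labels': ['technology', 'politics', ...], 'scores': [0.95, 0.03, ...]}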
def sentiment_analysis(text, labels):
    # Split the comma-separated label string into individual candidate labels
    candidate_labels = [label.strip() for label in labels.split(',')]
    results = sentimentPipeline(text, candidate_labels)
    # Build one "label, score" line per prediction (already sorted by score)
    total_results = ""
    for label, score in zip(results['labels'], results['scores']):
        total_results += f"Sentiment: {label}, Score: {score}"
        total_results += '\r\n'
    return total_results

demo = gr.Interface(fn=sentiment_analysis,
                    inputs=[
                        gr.components.Textbox(label="Text"),
                        gr.components.Textbox(label="Label")
                    ],
                    outputs='text',
                    examples=[['Apple just announced the newest iPhone X', 'technology, sports, politics'], ['Last week I upgraded my iOS version and ever since then my phone has been overheating whenever I use your app.', 'mobile, website, billing, account access']],
                    title="Text Sentiment Analysis")

if __name__ == "__main__":
    demo.queue(concurrency_count=3)
    demo.launch(server_name="0.0.0.0", server_port=7028)

33
roberta-large-mnli/app.py Normal file

@@ -0,0 +1,33 @@
import gradio as gr
from transformers import pipeline
sentimentPipeline = pipeline('zero-shot-classification', model='roberta-large-mnli')
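# Note: for a single input string, the zero-shot pipeline returns a dict with
# parallel 'labels' and 'scores' lists sorted by descending score, e.g. (illustrative):
# {'sequence': '...', 'labels': ['positive', 'neutral', 'negative'], 'scores': [0.92, 0.05, 0.03]}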
def sentiment_analysis(text, labels):
    # Split the comma-separated label string into individual candidate labels
    candidate_labels = [label.strip() for label in labels.split(',')]
    results = sentimentPipeline(text, candidate_labels)
    # Build one "label, score" line per prediction (already sorted by score)
    total_results = ""
    for label, score in zip(results['labels'], results['scores']):
        total_results += f"Sentiment: {label}, Score: {score}"
        total_results += '\r\n'
    return total_results

demo = gr.Interface(fn=sentiment_analysis,
                    inputs=[
                        gr.components.Textbox(label="Text"),
                        gr.components.Textbox(label="Label")
                    ],
                    outputs='text',
                    examples=[['I am happy', 'negative, neutral, positive'], ['I am sad', 'negative, neutral, positive']],
                    title="Text Sentiment Analysis")

if __name__ == "__main__":
    demo.queue(concurrency_count=3)
    demo.launch(server_name="0.0.0.0", server_port=7028)


@@ -0,0 +1,21 @@
import gradio as gr
from transformers import pipeline
modelName="papluca/xlm-roberta-base-language-detection"
sentimentPipeline = pipeline("sentiment-analysis", modelName)
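# Note: the pipeline returns one dict per input with the top predicted language label,
# e.g. (illustrative): [{'label': 'en', 'score': 0.99}]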
def sentiment_analysis(text):
    results = sentimentPipeline(text)
    # Return the raw prediction; Gradio renders it as text
    return results
    # return f"Sentiment: {results[0].get('label')}, Score: {results[0].get('score'):.2f}"

demo = gr.Interface(fn=sentiment_analysis,
                    inputs='text',
                    outputs='text',
                    title="Language Detection")

if __name__ == "__main__":
    demo.queue(concurrency_count=3)
    demo.launch(server_name="0.0.0.0", server_port=7028)