"""Gradio demo: generate ChatGPT-style prompts from a short role description.

Wraps the ``merve/chatgpt-prompts-bart-long`` seq2seq model in a two-textbox
Blocks UI (input on the left, generated prompt below the buttons).
"""

import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from gradio.themes.utils import sizes

# Custom theme: square corners, blue (#4D63FF) accents on white buttons.
theme = gr.themes.Default(radius_size=sizes.radius_none).set(
    block_label_text_color='#4D63FF',
    block_title_text_color='#4D63FF',
    button_primary_text_color='#4D63FF',
    button_primary_background_fill='#FFFFFF',
    button_primary_border_color='#4D63FF',
    button_primary_background_fill_hover='#EDEFFF',
)

# Loaded once at import time; from_tf=True converts the TensorFlow checkpoint.
tokenizer = AutoTokenizer.from_pretrained("merve/chatgpt-prompts-bart-long")
model = AutoModelForSeq2SeqLM.from_pretrained(
    "merve/chatgpt-prompts-bart-long", from_tf=True
)


def generate(prompt):
    """Run the seq2seq model on *prompt* and return the decoded string.

    Args:
        prompt: Short role description (e.g. "photographer").

    Returns:
        The first decoded generation, capped at 150 new tokens.
    """
    batch = tokenizer(prompt, return_tensors="pt")
    generated_ids = model.generate(batch["input_ids"], max_new_tokens=150)
    output = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return output[0]


with gr.Blocks(theme=theme, css="footer {visibility: hidden}") as demo:
    with gr.Row():
        with gr.Column():
            box1 = gr.Textbox(label="文本")
            with gr.Row():
                button = gr.Button("提交", variant="primary")
                clear = gr.Button("清除", variant="primary")
            box2 = gr.Textbox(label="文本")
            button.click(fn=generate, inputs=box1, outputs=box2)
            # BUG FIX: the original lambda took one parameter (`lambda x: ...`)
            # but inputs=[] means Gradio invokes it with zero arguments, so
            # clicking 清除 raised TypeError. A zero-arg lambda is correct here.
            clear.click(lambda: gr.update(value=''), [], [box1])
            examples = gr.Examples(
                examples=[["photographer"], ["developer"]],
                inputs=[box1],
                label="例子",
            )

if __name__ == "__main__":
    # NOTE(review): `concurrency_count` was removed from `queue()` in
    # Gradio 4.x (replaced by `default_concurrency_limit`) — confirm the
    # installed Gradio version before upgrading.
    demo.queue(concurrency_count=3).launch(server_name="0.0.0.0", server_port=7020)