import torch
import gradio as gr
from controlnet_aux import OpenposeDetector
from diffusers import (
    ControlNetModel,
    StableDiffusionControlNetPipeline,
    UniPCMultistepScheduler,
)
from gradio.themes.utils import sizes

# Custom theme: square corners, blue accents on a white primary button.
theme = gr.themes.Default(radius_size=sizes.radius_none).set(
    block_label_text_color='#4D63FF',
    block_title_text_color='#4D63FF',
    button_primary_text_color='#4D63FF',
    button_primary_background_fill='#FFFFFF',
    button_primary_border_color='#4D63FF',
    button_primary_background_fill_hover='#EDEFFF',
)

# OpenPose preprocessor: extracts a pose skeleton map from the input photo.
openpose = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')

# ControlNet conditioned on OpenPose skeletons, paired with Stable Diffusion 1.5.
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float16,
)

# One-time pipeline setup (done at load time rather than on every request):
# swap in the faster UniPC scheduler, enable memory-efficient attention
# (requires xformers), and offload idle submodules to CPU to save VRAM.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_xformers_memory_efficient_attention()
pipe.enable_model_cpu_offload()


def image2image(image, prompt):
    # Extract the pose skeleton, then generate an image conditioned on it.
    pose = openpose(image)
    return pipe(prompt, image=pose, num_inference_steps=20).images[0]


with gr.Blocks(theme=theme, css="footer {visibility: hidden}") as demo:
    gr.Markdown("""Generate an image from a pose""")
    with gr.Row():
        with gr.Column():
            image = gr.Image(label="Image", type='pil')
            prompt = gr.Textbox(label="Prompt")
            with gr.Row():
                button = gr.Button("Submit", variant="primary")
        box2 = gr.Image(label="Image")
    button.click(fn=image2image, inputs=[image, prompt], outputs=box2)
    examples = gr.Examples(
        examples=[['pose.png', 'chef in the kitchen']],
        inputs=[image, prompt],
        label="Examples",
    )

if __name__ == "__main__":
    demo.queue(concurrency_count=3).launch(server_name="0.0.0.0")