from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
import torch
import gradio as gr
from gradio.themes.utils import sizes
import cv2
import numpy as np
from PIL import Image

# Custom Gradio theme: square corners, blue accents, white primary button.
theme = gr.themes.Default(radius_size=sizes.radius_none).set(
    block_label_text_color='#4D63FF',
    block_title_text_color='#4D63FF',
    button_primary_text_color='#4D63FF',
    button_primary_background_fill='#FFFFFF',
    button_primary_border_color='#4D63FF',
    button_primary_background_fill_hover='#EDEFFF',
)

# Load the Canny-edge ControlNet and attach it to Stable Diffusion v1.5.
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float16,
)

# One-time pipeline setup, done here instead of inside the request handler
# so the scheduler swap, xformers attention, and CPU offload hooks are not
# re-applied on every generation.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_xformers_memory_efficient_attention()
pipe.enable_model_cpu_offload()


def image2image(image, prompt):
    # The Gradio Image component (type='numpy') already delivers an RGB ndarray.
    # Extract Canny edges and stack them into the 3-channel control image
    # expected by the ControlNet pipeline.
    low_threshold = 100
    high_threshold = 200
    edges = cv2.Canny(image, low_threshold, high_threshold)
    edges = edges[:, :, None]
    edges = np.concatenate([edges, edges, edges], axis=2)
    control_image = Image.fromarray(edges)
    # Generate an image guided by the prompt and the edge map.
    result = pipe(prompt, control_image, num_inference_steps=20).images[0]
    return result


with gr.Blocks(theme=theme, css="footer {visibility: hidden}") as demo:
    gr.Markdown("""
Generate images from edges
""") with gr.Row(): with gr.Column(): image = gr.Image(label="图片", type='numpy') prompt = gr.Textbox(label="提示词") with gr.Row(): button = gr.Button("提交", variant="primary") box2 = gr.Image(label="图片") button.click(fn=image2image, inputs=[image, prompt], outputs=box2) examples = gr.Examples(examples=[['bird.png', 'bird']], inputs=[image, prompt], label="例子") if __name__ == "__main__": demo.queue(concurrency_count=3).launch(server_name = "0.0.0.0")