|
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel |
|
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator |
|
from PIL import Image |
|
import gradio as gr |
|
import numpy as np |
|
import requests |
|
import torch |
|
import gc |
|
|
|
device = "cuda" if torch.cuda.is_available() else "cpu" |
|
|
|
|
|
|
|
print("[Downloading SAM Weights]") |
|
SAM_URL = "https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth" |
|
|
|
r = requests.get(SAM_URL, allow_redirects=True) |
|
|
|
print("[Writing SAM Weights]") |
|
|
|
with open("./sam_vit_h_4b8939.pth", "wb") as sam_weights: |
|
sam_weights.write(r.content) |
|
|
|
del r |
|
gc.collect() |
|
|
|
sam = sam_model_registry["vit_h"](checkpoint="./sam_vit_h_4b8939.pth").to(device) |
|
|
|
mask_generator = SamAutomaticMaskGenerator(sam) |
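
# Hedged sketch: SamAutomaticMaskGenerator exposes knobs for the speed/quality
# trade-off (values below are illustrative, not tuned for this demo):
# mask_generator = SamAutomaticMaskGenerator(
#     sam,
#     points_per_side=32,           # density of the point-prompt sampling grid
#     pred_iou_thresh=0.88,         # drop masks the model itself scores poorly
#     stability_score_thresh=0.95,  # drop masks unstable under threshold jitter
#     min_mask_region_area=100,     # remove tiny disconnected regions (needs opencv)
# )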
|
gc.collect() |
|
|
|
|
|
|
|
print("Creating ControlNet Pipeline") |
|
|
|
controlnet = ControlNetModel.from_pretrained( |
|
"mfidabel/controlnet-segment-anything", torch_dtype=torch.float16 |
|
).to(device) |
|
|
|
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, safety_checker=None
).to(device)
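
# Optional tweaks (assumptions, not part of the original demo): diffusers lets
# you swap in a faster scheduler and reduce VRAM via attention slicing, e.g.:
# from diffusers import UniPCMultistepScheduler
# pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
# pipe.enable_attention_slicing()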
|
|
|
|
|
|
|
title = "# 𧨠ControlNet on Segment Anything π€" |
|
description = """This is a demo on 𧨠ControlNet based on Meta's [Segment Anything Model](https://segment-anything.com/). |
|
|
|
Upload an Image, Segment it with Segment Anything, write a prompt, and generate images π€ |
|
|
|
βοΈ It takes about 20~ seconds to generate 4 samples, to get faster results, don't forget to reduce the NΒΊ Samples to 1. |
|
|
|
You can obtain the Segmentation Map of any Image through this Colab: [![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mfidabel/JAX_SPRINT_2023/blob/main/Segment_Anything_JAX_SPRINT.ipynb) |
|
|
|
A huge thanks goes out to @GoogleCloud, for providing us with powerful TPUs that enabled us to train this model; and to the @HuggingFace Team for organizing the sprint. |
|
|
|
Check out our [Model Card π§¨](https://huggingface.co/mfidabel/controlnet-segment-anything) |
|
|
|
""" |
|
|
|
about = """ |
|
# π¨βπ» About the model |
|
|
|
This [model](https://huggingface.co/mfidabel/controlnet-segment-anything) is based on the [ControlNet Model](https://huggingface.co/blog/controlnet), which allow us to generate Images using some sort of condition image. For this model, we selected the segmentation maps produced by Meta's new segmentation model called [Segment Anything Model](https://github.com/facebookresearch/segment-anything) as the condition image. We then trained the model to generate images based on the structure of the segmentation maps and the text prompts given. |
|
|
|
|
|
|
|
# πΎ About the dataset |
|
|
|
For the training, we generated a segmented dataset based on the [COYO-700M](https://huggingface.co/datasets/kakaobrain/coyo-700m) dataset. The dataset provided us with the images, and the text prompts. For the segmented images, we used [Segment Anything Model](https://github.com/facebookresearch/segment-anything). We then created 8k samples to train our model on, which isn't a lot, but as a team, we have been very busy with many other responsibilities and time constraints, which made it challenging to dedicate a lot of time to generating a larger dataset. Despite the constraints we faced, we have still managed to achieve some nice results π |
|
|
|
You can check the generated datasets below β¬οΈ |
|
- [sam-coyo-2k](https://huggingface.co/datasets/mfidabel/sam-coyo-2k) |
|
- [sam-coyo-2.5k](https://huggingface.co/datasets/mfidabel/sam-coyo-2.5k) |
|
- [sam-coyo-3k](https://huggingface.co/datasets/mfidabel/sam-coyo-3k) |
|
|
|
""" |
|
|
|
gif_html = """ <img src="https://github.com/mfidabel/JAX_SPRINT_2023/blob/8632f0fde7388d7a4fc57225c96ef3b8411b3648/EX_1.gif?raw=true" alt= ββ height="50%" class="about"> """ |
|
|
|
examples = [["photo of a futuristic dining table, high quality, tricolor", "low quality, deformed, blurry, points", "examples/condition_image_1.jpeg"], |
|
["a monochrome photo of henry cavil using a shirt, high quality", "low quality, low res, deformed", "examples/condition_image_2.jpeg"], |
|
["photo of a japanese living room, high quality, coherent", "low quality, colors, saturation, extreme brightness, blurry, low res", "examples/condition_image_3.jpeg"], |
|
["living room, detailed, high quality", "low quality, low resolution, render, oversaturated, low contrast", "examples/condition_image_4.jpeg"], |
|
["painting of the bodiam castle, Vicent Van Gogh style, Starry Night", "low quality, low resolution, render, oversaturated, low contrast", "examples/condition_image_5.jpeg"], |
|
["painting of food, olive oil can, purple wine, green cabbage, chili peppers, pablo picasso style, high quality", "low quality, low resolution, render, oversaturated, low contrast, realistic", "examples/condition_image_6.jpeg"], |
|
["Katsushika Hokusai painting of mountains, a sky and desert landscape, The Great Wave off Kanagawa style, colorful", |
|
"low quality, low resolution, render, oversaturated, low contrast, realistic", "examples/condition_image_7.jpeg"]] |
|
|
|
default_example = examples[4] |
|
|
|
examples = examples[::-1] |
|
|
|
css = "h1 { text-align: center } .about { text-align: justify; padding-left: 10%; padding-right: 10%; }" |
|
|
|
|
|
def show_anns(anns):
    """Paint each SAM mask in a random solid color onto a black RGB canvas."""
    if len(anns) == 0:
        return

    # Draw the largest masks first so smaller ones stay visible on top.
    sorted_anns = sorted(anns, key=lambda x: x['area'], reverse=True)
    h, w = sorted_anns[0]['segmentation'].shape
    final_img = Image.fromarray(np.zeros((h, w, 3), dtype=np.uint8), mode="RGB")

    for ann in sorted_anns:
        m = ann['segmentation']
        # One random color per mask, broadcast across the whole layer.
        img = np.empty((m.shape[0], m.shape[1], 3), dtype=np.uint8)
        for i in range(3):
            img[:, :, i] = np.random.randint(255, dtype=np.uint8)
        # Paste only this mask's region, using the boolean mask as an 8-bit alpha.
        final_img.paste(Image.fromarray(img, mode="RGB"), (0, 0), Image.fromarray(np.uint8(m * 255)))

    return final_img
|
|
|
def segment_image(image, seed=0):
    """Run SAM's automatic mask generator and return the colorized segmentation map."""
    # Seed NumPy so the random mask colors are reproducible for a given seed.
    np.random.seed(int(seed))
    masks = mask_generator.generate(image)
    torch.cuda.empty_cache()

    # Avoid shadowing the built-in `map`.
    seg_map = show_anns(masks)
    del masks
    gc.collect()
    torch.cuda.empty_cache()
    return seg_map
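
# Hypothetical standalone usage (the path is illustrative):
# seg_map = segment_image(np.array(Image.open("examples/condition_image_1.jpeg")), seed=0)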
|
|
|
def infer(prompts, negative_prompts, image, num_inference_steps=50, seed=4, num_samples=4):
    try:
        # Gradio sliders may hand back floats; normalize to ints before use.
        num_inference_steps = int(num_inference_steps)
        seed = int(seed)
        num_samples = int(num_samples)

        print("Segmenting Everything")
        segmented_map = segment_image(image, seed)
        # Show the segmentation map right away, with black placeholders for the samples.
        yield segmented_map, [Image.fromarray(np.zeros((512, 512, 3), dtype=np.uint8))] * num_samples

        rng = torch.Generator(device="cpu").manual_seed(seed)

        print(f"Generating Prompt: {prompts} \nNegative Prompt: {negative_prompts} \nSamples: {num_samples}")
        output = pipe([prompts] * num_samples,
                      [segmented_map] * num_samples,
                      negative_prompt=[negative_prompts] * num_samples,
                      generator=rng,
                      num_inference_steps=num_inference_steps)

        final_image = output.images
        del output

    except Exception as e:
        print("Error: " + str(e))
        final_image = segmented_map = [np.zeros((512, 512, 3), dtype=np.uint8)] * num_samples

    finally:
        gc.collect()
        torch.cuda.empty_cache()
        yield segmented_map, final_image
|
|
|
|
|
cond_img = gr.Image(label="Input", shape=(512, 512), value=default_example[2])\ |
|
.style(height=400) |
|
|
|
segm_img = gr.Image(label="Segmented Image", shape=(512, 512), interactive=False)\ |
|
.style(height=400) |
|
|
|
output = gr.Gallery(label="Generated images")\ |
|
.style(height=200, rows=[2], columns=[2], object_fit="contain") |
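
# Note: shape= and .style() are Gradio 3.x APIs; Gradio 4 replaced them with
# direct constructor kwargs (e.g. gr.Image(height=400), gr.Gallery(columns=2)).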
|
|
|
prompt = gr.Textbox(lines=1, label="Prompt", value=default_example[0]) |
|
negative_prompt = gr.Textbox(lines=1, label="Negative Prompt", value=default_example[1]) |
|
|
|
|
|
with gr.Blocks(css=css) as demo: |
|
with gr.Row(): |
|
with gr.Column(): |
|
|
|
gr.Markdown(title) |
|
|
|
gr.Markdown(description) |
|
|
|
with gr.Column(): |
|
|
|
gr.Markdown(gif_html) |
|
|
|
|
|
with gr.Row(variant="panel"): |
|
with gr.Column(scale=1): |
|
cond_img.render() |
|
|
|
with gr.Column(scale=1): |
|
segm_img.render() |
|
|
|
with gr.Column(scale=1): |
|
output.render() |
|
|
|
|
|
with gr.Row(): |
|
with gr.Column(): |
|
prompt.render() |
|
negative_prompt.render() |
|
|
|
with gr.Column(): |
|
with gr.Accordion("Advanced options", open=False): |
|
num_steps = gr.Slider(10, 60, 50, step=1, label="Steps") |
|
seed = gr.Slider(0, 1024, 4, step=1, label="Seed") |
|
num_samples = gr.Slider(1, 4, 4, step=1, label="NΒΊ Samples") |
|
|
|
segment_btn = gr.Button("Segment") |
|
submit = gr.Button("Segment & Generate Images") |
|
|
|
|
|
with gr.Row(): |
|
with gr.Column(): |
|
gr.Markdown("Try some of the examples below β¬οΈ") |
|
gr.Examples(examples=examples, |
|
inputs=[prompt, negative_prompt, cond_img], |
|
outputs=output, |
|
fn=infer, |
|
examples_per_page=4) |
|
|
|
with gr.Column(): |
|
gr.Markdown(about, elem_classes="about") |
|
|
|
submit.click(infer,
             inputs=[prompt, negative_prompt, cond_img, num_steps, seed, num_samples],
             outputs=[segm_img, output])
|
|
|
segment_btn.click(segment_image, |
|
inputs=[cond_img, seed], |
|
outputs=segm_img) |
|
|
|
demo.queue() |
|
demo.launch() |
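
# Assumption: this is meant to run on Spaces or locally with default hosting; for
# a shareable public link when running locally, Gradio also supports
# demo.launch(share=True).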