adirik committed on
Commit 066732d
1 Parent(s): 018e4d5

Update app.py

Files changed (1): app.py (+3, -11)
app.py CHANGED
@@ -50,17 +50,9 @@ def query_image(img, text_queries, score_threshold):
 
 
 description = """
-Gradio demo for <a href="https://huggingface.co/docs/transformers/main/en/model_doc/owlvit">OWL-ViT</a>,
-introduced in <a href="https://arxiv.org/abs/2205.06230">Simple Open-Vocabulary Object Detection
-with Vision Transformers</a>.
-\n\nYou can use OWL-ViT to query images with text descriptions of any object.
-To use it, simply upload an image and enter comma separated text descriptions of objects you want to query the image for. You
-can also use the score threshold slider to set a threshold to filter out low probability predictions.
-
-\n\nOWL-ViT is trained on text templates,
-hence you can get better predictions by querying the image with text templates used in training the original model: *"photo of a star-spangled banner"*,
-*"image of a shoe"*. Refer to the <a href="https://arxiv.org/abs/2103.00020">CLIP</a> paper to see the full list of text templates used to augment the training data.
-\n\n<a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb">Colab demo</a>
+Gradio demo for <a href="https://huggingface.co/docs/transformers/main/en/model_doc/owlvit">OWL-ViT</a>, introduced in <a href="https://arxiv.org/abs/2205.06230">Simple Open-Vocabulary Object Detection with Vision Transformers</a>.
+\n\nYou can use OWL-ViT to query images with text descriptions of any object. To use it, simply upload an image and enter comma separated text descriptions of objects you want to query the image for. You can also use the score threshold slider to set a threshold to filter out low probability predictions.
+\n\nOWL-ViT is trained on text templates, hence you can get better predictions by querying the image with text templates used in training the original model: *"photo of a star-spangled banner"*, *"image of a shoe"*. Refer to the <a href="https://arxiv.org/abs/2103.00020">CLIP</a> paper to see the full list of text templates used to augment the training data. \n\n<a href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb">Colab demo</a>
 """
 demo = gr.Interface(
     query_image,
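
For context, the querying flow the description refers to looks roughly like this in transformers. A minimal sketch, not the Space's actual `query_image` implementation: the `google/owlvit-base-patch32` checkpoint, the `example.jpg` path, and the 0.1 threshold are illustrative assumptions.

```python
import torch
from PIL import Image
from transformers import OwlViTProcessor, OwlViTForObjectDetection

# Checkpoint is an assumption; the Space may pin a different OWL-ViT variant.
processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

image = Image.open("example.jpg")  # placeholder path
# Comma-separated queries in the UI become a list of text prompts here;
# CLIP-style templates like "photo of a ..." tend to score better.
text_queries = ["photo of a star-spangled banner", "image of a shoe"]

inputs = processor(text=text_queries, images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Rescale boxes to the original image size and drop low-probability
# predictions, mirroring the score threshold slider in the demo.
target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
results = processor.post_process_object_detection(
    outputs, threshold=0.1, target_sizes=target_sizes
)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"{text_queries[label]}: {score:.2f} at {box.tolist()}")
```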