linoyts HF staff committed on
Commit
6e0da6e
1 Parent(s): 2d98527

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -13,6 +13,7 @@ from diffusers import StableDiffusionPipeline
13
  from diffusers import DDIMScheduler
14
  from transformers import AutoProcessor, BlipForConditionalGeneration
15
  from share_btn import community_icon_html, loading_icon_html, share_js
 
16
 
17
  # load pipelines
18
  sd_model_id = "stabilityai/stable-diffusion-2-1-base"
@@ -23,8 +24,7 @@ sem_pipe = SemanticStableDiffusionPipeline.from_pretrained(sd_model_id, torch_dt
23
  blip_processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
24
  blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base",torch_dtype=torch.float16).to(device)
25
 
26
-
27
-
28
  ## IMAGE CAPTIONING ##
29
  def caption_image(input_image):
30
  inputs = blip_processor(images=input_image, return_tensors="pt").to(device, torch.float16)
 
13
  from diffusers import DDIMScheduler
14
  from transformers import AutoProcessor, BlipForConditionalGeneration
15
  from share_btn import community_icon_html, loading_icon_html, share_js
16
+ import spaces
17
 
18
  # load pipelines
19
  sd_model_id = "stabilityai/stable-diffusion-2-1-base"
 
24
  blip_processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
25
  blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base",torch_dtype=torch.float16).to(device)
26
 
27
+ @spaces.GPU
 
28
  ## IMAGE CAPTIONING ##
29
  def caption_image(input_image):
30
  inputs = blip_processor(images=input_image, return_tensors="pt").to(device, torch.float16)