Update app.py
app.py CHANGED
@@ -12,13 +12,13 @@ from share_btn import community_icon_html, loading_icon_html, share_js
 model_id = "riffusion/riffusion-model-v1"
 blip_model_id = "Salesforce/blip-image-captioning-base"
 pipe = StableDiffusionPipeline.from_pretrained(model_id)
-pipe = pipe.to("cuda")
+pipe = pipe.to("cpu") #cuda
 
 blip_model = BlipForConditionalGeneration.from_pretrained(blip_model_id, torch_dtype=torch.float16).to("cuda")
 processor = BlipProcessor.from_pretrained(blip_model_id)
 
 def predict(image):
-    inputs = processor(image, return_tensors="pt").to("cuda", torch.float16)
+    inputs = processor(image, return_tensors="pt").to("cpu", torch.float16) #cuda
     output_blip = blip_model.generate(**inputs)
     prompt = processor.decode(output_blip[0], skip_special_tokens=True)
 
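Note that the commit moves the Stable Diffusion pipeline and the BLIP inputs to CPU while blip_model itself is still sent to "cuda", so on a CPU-only host the caption step would still fail. A minimal sketch, assuming the goal is to run on whatever hardware is available, is to pick the device and a CPU-safe dtype once and reuse them everywhere; the device and dtype variables below are illustrative, not part of the original app:

import torch
from diffusers import StableDiffusionPipeline
from transformers import BlipForConditionalGeneration, BlipProcessor

# Pick the device once; fall back to CPU when no GPU is available.
device = "cuda" if torch.cuda.is_available() else "cpu"
# float16 inference is unreliable on CPU, so use float32 there.
dtype = torch.float16 if device == "cuda" else torch.float32

model_id = "riffusion/riffusion-model-v1"
blip_model_id = "Salesforce/blip-image-captioning-base"

pipe = StableDiffusionPipeline.from_pretrained(model_id).to(device)
blip_model = BlipForConditionalGeneration.from_pretrained(blip_model_id, torch_dtype=dtype).to(device)
processor = BlipProcessor.from_pretrained(blip_model_id)

def predict(image):
    # Keep the inputs on the same device and dtype as blip_model.
    inputs = processor(image, return_tensors="pt").to(device, dtype)
    output_blip = blip_model.generate(**inputs)
    return processor.decode(output_blip[0], skip_special_tokens=True)

With the device chosen dynamically, the hard-coded "cpu"/"cuda" strings and the #cuda reminder comments are no longer needed when switching hardware.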