Update app.py
app.py
CHANGED
@@ -67,20 +67,20 @@ def getModelPath(language):
     return(path)
 
 def inference(input_img,strategy,language):
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    image = transform(input_img).unsqueeze(0).to(device)
+    with torch.no_grad():
+        if strategy == "Beam search":
+            cap = model.generate(image, sample=False, num_beams=3, max_length=20, min_length=5)
+        else:
+            cap = model.generate(image, sample=True, top_p=0.9, max_length=20, min_length=5)
+    modelpath = getModelPath(language)
+    if modelpath:
+        translator = pipeline("translation", model=modelpath)
+        trans_cap = translator(cap[0])
+        tc = trans_cap[0]['translation_text']
+        return str(tc)
+    else:
+        return str(cap[0])
 
 
 description = "A pipeline of BLIP image captioning and Helsinki translation in order to generate image captions in a language of your choice either with beam search (deterministic) or nucleus sampling (stochastic). Enjoy! Is the language you want to use missing? Let me know and I'll integrate it."
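The hunk relies on several objects defined elsewhere in app.py and not shown in this diff: transform, device, model, getModelPath, the transformers pipeline import, and the Gradio wiring that feeds inference its three inputs. Below is a minimal sketch of what that surrounding setup might look like; the BLIP checkpoint, image size, language-to-model mapping, and Gradio components are assumptions for illustration, not taken from this Space.

# Sketch of the setup assumed by the hunk above. Names (transform, device, model,
# getModelPath, pipeline) follow the diff; everything else is a guess.
import torch
import gradio as gr
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import pipeline
from models.blip import blip_decoder  # from the Salesforce BLIP repo

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

image_size = 384  # assumed; BLIP's captioning demos use 384x384 inputs
transform = transforms.Compose([
    transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
    transforms.ToTensor(),
    transforms.Normalize((0.48145466, 0.4578275, 0.40821073),
                         (0.26862954, 0.26130258, 0.27577711)),
])

# Assumed checkpoint path/URL; the Space may load a different BLIP weight file.
model = blip_decoder(pretrained="model_base_capfilt_large.pth",
                     image_size=image_size, vit="base")
model.eval()
model = model.to(device)

def getModelPath(language):
    # Hypothetical mapping from the UI language name to a Helsinki-NLP opus-mt
    # model id; the real function sits above the hunk and may differ.
    paths = {
        "German": "Helsinki-NLP/opus-mt-en-de",
        "French": "Helsinki-NLP/opus-mt-en-fr",
        "Spanish": "Helsinki-NLP/opus-mt-en-es",
    }
    return paths.get(language)  # None keeps the English caption untranslated

# Assumed Gradio wiring: inputs mirror inference(input_img, strategy, language)
# and `inference` / `description` are the ones defined in the diff above.
demo = gr.Interface(
    fn=inference,
    inputs=[
        gr.Image(type="pil"),
        gr.Radio(["Beam search", "Nucleus sampling"], value="Beam search"),
        gr.Dropdown(["English", "German", "French", "Spanish"], value="English"),
    ],
    outputs="text",
    description=description,
)
demo.launch()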