hubsnippetai committed on
Commit
1309060
·
verified ·
1 Parent(s): 1a15f55

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -8
app.py CHANGED
@@ -1,20 +1,21 @@
1
  import requests
 
2
 
3
  from PIL import Image
4
  from transformers import AutoProcessor, AutoModelForVision2Seq
5
 
6
 
7
- model = AutoModelForVision2Seq.from_pretrained("microsoft/kosmos-2-patch14-224")
8
- processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")
9
 
10
  # The original Kosmos-2 demo saves the image first then reload it. For some images, this will give slightly different image input and change the generation outputs.
11
 
12
  #prompt = "{question}"
13
 
14
- def describe_image(image_path, question : str):
15
- inputs = processor(text=question, images=image_path, return_tensors="pt")
16
 
17
- generated_ids = model.generate(
18
  pixel_values=inputs["pixel_values"],
19
  input_ids=inputs["input_ids"],
20
  attention_mask=inputs["attention_mask"],
@@ -23,12 +24,12 @@ def describe_image(image_path, question : str):
23
  use_cache=True,
24
  max_new_tokens=128,
25
  )
26
- generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
27
 
28
  # Specify `cleanup_and_extract=False` in order to see the raw model generation.
29
- processed_text = processor.post_process_generation(generated_text, cleanup_and_extract=False)
30
 
31
- processed_text, entities = processor.post_process_generation(generated_text)
32
 
33
  return processed_text
34
 
 
import requests
import asyncio

from PIL import Image
from transformers import AutoProcessor, AutoModelForVision2Seq


# Load the Kosmos-2 vision-language model and its processor once at module
# import time so every request reuses the same weights.
#
# NOTE: `from_pretrained` is a plain synchronous call that returns the object
# directly. The previous revision prefixed these with `await`, which is a
# SyntaxError at module level (await outside an async function) and would be a
# TypeError regardless, since the return value is not awaitable.
model = AutoModelForVision2Seq.from_pretrained("microsoft/kosmos-2-patch14-224")
processor = AutoProcessor.from_pretrained("microsoft/kosmos-2-patch14-224")
# The original Kosmos-2 demo saves the image first then reload it. For some images, this will give slightly different image input and change the generation outputs.

#prompt = "{question}"

async def describe_image(image_path, question: str):
    """Answer *question* about the image at *image_path* with Kosmos-2.

    Declared ``async`` so async frameworks (e.g. Gradio) can await it, but
    every call in the body is synchronous: the processor call,
    ``model.generate``, ``batch_decode`` and ``post_process_generation`` all
    return plain values, not awaitables, so no ``await`` appears inside.
    (The previous revision awaited each of them — ``await generated_ids = ...``
    is a SyntaxError, and awaiting the other calls raises TypeError.)

    Returns the cleaned, post-processed generation text.
    """
    # Build model inputs; `images` accepts a path/PIL image per the processor.
    inputs = processor(text=question, images=image_path, return_tensors="pt")

    generated_ids = model.generate(
        pixel_values=inputs["pixel_values"],
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        # NOTE(review): the two kwargs below fall in the diff's hidden hunk
        # (new lines 22-23); reconstructed from the standard Kosmos-2 demo —
        # confirm against the full file.
        image_embeds=None,
        image_embeds_position_mask=inputs["image_embeds_position_mask"],
        use_cache=True,
        max_new_tokens=128,
    )
    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

    # Specify `cleanup_and_extract=False` in order to see the raw model generation.
    # (Kept for parity with the demo; the raw form is computed but unused.)
    _raw_text = processor.post_process_generation(generated_text, cleanup_and_extract=False)

    processed_text, entities = processor.post_process_generation(generated_text)

    return processed_text