Csplk committed on
Commit 6c84d1d · verified · 1 Parent(s): a8baec5

Update app.py

Files changed (1)
  1. app.py +14 -9
app.py CHANGED
@@ -6,13 +6,17 @@ from threading import Thread
 from transformers import TextIteratorStreamer, AutoTokenizer, AutoModelForCausalLM
 from PIL import ImageDraw
 from torchvision.transforms.v2 import Resize
-import subprocess
+#import subprocess
 
 #subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
 #subprocess.run('cp -r moondream/torch clients/python/moondream/torch')
 #subprocess.run('pip install moondream[gpu]')
 
+
+#model_id = "vikhyatk/moondream2"
+#revision = "2025-01-09"
+
 #def load_moondream():
 #    """Load Moondream model and tokenizer."""
 #    model = AutoModelForCausalLM.from_pretrained(
@@ -21,14 +25,6 @@ import subprocess
 #    tokenizer = AutoTokenizer.from_pretrained("vikhyatk/moondream2")
 #    return model, tokenizer
 
-"""Load Moondream model and tokenizer."""
-moondream = AutoModelForCausalLM.from_pretrained(
-    "vikhyatk/moondream2", trust_remote_code=True, device_map={"": "cuda"}
-)
-tokenizer = AutoTokenizer.from_pretrained("vikhyatk/moondream2")
-
-#model_id = "vikhyatk/moondream2"
-#revision = "2025-01-09"
 #tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)
 #moondream = AutoModelForCausalLM.from_pretrained(
 #    model_id, trust_remote_code=True, revision=revision,
@@ -37,6 +33,15 @@ tokenizer = AutoTokenizer.from_pretrained("vikhyatk/moondream2")
 
 #moondream.eval()
 
+"""Load Moondream model and tokenizer."""
+tokenizer = AutoTokenizer.from_pretrained("vikhyatk/moondream2")
+moondream = AutoModelForCausalLM.from_pretrained(
+    "vikhyatk/moondream2",
+    revision="2025-01-09",
+    trust_remote_code=True,
+    device_map={"": "cuda"},
+)
+
 @spaces.GPU(durtion="150")
 def answer_questions(image_tuples, prompt_text):
     result = ""
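For context, the net effect of this commit is to pin the moondream2 checkpoint to the 2025-01-09 revision and move the eager, module-level model load below the commented-out experiments. Below is a minimal standalone sketch of that loading pattern; the moondream.query(...) call and example.jpg are assumptions for illustration (based on the moondream2 model card for that revision), not part of this diff. Note also that @spaces.GPU(durtion="150") in the unchanged context lines looks like a typo for duration, which this commit leaves untouched.

# Minimal sketch of the loading pattern introduced in this commit (assumes a CUDA GPU).
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vikhyatk/moondream2")
moondream = AutoModelForCausalLM.from_pretrained(
    "vikhyatk/moondream2",
    revision="2025-01-09",       # pin remote code and weights to a known snapshot
    trust_remote_code=True,      # moondream2 ships custom modeling code
    device_map={"": "cuda"},     # place the whole model on the first GPU
)

# Hypothetical usage (API assumed from the model card, not shown in this diff):
image = Image.open("example.jpg")
print(moondream.query(image, "What is in this picture?")["answer"])

Pinning revision keeps both the weights and the trust_remote_code modeling files at a fixed snapshot, so later updates to the vikhyatk/moondream2 repo cannot silently change the Space's behavior.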