Aekanun committed
Commit b82c4ac · 1 Parent(s): a639bfa
Files changed (1): app.py (+10 -6)
app.py CHANGED
@@ -12,15 +12,18 @@ try:
except Exception as e:
    print(f"Failed to install packages: {e}")

+ # [Fix 1] Set environment variables before the imports
import os
os.environ['NVIDIA_VISIBLE_DEVICES'] = ''

import warnings
import torch
-
- torch._dynamo.config.suppress_errors = True
+ torch._dynamo.config.suppress_errors = True
torch._dynamo.config.verbose = False

+ # [Fix 2] Move imports to module level
+ from unsloth import FastVisionModel
+ from transformers import AutoModelForVision2Seq
from transformers import TextStreamer
import gradio as gr
from huggingface_hub import login

@@ -34,16 +37,17 @@ if 'HUGGING_FACE_HUB_TOKEN' in os.environ:
else:
    print("Warning: HUGGING_FACE_HUB_TOKEN not found")

- ###@spaces.GPU
+ # [Fix 3] Add the @spaces.GPU decorator
+ @spaces.GPU
def model_context():
    _tokenizer = None
    _model = None
-
+
    def init_models():
        nonlocal _tokenizer, _model
        try:
            print("Loading tokenizer...")
-             from unsloth import FastVisionModel
+             # [Fix 4] Remove the imports from the function
            base_model, _tokenizer = FastVisionModel.from_pretrained(
                "unsloth/Llama-3.2-11B-Vision-Instruct",
                use_gradient_checkpointing = "unsloth"

@@ -51,7 +55,7 @@ def model_context():
            print("Tokenizer loaded successfully")

            print("Loading fine-tuned model...")
-             from transformers import AutoModelForVision2Seq
+             # [Fix 5] Remove the import from the function
            _model = AutoModelForVision2Seq.from_pretrained(
                "Aekanun/Llama-3.2-11B-Vision-Instruct-XRay",
                load_in_4bit=True,
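
Taken together, the hunks above reshape the top of app.py. The sketch below reconstructs that region from the "+" side of the diff as a reading aid only: the package-installation try/except that precedes line 12, the import spaces statement that @spaces.GPU needs, the closing except of init_models(), and any further keyword arguments to the from_pretrained calls are not visible in these hunks, so they are assumptions rather than part of the commit.

# Reconstructed top of app.py after commit b82c4ac -- a sketch, not the full file.
import spaces  # assumed: provides the @spaces.GPU decorator (not shown in the hunks)

# [Fix 1] Environment variables are set before torch/unsloth are imported,
# so they are already in place when those libraries initialize.
import os
os.environ['NVIDIA_VISIBLE_DEVICES'] = ''

import warnings
import torch
torch._dynamo.config.suppress_errors = True
torch._dynamo.config.verbose = False

# [Fix 2] Heavy imports happen once at module level instead of inside init_models().
from unsloth import FastVisionModel
from transformers import AutoModelForVision2Seq
from transformers import TextStreamer
import gradio as gr
from huggingface_hub import login

# [Fix 3] The live decorator replaces the commented-out ###@spaces.GPU line.
@spaces.GPU
def model_context():
    _tokenizer = None
    _model = None

    def init_models():
        nonlocal _tokenizer, _model
        try:
            print("Loading tokenizer...")
            # [Fix 4] and [Fix 5]: the module-level imports are reused here,
            # so no function-local imports remain.
            base_model, _tokenizer = FastVisionModel.from_pretrained(
                "unsloth/Llama-3.2-11B-Vision-Instruct",
                use_gradient_checkpointing="unsloth",
            )
            print("Tokenizer loaded successfully")

            print("Loading fine-tuned model...")
            _model = AutoModelForVision2Seq.from_pretrained(
                "Aekanun/Llama-3.2-11B-Vision-Instruct-XRay",
                load_in_4bit=True,
                # ... any further keyword arguments are outside the hunks shown
            )
            # ... the remainder of init_models() is outside the diff ...
        except Exception as e:  # assumed: the matching except is not shown in the hunks
            print(f"Failed to load models: {e}")

    # ... the rest of model_context() (e.g., returning the initialized objects) is not shown ...

Keeping the imports at module level surfaces an installation problem when the Space boots rather than on the first call to init_models(), which appears to be the motivation behind fixes 2, 4, and 5.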