Rathapoom committed
Commit a0ecec2 · verified · 1 Parent(s): 8ca9de9

Update app.py

Files changed (1): app.py +8 -1
app.py CHANGED
@@ -1,3 +1,5 @@
+!pip install --upgrade transformers
+
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import os
@@ -7,11 +9,16 @@ model_name = "scb10x/llama-3-typhoon-v1.5x-70b-instruct-awq"
 token = os.getenv("HF_TOKEN")
 
 # Check if CUDA is available
-device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+device = torch.device("cuda")
+torch.cuda.set_device(0)  # Use the first CUDA device
 
 tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
 model = AutoModelForCausalLM.from_pretrained(model_name, token=token).to(device)
 
+print(f"CUDA available: {torch.cuda.is_available()}")
+print(f"Current device: {torch.cuda.current_device()}")
+print(f"Device name: {torch.cuda.get_device_name(0)}")
+
 def generate_text(prompt):
     inputs = tokenizer(prompt, return_tensors="pt").to(device)
     outputs = model.generate(inputs.input_ids, max_length=50)
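Note that two problems survive this patch: `torch` is called (`torch.device`, `torch.cuda.set_device`) but never imported, and `!pip install` is notebook shell syntax that raises a SyntaxError in a plain app.py (on Spaces, a transformers pin in requirements.txt is the usual route). Below is a minimal sketch of the full file with those issues addressed, assuming the parts the diff does not show; the `import torch` line, the CPU fallback, the decode/return in generate_text, and the Gradio wiring are assumed completions, not part of this commit.

# Sketch, not the committed file: upgrade transformers via requirements.txt
# rather than a !pip line inside app.py (that is notebook syntax).
import os

import gradio as gr
import torch  # assumed fix: the committed file uses torch without importing it
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "scb10x/llama-3-typhoon-v1.5x-70b-instruct-awq"
token = os.getenv("HF_TOKEN")

# Assumed fix: fall back to CPU instead of crashing on GPU-less hardware
# (the commit hard-codes torch.device("cuda")).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if device.type == "cuda":
    torch.cuda.set_device(0)  # use the first CUDA device
    print(f"Device name: {torch.cuda.get_device_name(0)}")
print(f"Using device: {device}")

tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
model = AutoModelForCausalLM.from_pretrained(model_name, token=token).to(device)

def generate_text(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(inputs.input_ids, max_length=50)
    # Assumed completion: the diff truncates before the function returns.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Assumed wiring: the visible hunks import gradio but never use it.
demo = gr.Interface(fn=generate_text, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()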