AhmadT198 commited on
Commit
698bcdd
1 Parent(s): 59cdc11

Exploring the GPUs inside the function

Browse files
Files changed (1) hide show
  1. app.py +16 -4
app.py CHANGED
@@ -1,19 +1,31 @@
1
  import spaces
2
  import gradio as gr
 
3
 
4
 
5
 
6
# Use a pipeline as a high-level helper
from transformers import pipeline

# Hermes-3 chat model served through the HF text-generation pipeline;
# responses are capped at 200 newly generated tokens.
_MODEL_ID = "NousResearch/Hermes-3-Llama-3.1-8B"
pipe = pipeline("text-generation", model=_MODEL_ID, max_new_tokens=200)
9
 
10
@spaces.GPU(duration=120)
def llama3_1_8B(question):
    """Answer *question* with the Hermes-3 text-generation pipeline.

    Wraps the question in a single-turn chat message list, runs it through
    the module-level ``pipe``, and returns the raw pipeline output
    stringified.
    """
    chat = [{"role": "user", "content": question}]
    generated = pipe(chat)
    return str(generated)
 
 
 
 
 
 
 
 
 
 
 
17
 
18
def greet(name):
    """Build a playful hello message for *name*."""
    pieces = ("Hello ", name, "!!???")
    return "".join(pieces)
 
1
  import spaces
2
  import gradio as gr
3
+ import torch
4
 
5
 
6
 
7
  # Use a pipeline as a high-level helper
8
  from transformers import pipeline
9
+ # pipe = pipeline("text-generation", model="NousResearch/Hermes-3-Llama-3.1-8B", max_new_tokens=200, device=0)
10
 
11
@spaces.GPU
def llama3_1_8B(question):
    """Probe CUDA visibility from inside the GPU-decorated function.

    Builds a chat-style message list from *question* (unused by the probe
    itself), prints every CUDA device the process can see, and returns a
    fixed placeholder string.
    """
    messages = [
        {"role": "user", "content": question},
    ]
    # responses = pipe(messages)

    if not torch.cuda.is_available():
        print("CUDA is not available.")
    else:
        device_total = torch.cuda.device_count()
        print(f"Number of CUDA devices: {device_total}")
        for idx in range(device_total):
            print(f"Device {idx}: {torch.cuda.get_device_name(idx)}")

    return "Hi"
29
 
30
def greet(name):
    """Return an exuberant greeting for *name*."""
    return f"Hello {name}!!???"