Update app.py
app.py
CHANGED
@@ -111,9 +111,9 @@ def respond(
 print(f"Processor: {uname.processor}")

 # GPU Information
-gpu_stats = gpustat.GPUStatCollection.new_query()
-for gpu in gpu_stats:
-
+gpu_stats = gpustat.GPUStatCollection.new_query()
+for gpu in gpu_stats:
+    print(f"GPU: {gpu.name} Mem Free: {get_size(gpu.memory_free)} Mem Used: {get_size(gpu.memory_used)} Mem Total: {get_size(gpu.memory_total)}")
 messages = [{"role": "user", "content": "Hello, how are you?"}]
 input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to('cuda')
 ## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
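The added lines call a get_size helper that is defined elsewhere in app.py and not shown in this hunk. A minimal, self-contained sketch of how the new GPU-report block runs, with an assumed byte-humanizing implementation of get_size (the helper body is an illustration, not code from this commit; gpustat itself reports memory in MiB, so the displayed units depend on how the real helper is written):

import gpustat

def get_size(value, suffix="B"):
    # Assumed helper: format a numeric size as a human-readable string.
    for unit in ["", "K", "M", "G", "T"]:
        if value < 1024:
            return f"{value:.2f}{unit}{suffix}"
        value /= 1024
    return f"{value:.2f}P{suffix}"

# GPU Information (mirrors the block added in this commit; requires an NVIDIA GPU and drivers)
gpu_stats = gpustat.GPUStatCollection.new_query()
for gpu in gpu_stats:
    print(f"GPU: {gpu.name} Mem Free: {get_size(gpu.memory_free)} "
          f"Mem Used: {get_size(gpu.memory_used)} Mem Total: {get_size(gpu.memory_total)}")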
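The trailing comment in the hunk shows the rendered chat-template string that apply_chat_template produces for the single user message. A hedged sketch of how that string can be inspected, assuming a Cohere Command-R style checkpoint whose template uses the <|START_OF_TURN_TOKEN|>/<|END_OF_TURN_TOKEN|> markers (the model id below is an assumption; the diff does not show which checkpoint app.py loads):

from transformers import AutoTokenizer

# Assumed checkpoint; any model with the same Cohere-style chat template would do.
tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01")

messages = [{"role": "user", "content": "Hello, how are you?"}]

# tokenize=False returns the rendered prompt string instead of token ids,
# which is how the commented template line in the diff can be verified.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>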