Commit 7e21c92 · 1 parent: 0a54fdf · Update app.py
app.py CHANGED
@@ -11,15 +11,15 @@ st.markdown("<h6 style='text-align: center; color: #489DDB;'>by Bryan Mildort</h
 from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 # from accelerate import infer_auto_device_map
 import torch
-device = "cuda:0" if torch.cuda.is_available() else "cpu"
-device_str = f"""Device being used: {device}"""
-st.write(device_str)
+# device = "cuda:0" if torch.cuda.is_available() else "cpu"
+# device_str = f"""Device being used: {device}"""
+# st.write(device_str)
 # device_map = infer_auto_device_map(model, dtype="float16")
 # st.write(device_map)
 
 model = AutoModelForCausalLM.from_pretrained("bryanmildort/gpt_neo_notes_summary", low_cpu_mem_usage=True)
 tokenizer = AutoTokenizer.from_pretrained("bryanmildort/gpt_neo_notes_summary")
-model = model.to(device)
+# model = model.to(device)
 
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
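
Note: this commit comments out the manual device selection and the model.to(device) call, so after the change the model stays on CPU and the pipeline uses its default device. A minimal sketch (not part of the commit) of one common way to keep optional GPU use with the standard transformers pipeline API, where device=-1 means CPU and device=0 means the first CUDA GPU:

# Sketch only, not the author's code in this commit. Assumes the standard
# transformers pipeline() "device" argument (-1 = CPU, 0 = first CUDA GPU).
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

model_id = "bryanmildort/gpt_neo_notes_summary"
model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Let the pipeline place the model instead of calling model.to(device) by hand,
# as the removed lines in this diff did.
device = 0 if torch.cuda.is_available() else -1
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)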