rodrigomasini committed on
Commit b21da5b · 1 Parent(s): f093c76

Update app_v4.py

Files changed (1)
  1. app_v4.py +62 -3
app_v4.py CHANGED
@@ -1,5 +1,64 @@
- import torch
  import streamlit as st

- viz = torch.cuda.memory_summary()
- st.write(viz)
+ from transformers import AutoTokenizer
+ from auto_gptq import AutoGPTQForCausalLM
+ import torch
+ import subprocess
+
+ # Function to get memory info
+ def get_gpu_memory():
+     try:
+         result = subprocess.check_output(["nvidia-smi", "--query-gpu=memory.free,memory.total", "--format=csv,nounits,noheader"], text=True)
+         memory_info = [x.split(',') for x in result.strip().split('\n')]
+         memory_info = [{"free": int(x[0].strip()), "total": int(x[1].strip())} for x in memory_info]
+     except FileNotFoundError:
+         memory_info = [{"free": "N/A", "total": "N/A"}]
+     return memory_info
+
+ # Display GPU memory information
+ gpu_memory = get_gpu_memory()
+ st.write(f"GPU Memory Info: {gpu_memory}")
+
+ # Define pretrained model directory
+ pretrained_model_dir = "FPHam/Jackson_The_Formalizer_V2_13b_GPTQ"
+
+ # Check if CUDA is available and get the device
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+ # Before allocating or loading the model, clear up memory if CUDA is available
+ if device == "cuda:0":
+     torch.cuda.empty_cache()
+
+ # Load tokenizer
+ tokenizer = AutoTokenizer.from_pretrained(pretrained_model_dir, use_fast=True)
+
+ # Attempt to load the model, catch any OOM errors
+ try:
+     model = AutoGPTQForCausalLM.from_quantized(
+         pretrained_model_dir,
+         model_basename="Jackson2-4bit-128g-GPTQ",
+         use_safetensors=True,
+         device=device,
+         max_memory={0: "10GiB"}
+     )
+ except RuntimeError as e:
+     if 'CUDA out of memory' in str(e):
+         st.error("CUDA out of memory. Try reducing the model size or input length.")
+         st.stop()
+     else:
+         raise e
+
+ # User input for the model
+ user_input = st.text_input("Input a phrase")

+ # Generate button
+ if st.button("Generate the prompt"):
+     try:
+         prompt_template = f'USER: {user_input}\nASSISTANT:'
+         # Move the tokenized inputs onto the same device as the model before generating
+         inputs = tokenizer(prompt_template, return_tensors='pt', max_length=512, truncation=True, padding='max_length').to(device)
+         output = model.generate(**inputs)
+         st.markdown(f"**Generated Text:**\n{tokenizer.decode(output[0])}")
+     except RuntimeError as e:
+         if 'CUDA out of memory' in str(e):
+             st.error("CUDA out of memory during generation. Try reducing the input length.")
+         else:
+             raise e
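
Side note: this commit swaps the old torch.cuda.memory_summary() readout for an nvidia-smi subprocess call, which reports "N/A" whenever the binary is not on PATH even if a CUDA device is usable. A minimal torch-only fallback could look like the sketch below; it assumes a PyTorch build that provides torch.cuda.mem_get_info, and the same list-of-dicts shape and MiB units as get_gpu_memory(). get_gpu_memory_torch is a hypothetical helper, not part of the commit.

import torch

def get_gpu_memory_torch():
    # Mirror the shape returned by get_gpu_memory(): one dict per GPU,
    # with "N/A" placeholders when no CUDA device is visible.
    if not torch.cuda.is_available():
        return [{"free": "N/A", "total": "N/A"}]
    memory_info = []
    for idx in range(torch.cuda.device_count()):
        free_bytes, total_bytes = torch.cuda.mem_get_info(idx)
        memory_info.append({
            "free": free_bytes // (1024 ** 2),   # bytes -> MiB, matching nvidia-smi's nounits output
            "total": total_bytes // (1024 ** 2),
        })
    return memory_info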