Update app.py
app.py
CHANGED
@@ -33,14 +33,9 @@ h1 {
 }
 """
 
-model = AutoModelForCausalLM.from_pretrained(
-    MODEL_ID,
-    torch_dtype=torch.bfloat16,
-    low_cpu_mem_usage=True,
-    trust_remote_code=True
-)
+
 tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
-
+
 
 
 def extract_text(path):
@@ -102,6 +97,14 @@ def mode_load(path):
 
 
 @spaces.GPU()
+
+model = AutoModelForCausalLM.from_pretrained(
+    MODEL_ID,
+    torch_dtype=torch.bfloat16,
+    low_cpu_mem_usage=True,
+    trust_remote_code=True
+)
+model.eval()
 def stream_chat(message, history: list, temperature: float, max_length: int, top_p: float, top_k: int, penalty: float):
     print(f'message is - {message}')
     print(f'history is - {history}')
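Note that, as committed, the moved block lands between the `@spaces.GPU()` decorator and the `def stream_chat(...)` header. Python allows only comments and blank lines between a decorator and the definition it decorates, so this placement raises a SyntaxError when app.py is imported. Below is a minimal sketch of the presumably intended layout, with the load deferred into the GPU-decorated function and cached in a module-level global. `MODEL_ID` and the `stream_chat` signature are taken from the diff context; the placeholder `MODEL_ID` value, the lazy-caching global, and the elided function body are illustrative assumptions, not part of the commit.

import spaces
import torch
from transformers import AutoModelForCausalLM

MODEL_ID = "..."  # placeholder; the real value is defined earlier in app.py

model = None  # loaded lazily on the first GPU-backed call


@spaces.GPU()
def stream_chat(message, history: list, temperature: float, max_length: int,
                top_p: float, top_k: int, penalty: float):
    # Load once, inside the decorated function, where statements are legal
    # and ZeroGPU has attached a GPU.
    global model
    if model is None:
        model = AutoModelForCausalLM.from_pretrained(
            MODEL_ID,
            torch_dtype=torch.bfloat16,
            low_cpu_mem_usage=True,
            trust_remote_code=True,
        )
        model.eval()
    ...  # generation logic as in app.py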