xavierbarbier
committed
Update app.py
app.py CHANGED
@@ -32,6 +32,8 @@ model = model = GPT4All(model_name, model_path, allow_download = False, device="
 
 
 # creating a pdf reader object
+
+"""
 reader = PdfReader("./resource/NGAP 01042024.pdf")
 text = []
 for p in np.arange(0, len(reader.pages), 1):
@@ -65,7 +67,7 @@ index = faiss.IndexFlatL2(d)
 index.add(text_embeddings)
 
 #index = faiss.read_index("./resourse/embeddings_ngap.faiss")
-
+"""
 print("Finish the model init process")
 
 def format_chat_prompt(message, chat_history):
@@ -85,6 +87,8 @@ context = [
 }
 ]
 
+max_new_tokens = 2048
+
 def respond(message, chat_history):
 
 
@@ -93,11 +97,13 @@ def respond(message, chat_history):
 
 context.append({'role':'user', 'content':f"{prompt}"})
 
-tokenized_chat = tokenizer.apply_chat_template(context, tokenize=True, add_generation_prompt=True, return_tensors="pt")
+#tokenized_chat = tokenizer.apply_chat_template(context, tokenize=True, add_generation_prompt=True, return_tensors="pt")
+
+#outputs = model.generate(tokenized_chat, max_new_tokens=1000, temperature = 0.0)
 
-outputs = model.generate(tokenized_chat, max_new_tokens=1000, temperature = 0.0)
+#bot_message = tokenizer.decode(outputs[0]).split("<|assistant|>")[-1].replace("</s>","")
 
-bot_message = tokenizer.decode(outputs[0]).split("<|assistant|>")[-1].replace("</s>","")
+bot_message = model.generate(prompt=prompt, temp=0.5, top_k = 40, top_p = 1, max_tokens = max_new_tokens, streaming=False)
 
 context.append({'role':'assistant', 'content':f"{bot_message}"})
 
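The net effect of the first two hunks is to wrap the entire PDF-ingestion and FAISS-indexing block in a triple-quoted string: Python parses it as an unused string literal, so the PDF is never read and no index is built at startup. For orientation, here is a minimal sketch of the block being disabled; the embedding step is not visible in this diff, so the sentence-transformers encoder below is an assumption, not the app's actual code:

import faiss
import numpy as np
from pypdf import PdfReader
from sentence_transformers import SentenceTransformer  # assumption: the real encoder is not shown in this diff

# Read every page of the source PDF into a list of strings.
reader = PdfReader("./resource/NGAP 01042024.pdf")
text = [page.extract_text() for page in reader.pages]

# Embed each page and add the vectors to an exact L2 index.
encoder = SentenceTransformer("all-MiniLM-L6-v2")           # hypothetical model choice
text_embeddings = encoder.encode(text).astype(np.float32)   # faiss expects float32
d = text_embeddings.shape[1]                                # embedding dimensionality
index = faiss.IndexFlatL2(d)
index.add(text_embeddings)

A boolean flag (e.g. BUILD_INDEX = False) around the block would disable it more explicitly than a string literal and is harder to leave unbalanced. Note the commented-out alternative kept in the diff, faiss.read_index("./resourse/embeddings_ngap.faiss"), which would load a prebuilt index instead of recomputing it.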
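The last two hunks change how the reply is produced: the Transformers-style pipeline (tokenizer.apply_chat_template, model.generate on token IDs, tokenizer.decode) is commented out, a module-level max_new_tokens = 2048 is introduced, and the GPT4All binding's own generate is called directly on the prompt string. A minimal sketch of the resulting respond flow, assuming format_chat_prompt folds the history into a single prompt string (its body is not part of this diff):

from gpt4all import GPT4All

# Hypothetical model name and path; the real values come from earlier in app.py.
model = GPT4All("model.gguf", model_path="./models", allow_download=False)
max_new_tokens = 2048
context = []  # running chat history as role/content dicts

def format_chat_prompt(message, chat_history):
    # Assumed behaviour: concatenate prior turns with the new message.
    turns = "\n".join(f"User: {u}\nAssistant: {a}" for u, a in chat_history)
    return f"{turns}\nUser: {message}\nAssistant:"

def respond(message, chat_history):
    prompt = format_chat_prompt(message, chat_history)
    context.append({'role': 'user', 'content': f"{prompt}"})

    # GPT4All generates straight from the prompt string, so no tokenizer,
    # chat template, or manual decode step is involved.
    bot_message = model.generate(prompt=prompt, temp=0.5, top_k=40, top_p=1,
                                 max_tokens=max_new_tokens, streaming=False)

    context.append({'role': 'assistant', 'content': f"{bot_message}"})
    return bot_message  # assumed return value; the diff ends before respond returns

One consequence of this switch is that context is still appended to but no longer reaches the model: generate only receives the flat prompt string, so any history the model should see must already be baked into it by format_chat_prompt.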