Update app.py
app.py CHANGED
@@ -117,11 +117,11 @@ def model(text, web_search):
         """Performs a web search, feeds the results to a language model, and returns the answer."""
         web_results = search(text)
         web2 = ' '.join([f"Text: {res['text']}\n\n" for res in web_results])
-        formatted_prompt = system_instructions1 + text + "[WEB]" + str(web2) + "[
+        formatted_prompt = system_instructions1 + text + "[WEB]" + str(web2) + "[OpenGPT 4o]"
         stream = client1.text_generation(formatted_prompt, max_new_tokens=512, stream=True, details=True, return_full_text=False)
         return "".join([response.token.text for response in stream if response.token.text != "</s>"])
     else:
-        formatted_prompt = system_instructions1 + text + "[
+        formatted_prompt = system_instructions1 + text + "[OpenGPT 4o]"
         stream = client1.text_generation(formatted_prompt, max_new_tokens=512, stream=True, details=True, return_full_text=False)
         return "".join([response.token.text for response in stream if response.token.text != "</s>"])
 
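For reference, both branches of the changed code use the same streaming pattern from huggingface_hub's InferenceClient.text_generation: with stream=True and details=True the call yields per-token objects, and the response is built by concatenating each token's text while skipping the end-of-sequence marker. Below is a minimal sketch of that pattern, not the full app.py; the model name and system prompt are placeholders, and client1 / system_instructions1 stand in for the objects defined earlier in the file.

# Minimal sketch of the streaming pattern in the diff above.
# The model name and system prompt below are placeholders, not the Space's actual values.
from huggingface_hub import InferenceClient

client1 = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")  # placeholder model
system_instructions1 = "[SYSTEM] Answer as OpenGPT 4o. [USER] "    # placeholder prompt

def generate(text: str) -> str:
    formatted_prompt = system_instructions1 + text + "[OpenGPT 4o]"
    # stream=True with details=True yields token objects; drop the EOS token "</s>".
    stream = client1.text_generation(
        formatted_prompt,
        max_new_tokens=512,
        stream=True,
        details=True,
        return_full_text=False,
    )
    return "".join(r.token.text for r in stream if r.token.text != "</s>")

print(generate("What is the capital of France?"))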