Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -166,6 +166,57 @@ def calculate_similarity_score(sentences):
|
|
166 |
|
167 |
return score_out_of_100
|
168 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
169 |
|
170 |
text_list = []
|
171 |
|
@@ -284,19 +335,31 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
|
|
284 |
""")
|
285 |
|
286 |
with gr.Row():
|
287 |
-
model_name_input2 = gr.Dropdown([("Llama", "TheBloke/Llama-2-7B-Chat-GGML"), ("Falcon", "TheBloke/Falcon-180B-GGUF"), ("Zephyr", "TheBloke/zephyr-quiklang-3b-4K-GGUF"),("Vicuna", "TheBloke/vicuna-33B-GGUF"),("Claude","TheBloke/claude2-alpaca-13B-GGUF"),("Alpaca","TheBloke/LeoScorpius-GreenNode-Alpaca-7B-v1-GGUF")], label="Large Language Model")
|
|
|
288 |
with gr.Row():
|
289 |
-
prompt_input2 = gr.Textbox(label="Enter your question", lines=
|
|
|
290 |
with gr.Row():
|
291 |
-
upload_button1 = gr.
|
|
|
292 |
with gr.Row():
|
293 |
Relevance = gr.Slider(1, 100, value=70, label="Relevance", info="Choose between 0 and 100", interactive=True)
|
294 |
Diversity = gr.Slider(1, 100, value=25, label="Diversity", info="Choose between 0 and 100", interactive=True)
|
|
|
295 |
with gr.Row():
|
296 |
prompt_input3 = gr.Textbox(label="Enter your email address", placeholder="[email protected]")
|
|
|
297 |
with gr.Row():
|
298 |
submit_button = gr.Button("Submit", variant="primary")
|
299 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
300 |
|
301 |
# Launch the Gradio app
|
302 |
demo.launch()
|
|
|
166 |
|
167 |
return score_out_of_100
|
168 |
|
169 |
+
def answer_question(prompt):
    """Generate one answer for *prompt* with a local Llama-2 GGML model.

    Parameters
    ----------
    prompt : str
        The question text (it may carry a trailing number, which the model
        is instructed to ignore).

    Returns
    -------
    str
        The model-generated answer.
    """
    # BUG FIX: the original called PromptTemplate.from_template() with
    # `input_variables=` / `template=` keyword arguments — that is the
    # PromptTemplate(...) constructor's signature, and from_template()
    # rejects it with a TypeError. It also baked `prompt` into the template
    # via an f-string, so the declared "Question" input variable was never
    # actually substituted. Use a real {Question} placeholder instead.
    prompt_template = PromptTemplate(
        input_variables=["Question"],
        template="give one answer for {Question} and do not consider the number behind it.",
    )
    config = {'max_new_tokens': 2048, 'temperature': 0.7, 'context_length': 4096}
    llm = CTransformers(
        model="TheBloke/Llama-2-7B-Chat-GGML",
        config=config,
        threads=os.cpu_count(),  # use every available core for CPU inference
    )
    hub_chain = LLMChain(prompt=prompt_template, llm=llm)
    # run() with a dict maps "Question" onto the template placeholder.
    return hub_chain.run({"Question": prompt})
|
182 |
+
|
183 |
+
|
184 |
+
def process_file(file_obj):
    """Answer every non-blank line of an uploaded text file.

    Parameters
    ----------
    file_obj : str | os.PathLike | object
        Either a filesystem path, or an upload object exposing the path via
        a ``.name`` attribute (the shape Gradio file components provide —
        TODO confirm against the installed gradio version).

    Returns
    -------
    str
        One generated answer per input line, joined with newlines.
    """
    # BUG FIX: the original required `.name`, but its caller passes a plain
    # path string; accept both forms so neither call site breaks.
    path = file_obj if isinstance(file_obj, (str, os.PathLike)) else file_obj.name
    with open(path, 'r') as file:
        # Skip blank lines — answering an empty question wastes an LLM call.
        processed_lines = [answer_question(line.strip()) for line in file if line.strip()]
    # Combine the per-line answers back into a single string to display.
    return '\n'.join(processed_lines)
|
191 |
+
|
192 |
+
def send_email(receiver_email, subject, body):
    """Send *body* as a plain-text email to *receiver_email* via Gmail SMTP.

    Parameters
    ----------
    receiver_email : str
        Destination address.
    subject : str
        Subject line.
    body : str
        Plain-text message body.

    Raises
    ------
    smtplib.SMTPException
        If STARTTLS negotiation, login, or delivery fails.
    """
    # SECURITY: credentials were hard-coded in source (and are now burned —
    # they must be rotated). The environment variables below take precedence
    # so the literals can be deleted without a code change; the fallback
    # preserves the original behavior when the variables are unset.
    sender_email = os.environ.get("SENDER_EMAIL", "[email protected]")
    sender_password = os.environ.get("SENDER_PASSWORD", "opri fcxx crkh bvfj")

    message = MIMEMultipart()
    message['From'] = sender_email
    message['To'] = receiver_email
    message['Subject'] = subject
    message.attach(MIMEText(body, 'plain'))

    # BUG FIX: the original leaked the SMTP connection whenever
    # starttls/login/sendmail raised (quit() was never reached). The
    # context manager closes the connection on every path.
    with smtplib.SMTP('smtp.gmail.com', 587) as server:
        server.starttls()
        server.login(sender_email, sender_password)
        server.sendmail(sender_email, receiver_email, message.as_string())
|
210 |
+
|
211 |
+
|
212 |
+
def process_and_email(file_info, email_address):
    """Run the uploaded file(s) through the LLM and email the results.

    Parameters
    ----------
    file_info : dict | str | object | list
        The upload as delivered by Gradio: a dict carrying a 'path' key,
        a plain path, an object with a ``.name`` attribute, or — because
        the File component uses ``file_count="multiple"`` — a list of any
        of those. (Exact shape depends on the gradio version — TODO confirm.)
    email_address : str
        Where to send the processed results.

    Returns
    -------
    str
        A confirmation message for the UI.
    """
    # Robustness: clicking Submit with no upload used to crash with a
    # TypeError (None['path']); report it to the user instead.
    if not file_info:
        return "Please upload a file first."
    # file_count="multiple" delivers a list; normalize to one code path.
    uploads = file_info if isinstance(file_info, (list, tuple)) else [file_info]
    sections = []
    for item in uploads:
        # BUG FIX: the original assumed a dict with a 'path' key only;
        # current Gradio versions typically hand back tempfile wrappers or
        # plain paths, which process_file() also accepts.
        path = item['path'] if isinstance(item, dict) else item
        sections.append(process_file(path))

    # Email the processed text.
    send_email(email_address, "Processed File Results", '\n'.join(sections))

    return "Results sent to your email!"
|
220 |
|
221 |
text_list = []
|
222 |
|
|
|
335 |
""")
|
336 |
|
337 |
# --- Input widgets for the second tab (model, question, upload, sliders) ---
with gr.Row():
    model_name_input2 = gr.Dropdown([("Llama", "TheBloke/Llama-2-7B-Chat-GGML"), ("Falcon", "TheBloke/Falcon-180B-GGUF"), ("Zephyr", "TheBloke/zephyr-quiklang-3b-4K-GGUF"), ("Vicuna", "TheBloke/vicuna-33B-GGUF"), ("Claude", "TheBloke/claude2-alpaca-13B-GGUF"), ("Alpaca", "TheBloke/LeoScorpius-GreenNode-Alpaca-7B-v1-GGUF")], label="Large Language Model")

with gr.Row():
    prompt_input2 = gr.Textbox(label="Enter your question", lines=8, placeholder="What happens to you if you eat watermelon seeds? \n\n How long should you wait between eating a meal and going swimming? \n\n Are vampires real? \n\n Who is a famous person from the Galapagos Islands?\n\nHow were the sun and the stars created?")

with gr.Row():
    upload_button1 = gr.File(file_types=["text"], file_count="multiple", label="Or You Can Click to Upload a File")

with gr.Row():
    Relevance = gr.Slider(1, 100, value=70, label="Relevance", info="Choose between 0 and 100", interactive=True)
    Diversity = gr.Slider(1, 100, value=25, label="Diversity", info="Choose between 0 and 100", interactive=True)

with gr.Row():
    prompt_input3 = gr.Textbox(label="Enter your email address", placeholder="[email protected]")

with gr.Row():
    submit_button = gr.Button("Submit", variant="primary")

# BUG FIX: the original wired FIVE inputs (model, question, Relevance,
# Diversity, email) into process_and_email, which accepts exactly two
# parameters (file_info, email_address) — every click raised a TypeError.
# Pass the uploaded file and the email box, matching the signature.
# NOTE(review): the function's confirmation string is discarded because
# outputs=[]; consider surfacing it in a status Textbox.
submit_button.click(
    fn=process_and_email,
    inputs=[upload_button1, prompt_input3],
    outputs=[],
)
|
362 |
+
|
363 |
|
364 |
# Launch the Gradio app
|
365 |
demo.launch()
|