gaur3009 committed
Commit e182d1b · verified · 1 Parent(s): 5bc1231

Update app.py

Files changed (1): app.py +15 -13
app.py CHANGED
@@ -5,7 +5,6 @@ import pandas as pd
 import os
 import gradio as gr
 
-# Load the models and tokenizers
 bert_model_name = 'bert-base-uncased'
 bert_tokenizer = BertTokenizer.from_pretrained(bert_model_name)
 bert_model = BertModel.from_pretrained(bert_model_name)
@@ -14,7 +13,6 @@ gpt2_model_name = 'gpt2'
 gpt2_tokenizer = GPT2Tokenizer.from_pretrained(gpt2_model_name)
 gpt2_model = GPT2LMHeadModel.from_pretrained(gpt2_model_name)
 
-# Load the data
 data = {
     "questions": [
         "What is Rookus?",
@@ -39,6 +37,18 @@ data = {
     "default_answers": "I'm sorry, I cannot answer this right now. Your question has been saved, and we will get back to you with a response soon."
 }
 
+def save_to_excel(user_query, default_answer, excel_file='new_prompts.xlsx'):
+    if not os.path.isfile(excel_file):
+        df = pd.DataFrame(columns=['question', 'default_answer'])
+    else:
+        df = pd.read_excel(excel_file)
+
+    new_data = pd.DataFrame({'question': [user_query], 'default_answer': [default_answer]})
+    df = pd.concat([df, new_data], ignore_index=True)
+
+    with pd.ExcelWriter(excel_file, engine='openpyxl', mode='w') as writer:
+        df.to_excel(writer, index=False)
+
 def get_bert_embeddings(texts):
     inputs = bert_tokenizer(texts, return_tensors='pt', padding=True, truncation=True)
     with torch.no_grad():
@@ -70,18 +80,10 @@ def answer_query(user_query):
         answer_index = data['questions'].index(closest_question)
         answer = data['answers'][answer_index]
     else:
-        excel_file = 'new_questions1.xlsx'
-        if not os.path.isfile(excel_file):
-            df = pd.DataFrame(columns=['question'])
-            df.to_excel(excel_file, index=False)
-
-        new_data = pd.DataFrame({'questions': [user_query]})
-        df = pd.read_excel(excel_file)
-        df = pd.concat([df, new_data], ignore_index=True)
-        with pd.ExcelWriter(excel_file, engine='openpyxl', mode='w') as writer:
-            df.to_excel(writer, index=False)
         answer = data['default_answers']
-
+
+        save_to_excel(user_query, answer)
+
     return answer
 
 iface = gr.Interface(
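
For context, a minimal standalone sketch of how the new save_to_excel helper behaves. The function body mirrors the diff above; the sample query, the reply text, and the demo_prompts.xlsx file name are made up for illustration, and pandas plus openpyxl are assumed to be installed:

import os
import pandas as pd

def save_to_excel(user_query, default_answer, excel_file='new_prompts.xlsx'):
    # Start a fresh log on first use, otherwise load the existing workbook.
    if not os.path.isfile(excel_file):
        df = pd.DataFrame(columns=['question', 'default_answer'])
    else:
        df = pd.read_excel(excel_file)

    # Append the unanswered question together with the canned reply.
    new_data = pd.DataFrame({'question': [user_query], 'default_answer': [default_answer]})
    df = pd.concat([df, new_data], ignore_index=True)

    # Rewrite the whole workbook in place.
    with pd.ExcelWriter(excel_file, engine='openpyxl', mode='w') as writer:
        df.to_excel(writer, index=False)

# Hypothetical usage: log a question the FAQ data cannot answer yet.
save_to_excel("Does Rookus offer refunds?",
              "I'm sorry, I cannot answer this right now.",
              excel_file='demo_prompts.xlsx')
print(pd.read_excel('demo_prompts.xlsx'))

Each call appends one row, so repeated unanswered queries accumulate in the same spreadsheet instead of overwriting earlier entries.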