WICKED4950 committed · verified · Commit 7b65907 · 1 Parent(s): 0036538

Update app.py

Files changed (1):
  1. app.py (+25, -19)
app.py CHANGED
@@ -4,6 +4,11 @@ import tensorflow as tf
 import json
 import os
 
+data = {"Interactions": []}
+
+with open("question_answer.json", "w") as file:
+    json.dump(data, file, indent=4)
+
 print("Loading the model......")
 model_name = "WICKED4950/Irisonego5"
 strategy = tf.distribute.MirroredStrategy()
@@ -13,35 +18,36 @@ with strategy.scope():
     model = TFBlenderbotForConditionalGeneration.from_pretrained(model_name)
 
 def save_question(question, answer, path="question_answer.json"):
-    print(f"Saving data to: {os.path.abspath(path)}")
     with open(path, "r") as file:
         data = json.load(file)
     data["Interactions"].append({"Question:": question, "Answer:": answer})
     print(data)
     with open(path, "w") as file:
         json.dump(data, file, indent=4)
-    print("saving question")
 
 print("Interface getting done....")
 # Define the chatbot function
 def predict(user_input):
-    # Tokenize input text
-    inputs = tokenizer(user_input, return_tensors="tf", padding=True, truncation=True)
-
-    # Generate the response using the model
-    response_id = model.generate(
-        inputs['input_ids'],
-        max_length=128,  # Set max length of response
-        do_sample=True,  # Sampling for variability
-        top_k=15,  # Consider top 15 tokens
-        top_p=0.95,  # Nucleus sampling
-        temperature=0.8  # Adjusts creativity of response
-    )
-
-    # Decode the response
-    response = tokenizer.decode(response_id[0], skip_special_tokens=True)
-    save_question(question=user_input, answer=response)
-    return response
+    if user_input == "Print_data_hmm":
+        # Debug hook: print the module-level data dict instead of generating a reply
+        print(json.dumps(data, indent=4))
+        return "Done"
+    else:
+        # Tokenize input text
+        inputs = tokenizer(user_input, return_tensors="tf", padding=True, truncation=True)
+
+        # Generate the response using the model
+        response_id = model.generate(
+            inputs['input_ids'],
+            max_length=128,  # Maximum length of the generated response
+            do_sample=True,  # Sampling for variability
+            top_k=15,  # Consider only the top 15 tokens
+            top_p=0.95,  # Nucleus sampling
+            temperature=0.8  # Adjusts creativity of response
+        )
+
+        # Decode the response
+        response = tokenizer.decode(response_id[0], skip_special_tokens=True)
+        save_question(question=user_input, answer=response)
+        return response
 
 # Gradio interface
 gr.Interface(
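
For reference, a minimal standalone sketch (not part of the commit) of the read-append-write pattern that save_question relies on. The append_interaction name and the missing-file guard are hypothetical additions; the committed code instead assumes the startup block has already written question_answer.json:

import json
import os

def append_interaction(question, answer, path="question_answer.json"):
    # Hypothetical helper (not in the commit): same read-append-write
    # cycle as save_question, but tolerates a missing file instead of
    # relying on startup code to have created it.
    if os.path.exists(path):
        with open(path, "r") as file:
            data = json.load(file)
    else:
        data = {"Interactions": []}
    # Keys mirror the committed format, trailing colons included
    data["Interactions"].append({"Question:": question, "Answer:": answer})
    with open(path, "w") as file:
        json.dump(data, file, indent=4)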