Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -45,35 +45,54 @@ import gradio as gr
 
 ##########################
 
-from transformers import T5Tokenizer, DataCollatorForSeq2Seq
-from transformers import T5ForConditionalGeneration, Seq2SeqTrainingArguments, Seq2SeqTrainer, AutoModelForSeq2SeqLM
-
-model = "MD1998/chating_beginners_v1"
-
-finetuned_model = T5ForConditionalGeneration.from_pretrained(model)
-tokenizer = T5Tokenizer.from_pretrained(model)
-
-
-def greet(my_prompt):
-    my_question = my_prompt
-    inputs = "Your name is Nemo, Please answer to this question in few words: " + my_question
 
+finetuned_model = T5ForConditionalGeneration.from_pretrained("MD1998/chating_beginner_v2")
+tokenizer = T5Tokenizer.from_pretrained("MD1998/chating_beginner_v2")
+
+# Initialize conversation history
+conversation_history = "System_prompt: You establish that the assistant is intelligent and helpful, and that you want to have an engaging conversation.\n"
+generation_params = {
+    "max_length": 100,
+    "repetition_penalty": 1.2,
+    "temperature": 0.2,
+    "top_p": 0.99,
+    "top_k": 1
+}
+
+# Function to handle conversation
+def chat_with_model(input_text):
+    global conversation_history
+
+    # Combine the new input with the conversation history
+    my_inputs = conversation_history + input_text
 
-
-
+    # Encode the inputs
+    inputs = tokenizer(my_inputs, return_tensors="pt")
 
-
+    # Generate outputs using the model
+    outputs = finetuned_model.generate(**inputs, **generation_params)
 
-
-
+    # Decode the outputs to get the answer
+    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+    # Update conversation history (append the new input and answer)
+    conversation_history += f"\nUser: {input_text}\nAssistant: {answer}\n"
+
+    # Display the answer using text wrapping for readability
+    print(fill(answer, width=80))
+
+    # Return the answer for further use (if needed)
+    return answer
+
+# Example usage
+# user_input = "What is the weather like today?"
+# chat_with_model(user_input)
+
 
-
-    outputs = finetuned_model.generate(**inputss)
-    answer = tokenizer.decode(outputs[0])
-    from textwrap import fill
+def greet(user_input):
 
 
-    return
+    return chat_with_model(user_input)
 
 iface = gr.Interface(fn=greet, inputs="text", outputs="text")
 iface.launch()
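Review note: this commit deletes the transformers imports (old lines 48-49) and "from textwrap import fill" (old line 73), yet the added code still calls T5ForConditionalGeneration.from_pretrained and T5Tokenizer.from_pretrained at module level and fill(answer, width=80) inside chat_with_model. That would raise NameError as soon as app.py runs, which plausibly explains the "Runtime error" status above. A minimal sketch of the header the new revision appears to need, assuming nothing above this hunk besides "import gradio as gr" defines these names:

    # Sketch only: imports implied by the code added in this commit.
    import gradio as gr
    from textwrap import fill  # used by print(fill(answer, width=80))
    from transformers import T5Tokenizer, T5ForConditionalGeneration

    # Same checkpoints as in the diff above.
    finetuned_model = T5ForConditionalGeneration.from_pretrained("MD1998/chating_beginner_v2")
    tokenizer = T5Tokenizer.from_pretrained("MD1998/chating_beginner_v2")

A further hedged observation: in recent transformers releases, generate() applies temperature, top_p, and top_k only when sampling is enabled, so with the parameters shown here decoding is effectively greedy; adding "do_sample": True to generation_params would be needed for those settings to take effect.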