MD1998 committed on
Commit
55247a9
·
verified ·
1 Parent(s): 7c44894

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +90 -42
app.py CHANGED
@@ -1,4 +1,4 @@
1
- import gradio as gr
2
  # from langchain.llms import HuggingFacePipeline
3
  # from transformers import AutoTokenizer, AutoModel
4
  # import transformers
@@ -45,59 +45,59 @@ import gradio as gr
45
 
46
  ##########################
47
 
48
- from transformers import T5Tokenizer
49
- from transformers import T5ForConditionalGeneration
50
-
51
- finetuned_model = T5ForConditionalGeneration.from_pretrained("MD1998/chating_beginner_v2")
52
- tokenizer = T5Tokenizer.from_pretrained("MD1998/chating_beginner_v2")
53
-
54
- # Initialize conversation history
55
- conversation_history = "System_prompt: You establish that the assistant is intelligent and helpful, and that you want to have an engaging conversation.\n"
56
- generation_params = {
57
- "max_length": 100,
58
- "repetition_penalty": 1.2,
59
- "temperature": 0.2,
60
- "top_p": 0.99,
61
- "top_k": 1
62
- }
63
-
64
- # Function to handle conversation
65
- def chat_with_model(input_text):
66
- global conversation_history
67
 
68
- # Combine the new input with the conversation history
69
- my_inputs = conversation_history + input_text
70
 
71
- # Encode the inputs
72
- inputs = tokenizer(my_inputs, return_tensors="pt")
73
 
74
- # Generate outputs using the model
75
- outputs = finetuned_model.generate(**inputs, **generation_params)
76
 
77
- # Decode the outputs to get the answer
78
- answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
79
 
80
- # Update conversation history (append the new input and answer)
81
- conversation_history += f"\nUser: {input_text}\nAssistant: {answer}\n"
82
 
83
- # Display the answer using text wrapping for readability
84
- print(fill(answer, width=80))
85
 
86
- # Return the answer for further use (if needed)
87
- return answer
88
 
89
- # Example usage
90
- # user_input = "What is the weather like today?"
91
- # chat_with_model(user_input)
92
 
93
 
94
- def greet(user_input):
95
- response = chat_with_model(user_input)
96
 
97
- return response
98
 
99
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
100
- iface.launch()
101
  ##########################
102
 
103
 
@@ -121,6 +121,54 @@ iface.launch()
121
  # iface = gr.Interface(fn=greet, inputs="text", outputs="text")
122
  # iface.launch()
123
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
124
 
125
 
126
 
 
1
+ # import gradio as gr
2
  # from langchain.llms import HuggingFacePipeline
3
  # from transformers import AutoTokenizer, AutoModel
4
  # import transformers
 
45
 
46
  ##########################
47
 
48
+ # from transformers import T5Tokenizer
49
+ # from transformers import T5ForConditionalGeneration
50
+
51
+ # finetuned_model = T5ForConditionalGeneration.from_pretrained("MD1998/chating_beginner_v2")
52
+ # tokenizer = T5Tokenizer.from_pretrained("MD1998/chating_beginner_v2")
53
+
54
+ # # Initialize conversation history
55
+ # conversation_history = "System_prompt: You establish that the assistant is intelligent and helpful, and that you want to have an engaging conversation.\n"
56
+ # generation_params = {
57
+ # "max_length": 100,
58
+ # "repetition_penalty": 1.2,
59
+ # "temperature": 0.2,
60
+ # "top_p": 0.99,
61
+ # "top_k": 1
62
+ # }
63
+
64
+ # # Function to handle conversation
65
+ # def chat_with_model(input_text):
66
+ # global conversation_history
67
 
68
+ # # Combine the new input with the conversation history
69
+ # my_inputs = conversation_history + input_text
70
 
71
+ # # Encode the inputs
72
+ # inputs = tokenizer(my_inputs, return_tensors="pt")
73
 
74
+ # # Generate outputs using the model
75
+ # outputs = finetuned_model.generate(**inputs, **generation_params)
76
 
77
+ # # Decode the outputs to get the answer
78
+ # answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
79
 
80
+ # # Update conversation history (append the new input and answer)
81
+ # conversation_history += f"\nUser: {input_text}\nAssistant: {answer}\n"
82
 
83
+ # # Display the answer using text wrapping for readability
84
+ # print(fill(answer, width=80))
85
 
86
+ # # Return the answer for further use (if needed)
87
+ # return answer
88
 
89
+ # # Example usage
90
+ # # user_input = "What is the weather like today?"
91
+ # # chat_with_model(user_input)
92
 
93
 
94
+ # def greet(user_input):
95
+ # response = chat_with_model(user_input)
96
 
97
+ # return response
98
 
99
+ # iface = gr.Interface(fn=greet, inputs="text", outputs="text")
100
+ # iface.launch()
101
  ##########################
102
 
103
 
 
121
  # iface = gr.Interface(fn=greet, inputs="text", outputs="text")
122
  # iface.launch()
123
 
124
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
from transformers import pipeline, T5Tokenizer

# Load the tokenizer and the text2text-generation pipeline for the
# fine-tuned T5 chat model.  NOTE: this downloads model weights at import
# time, so module import requires network access on first run.
tokenizer = T5Tokenizer.from_pretrained("MD1998/chating_beginner_v2")
chat_pipeline = pipeline("text2text-generation", model="MD1998/chating_beginner_v2", tokenizer=tokenizer)

# Initialize conversation history: a system prompt that is prepended to
# every model input; chat_with_model() appends each User/Assistant turn.
conversation_history = "System_prompt: You are a helpful, respectful, and honest assistant designed to improve English language skills. Always provide accurate and helpful responses to language improvement tasks, while ensuring safety and ethical standards. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased, positive, and focused on enhancing language skills. If a question does not make sense or is not factually coherent, explain why instead of answering something incorrect. If you don't know the answer to a question, please don't share false information. Your role is to guide users through various language exercises and challenges, helping them to practice and improve their English skills in a fun and engaging way. Always encourage users to try different approaches and provide constructive feedback to help them progress.\n"

# Keyword arguments forwarded to the pipeline's generate() call.
# NOTE(review): temperature/top_p/top_k only take effect when sampling is
# enabled (do_sample=True); with greedy decoding they are ignored — confirm
# the intended decoding strategy.
generation_params = {
    "max_length": 100,         # max generated tokens for the reply
    "repetition_penalty": 1.2, # discourage repeated phrases
    "temperature": 0.2,
    "top_p": 0.99,
    "top_k": 1
}
140
+
141
+ # Function to handle conversation
142
+ def chat_with_model(input_text):
143
+ global conversation_history
144
+
145
+ # Combine the new input with the conversation history
146
+ prompt = conversation_history + input_text
147
+
148
+ # Generate outputs using the pipeline with the provided prompt and generation parameters
149
+ response = chat_pipeline(prompt, **generation_params)[0]["generated_text"]
150
+
151
+ # Update conversation history (append the new input and answer)
152
+ conversation_history += f"\nUser: {input_text}\nAssistant: {response}\n"
153
+
154
+ # Display the answer using text wrapping for readability
155
+ print(fill(response, width=80))
156
+
157
+ # Return the answer for further use (if needed)
158
+ return response
159
+
160
+ # Example usage
161
+ # user_input = "What is the weather like today?"
162
+ # chat_with_model(user_input)
163
+
164
+ def greet(user_input):
165
+ response = chat_with_model(user_input)
166
+ return response
167
+
168
+ # Launch a Gradio interface
169
+ import gradio as gr
170
+ iface = gr.Interface(fn=greet, inputs="text", outputs="text")
171
+ iface.launch()
172
 
173
 
174