hackergeek98 committed on
Commit
d08a780
·
verified ·
1 Parent(s): b17338a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -9
app.py CHANGED
@@ -2,28 +2,41 @@ import gradio as gr
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
4
  # Load your fine-tuned GPT-2 model from Hugging Face
5
- MODEL_NAME = "hackergeek98/finetuned-gpt2" # Replace with your model
6
  tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
7
  model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
8
 
 
 
 
9
  # Function to generate responses
10
  def generate_response(user_input):
11
- # Tokenize the input
12
- inputs = tokenizer(user_input, return_tensors="pt")
 
 
 
 
 
13
 
14
- # Generate a response
15
- outputs = model.generate(inputs['input_ids'], max_length=1000, num_return_sequences=1, no_repeat_ngram_size=2)
16
 
17
- # Decode the output and return the result
18
  response = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
 
 
 
19
  return response
20
 
21
  # Create Gradio interface
22
  interface = gr.Interface(fn=generate_response,
23
- inputs=gr.Textbox(label="Enter your message"),
24
- outputs=gr.Textbox(label="Therapist Response"),
25
  title="Virtual Therapist",
26
- description="A fine-tuned GPT-2 model acting as a virtual therapist.")
27
 
28
  # Launch the app
29
  interface.launch()
 
2
  from transformers import AutoModelForCausalLM, AutoTokenizer
3
 
4
# Load the fine-tuned GPT-2 chat model and its tokenizer from the Hugging
# Face Hub. Both downloads happen once at import time, before the app starts.
MODEL_NAME = "hackergeek98/therapist" # Replace with your model
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

# Running transcript of the whole chat ("User: ...\nTherapist: ...\n" turns).
# NOTE(review): module-level state is shared by every visitor of the app —
# fine for a single-user demo, not safe for concurrent sessions.
# Initialize conversation history
conversation_history = ""
12
# Function to generate the therapist's reply for one user turn.
def generate_response(user_input: str) -> str:
    """Generate the therapist's reply to ``user_input``.

    Appends the user's turn to the module-level ``conversation_history``,
    feeds the whole transcript to the model, appends the model's reply to
    the history, and returns only the new reply text.

    NOTE(review): ``conversation_history`` is shared global state — every
    visitor of the app contributes to the same transcript.
    """
    global conversation_history

    # Record the user's turn in the running transcript.
    conversation_history += f"User: {user_input}\n"

    # Tokenize the transcript, truncating so it fits GPT-2's context window.
    inputs = tokenizer(
        conversation_history,
        return_tensors="pt",
        truncation=True,
        max_length=1024,
    )
    prompt_len = inputs["input_ids"].shape[-1]

    # Bug fix: the original used max_length=1024, which counts the prompt
    # tokens — once the history approached 1024 tokens the model had no room
    # left to generate anything. max_new_tokens bounds only the reply.
    # pad_token_id=eos silences the "no pad token" warning for GPT-2, and
    # attention_mask makes padding/masking explicit.
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs.get("attention_mask"),
        max_new_tokens=256,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        pad_token_id=tokenizer.eos_token_id,
    )

    # Bug fix: decode only the newly generated tokens. The original decoded
    # outputs[0] in full, so the "reply" echoed the entire conversation, and
    # that echo was appended back into the history, compounding every turn.
    response = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

    # Record the model's turn.
    conversation_history += f"Therapist: {response}\n"

    return response
33
 
34
# Build the Gradio UI: one textbox for the user's message, one for the reply.
user_box = gr.Textbox(label="Enter your message", lines=2)
reply_box = gr.Textbox(label="Therapist Response", lines=2)

interface = gr.Interface(
    fn=generate_response,
    inputs=user_box,
    outputs=reply_box,
    title="Virtual Therapist",
    description="A fine-tuned GPT-2 model acting as a virtual therapist. Chat with the model and receive responses as if you are talking to a therapist.",
)

# Start the web app.
interface.launch()