isitcoding committed on
Commit
cd7e83a
·
verified ·
1 Parent(s): 5a2fb73

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -11
app.py CHANGED
@@ -1,13 +1,15 @@
1
- import os
2
  import gradio as gr
3
  from transformers import pipeline
 
4
 
5
- hf_token = os.getenv("hf_token")
6
  # Initialize the text generation pipeline
7
- generator = pipeline("text-generation", model="isitcoding/gpt2_120_finetuned", use_auth_token=hf_token)
 
8
 
9
  # Define the response function with additional options for customization
10
- '''def text_generation(
11
  prompt: str,
12
  details: bool = False,
13
  stream: bool = False,
@@ -70,11 +72,35 @@ iface = gr.Interface(
70
  iface.launch()
71
  '''
72
 
73
- # Test model generation
74
- def generate_response(prompt):
75
- response = generator(prompt, max_length=50)
76
- return response[0]["generated_text"]
77
-
78
- # Gradio interface
79
  import gradio as gr
80
- gr.Interface(fn=generate_response, inputs="text", outputs="text").launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ '''import os
2
  import gradio as gr
3
  from transformers import pipeline
4
+ from huggingface_hub import InferenceClient
5
 
6
+ hf_token = os.getenv("gpt2_token")
7
  # Initialize the text generation pipeline
8
+ client =
9
+ generator = pipeline("text-generation", )
10
 
11
  # Define the response function with additional options for customization
12
+ def text_generation(
13
  prompt: str,
14
  details: bool = False,
15
  stream: bool = False,
 
72
  iface.launch()
73
  '''
74
 
 
 
 
 
 
 
75
import gradio as gr
from transformers import pipeline

# Hub repo that holds both the fine-tuned GPT-2 weights and its tokenizer.
MODEL_ID = "isitcoding/gpt2_120_finetuned"

# Text-generation pipeline shared by the chat handler below.
generator = pipeline(
    "text-generation",
    model=MODEL_ID,
    tokenizer=MODEL_ID,
)
80
+
81
# Function to generate the assistant's response based on user input
def generate_response(messages):
    """Generate an assistant reply for the conversation so far.

    Args:
        messages: Either the raw user prompt as a plain string (what
            ``gr.Textbox`` actually passes to ``fn``), or a list of chat
            messages shaped like ``{"role": ..., "content": ...}``.

    Returns:
        The conversation as a list of message dicts, ending with the
        newly generated ``{"role": "assistant", ...}`` message.
    """
    # BUG FIX: gr.Textbox hands fn a plain string; the original code
    # assumed a message list and crashed with a TypeError on every call.
    # Normalize both input forms here.
    if isinstance(messages, str):
        messages = [{"role": "user", "content": messages}]
    else:
        # Work on a copy so the caller's list is not mutated in place.
        messages = list(messages)

    # Extract the most recent user message from the conversation.
    user_message = messages[-1]["content"]

    # Generate a single completion for the latest user turn.
    response = generator(user_message, max_length=100, num_return_sequences=1)

    # Get the assistant's message from the generated output.
    assistant_message = response[0]["generated_text"]

    # Return the updated conversation with user and assistant messages.
    messages.append({"role": "assistant", "content": assistant_message})
    return messages
95
+
96
# Wire the response function into a simple Gradio app.
_interface_kwargs = dict(
    fn=generate_response,
    inputs=gr.Textbox(placeholder="Type your message..."),  # user's typed message
    outputs=gr.JSON(),  # render the full conversation as JSON
    live=True,  # refresh output as the user types
    title="Text Generation Pipeline",
    description="Enter a message to get a response from the assistant.",
)

iface = gr.Interface(**_interface_kwargs)
iface.launch()