import gradio as gr
from huggingface_hub import InferenceClient

"""
For more information on `huggingface_hub` Inference API support, please check the docs: 
https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""

# Initialize the inference client with the model to query.
# (Note: an InferenceClient cannot be passed to transformers.pipeline as a
# model; all generation below goes through `client.text_generation` instead.)
client = InferenceClient(model="isitcoding/gpt2_120_finetuned")

def respond(message, history: list[tuple[str, str]] | None):
    """
    Generate a reply to `message`, using `history` (a list of
    (speaker, text) tuples) to carry the conversation context.
    """
    # gr.State() starts out as None, so fall back to an empty history
    history = history or []

    # Record the new user message
    history.append(("User", message))

    # Flatten the conversation into a single prompt string
    prompt = " ".join(text for _, text in history)

    # Query the model; `text_generation` returns only the newly generated
    # text by default (return_full_text=False)
    response = client.text_generation(prompt, max_new_tokens=256).strip()

    # Record the model's reply
    history.append(("Bot", response))

    return response, history
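# A quick sanity check outside Gradio (a sketch; actual replies depend on the
# model behind the endpoint):
#
#     reply, hist = respond("Hello!", [])
#     reply, hist = respond("Tell me more.", hist)  # history carries context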

# Create a Gradio interface; gr.State threads the history between calls
iface = gr.Interface(
    fn=respond,
    inputs=[gr.Textbox(label="Enter your message", placeholder="Type here..."), gr.State()],
    outputs=[gr.Textbox(label="Response"), gr.State()],
)

# Launch the Gradio interface
iface.launch()
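# Locally this serves the app at http://127.0.0.1:7860 (Gradio's default
# port); on a Hugging Face Space the same call exposes the hosted UI.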