import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face Hub repository ID of the hosted model
model_repo = "Tanvi03/ReidLM"  # Replace with the actual Hub repo ID or a local path

# Load the tokenizer and model from the hosted repository
tokenizer = AutoTokenizer.from_pretrained(model_repo)
model = AutoModelForCausalLM.from_pretrained(model_repo)

# Generate a response for a single user message
def chat(message):
    # Tokenize with the attention mask included, which generate() expects
    inputs = tokenizer(message, return_tensors="pt")
    output = model.generate(
        **inputs,
        max_new_tokens=100,  # cap on newly generated tokens, independent of prompt length
        num_return_sequences=1,
    )
    # Decode only the newly generated tokens so the prompt is not echoed back
    response = tokenizer.decode(
        output[0][inputs["input_ids"].shape[-1]:],
        skip_special_tokens=True,
    )
    return response

# Create a Gradio interface
iface = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(placeholder="Enter your message..."),
    outputs=gr.Textbox(placeholder="Model's response will appear here..."),
    title="Chat with Hosted Model"
)

# Launch the Gradio app
iface.launch()
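
# A minimal sketch of how to run this app locally, assuming the script is
# saved as app.py (the file name and the example prompt are assumptions):
#
#   pip install gradio transformers torch
#   python app.py
#
# launch() serves the interface at http://127.0.0.1:7860 by default; pass
# share=True to iface.launch() for a temporary public link. To smoke-test
# the model without the UI, call the handler directly first:
#
#   print(chat("Hello, who are you?"))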