File size: 787 Bytes
8b582d7
0c0b519
57eb8dd
8b582d7
0c0b519
08fa5db
 
 
8b582d7
0c0b519
08fa5db
0853b21
0c0b519
57eb8dd
0c0b519
 
 
8b582d7
0c0b519
 
8b582d7
0c0b519
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import spaces

# Load the tokenizer and model
# NOTE: both calls download weights from the Hugging Face Hub on first run
# (network side effect at import time of this script).

tokenizer = AutoTokenizer.from_pretrained("Yoxas/autotrain-gpt2-statistical1")
model = AutoModelForCausalLM.from_pretrained("Yoxas/autotrain-gpt2-statistical1")

# Use a pipeline as a high-level helper
# `pipe` is shared module-level state used by the chatbot handler below.
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Define the chatbot function
@spaces.GPU(duration=120)
def chatbot(input_text: str) -> str:
    """Generate a text continuation for *input_text* with the GPT-2 pipeline.

    Args:
        input_text: The user's prompt as plain text.

    Returns:
        The generated text (which includes the prompt, as is the
        text-generation pipeline's default behavior).
    """
    # BUG FIX: the original used max_length=150, which caps prompt + generated
    # tokens together — prompts near 150 tokens produced truncated or empty
    # output (and max_length is deprecated in recent transformers releases).
    # max_new_tokens bounds only the newly generated tokens.
    response = pipe(input_text, max_new_tokens=150, num_return_sequences=1)
    # Defensive: an empty result would raise IndexError; fall back to the prompt.
    if not response:
        return input_text
    return response[0]['generated_text']

# Create the Gradio interface
interface = gr.Interface(fn=chatbot, inputs="text", outputs="text", title="Research Paper Abstract Chatbot")

# Launch the Gradio app
interface.launch()