# OmniCode / app.py
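"""OmniCode: a small Gradio demo that uses DistilGPT-2 to complete and repeat the user's sentence."""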
# Import the required libraries
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the DistilGPT-2 tokenizer and model from the Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
# System message
system_message = "I am a code teaching assistant named Repeatinfy created by Anusha K. I am here to complete your sentence and repeat it."
def generate_response(prompt, max_length=150, temperature=1.0):
    # Prepend the system message to the user's prompt
    input_text = system_message + "\n" + prompt
    input_ids = tokenizer.encode(input_text, return_tensors="pt")

    # Generate a continuation; do_sample=True is required for temperature to take effect
    output = model.generate(
        input_ids,
        max_length=max_length,
        do_sample=True,
        temperature=temperature,
        pad_token_id=tokenizer.eos_token_id,
        num_return_sequences=1,
    )

    # Decode the full sequence (system message + prompt + completion) and return it
    response = tokenizer.decode(output[0], skip_special_tokens=True)
    return response
# Create the Gradio interface
def chat_with_repeatinfy(prompt):
    response = generate_response(prompt)
    return response
iface = gr.Interface(fn=chat_with_repeatinfy, inputs="text", outputs="text", title="OmniCode")
iface.launch()