llm-test / app.py
import gradio as gr
# Use a pipeline as a high-level helper
from transformers import pipeline

# MPT-7B-Instruct ships custom model code, so trust_remote_code=True is required
pipe = pipeline("text-generation", model="mosaicml/mpt-7b-instruct", trust_remote_code=True)

def greet(name):
    # Pass the user's text to the model and return only the generated string
    result = pipe(name, max_new_tokens=100)
    return result[0]["generated_text"]

# Example: greet('who is the president of USA')

iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()