# Hugging Face Space: Gradio demo generating text with Mistral-7B-Instruct via LangChain.
import os

import gradio as gr
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace

# Hub token read from the environment; None if unset (endpoint may then be rate-limited).
HF_Hub_API_token = os.environ.get('HF_Hub_API_token', None)

# Remote text-generation endpoint: deterministic decoding (do_sample=False),
# short outputs (128 new tokens), mild repetition penalty.
llm = HuggingFaceEndpoint(
    repo_id="mistralai/Mistral-7B-Instruct-v0.2",
    task="text-generation",
    max_new_tokens=128,
    do_sample=False,
    repetition_penalty=1.03,
    huggingfacehub_api_token=HF_Hub_API_token,
)

# Chat wrapper that applies the model's chat template to message lists.
chat_model = ChatHuggingFace(llm=llm)

# Maximum characters displayed per response line before truncation.
MAX_LEN = 100
def gen_prompt1(title):
    """Send *title* to the chat model and return its response, line-truncated.

    Parameters
    ----------
    title : str
        User-supplied prompt text. An empty string skips the model call.

    Returns
    -------
    str
        The model's reply with every line longer than ``MAX_LEN`` characters
        cut to ``MAX_LEN`` and suffixed with ``" (...)"``; each line ends with
        a newline. Returns ``""`` for empty input.
    """
    if not title:
        return ''
    response = chat_model.invoke([HumanMessage(content=title)])
    # "Censure": cap line length so very long model lines stay readable in the UI.
    lines = []
    for line in response.content.split('\n'):
        if len(line) > MAX_LEN:
            line = line[:MAX_LEN] + " (...)"
        lines.append(line + '\n')
    return ''.join(lines)
# Example input shown under the textbox in the UI.
example_title = "Mary had a little lamb"

# Single-textbox interface: user text in, truncated model response out.
demo = gr.Interface(
    fn=gen_prompt1,
    inputs=gr.Textbox(label="Introduction", show_label=True),
    outputs=["text"],
    examples=[example_title],
)

demo.launch()