import gradio as gr
# --- Commented-out LangChain-based variant (FLAN-T5 + HuggingFacePipeline), kept for reference ---
# from langchain.llms import HuggingFacePipeline
# from transformers import AutoTokenizer
# import transformers
# import torch
# from langchain.prompts import PromptTemplate
# from langchain.chains import LLMChain
# import warnings
# warnings.filterwarnings('ignore')
#
# model = 'MD1998/FLAN-T5-V1'
# tokenizer = AutoTokenizer.from_pretrained(model)
#
# prompt_template = PromptTemplate(
#     input_variables=["conversation"],
#     template="""\
# You are a helpful, respectful, and honest assistant designed to improve English language skills. Always provide accurate and helpful responses to language improvement tasks, while ensuring safety and ethical standards. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased, positive, and focused on enhancing language skills.
# If a question does not make sense or is not factually coherent, explain why instead of answering something incorrect. If you don't know the answer to a question, please don't share false information.
# Your role is to guide users through various language exercises and challenges, helping them to practice and improve their English skills in a fun and engaging way. Always encourage users to try different approaches and provide constructive feedback to help them progress.
# {conversation}
# """)
#
# pipeline = transformers.pipeline(
#     "text2text-generation",  # FLAN-T5 is a seq2seq model; the "text-generation" task expects a decoder-only model
#     model=model,
#     tokenizer=tokenizer,
#     torch_dtype=torch.bfloat16,
#     trust_remote_code=True,
#     device_map="auto",
#     max_length=15,
#     do_sample=True,
#     top_k=10,
#     top_p=0.95,  # top_p must lie in (0, 1]; the original value of 5 is not valid
#     num_return_sequences=1,
#     eos_token_id=tokenizer.eos_token_id,
# )
#
# llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={'temperature': 0.1})
# chain = LLMChain(llm=llm, prompt=prompt_template, verbose=True)
##########################
from textwrap import fill

from transformers import T5ForConditionalGeneration, T5Tokenizer

# Fine-tuned T5 checkpoint used for inference in this Space.
model = "MD1998/chating_beginners_v1"
finetuned_model = T5ForConditionalGeneration.from_pretrained(model)
tokenizer = T5Tokenizer.from_pretrained(model)
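# Optional sanity check (a sketch, not part of the original app): run one prompt through the
# loaded checkpoint directly to confirm the weights and tokenizer behave as expected before
# wiring up the Gradio interface. Uncomment to try it locally; `_probe` is just a throwaway name.
#
# _probe = tokenizer("Hello, how are you today?", return_tensors="pt")
# print(tokenizer.decode(finetuned_model.generate(**_probe)[0], skip_special_tokens=True))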
def greet(my_prompt):
    """Answer a single user prompt with the fine-tuned T5 model and return a wrapped string."""
    my_question = my_prompt
    inputs = "Your name is Nemo, please answer this question in a few words: " + my_question

    # Assistant persona / safety prompt. Note: it is defined here but not currently
    # prepended to the model input above.
    DEFAULT_SYSTEM_PROMPT = """\
You are a helpful, respectful, and honest assistant designed to improve English language skills. Always provide accurate and helpful responses to language improvement tasks, while ensuring safety and ethical standards. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased, positive, and focused on enhancing language skills.
If a question does not make sense or is not factually coherent, explain why instead of answering something incorrect. If you don't know the answer to a question, please don't share false information.
Your role is to guide users through various language exercises and challenges, helping them to practice and improve their English skills in a fun and engaging way. Always encourage users to try different approaches and provide constructive feedback to help them progress. \n\n {text}
"""

    # Tokenize the prompt, generate a reply, and decode it (dropping <pad>/</s> markers).
    inputss = tokenizer(inputs, return_tensors="pt")
    outputs = finetuned_model.generate(**inputss)
    answer = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Wrap long answers so they display nicely in the text box.
    return fill(answer, width=80)
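# A possible tweak (illustrative sketch, not the app's current behavior): generate() with
# default arguments tends to return short, greedy answers. Inside greet(), explicit generation
# settings such as the ones below could be passed instead; the values are examples, not tuned
# for this checkpoint.
#
# outputs = finetuned_model.generate(
#     **inputss,
#     max_new_tokens=64,   # allow replies longer than the short default cap
#     do_sample=True,      # sample instead of greedy decoding
#     top_p=0.95,          # nucleus sampling over the most probable tokens
#     temperature=0.7,     # soften the output distribution
# )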
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()
##########################
# def greet(prompt):
#     response = chain.run(prompt)
#     return response
#
# iface = gr.Interface(fn=greet, inputs="text", outputs="text")
# iface.launch()