# import gradio as gr
# from langchain.llms import HuggingFacePipeline
# from transformers import AutoTokenizer, AutoModel
# import transformers
# import torch
# from langchain.prompts import PromptTemplate
# from langchain.chains import LLMChain
# import warnings
# warnings.filterwarnings('ignore')
# model = 'MD1998/FLAN-T5-V1'
# tokenizer = AutoTokenizer.from_pretrained(model)
# prompt_template = PromptTemplate(
#     input_variables=["conversation"],
#     template="""\
# You are a helpful, respectful, and honest assistant designed to improve English language skills. Always provide accurate and helpful responses to language improvement tasks, while ensuring safety and ethical standards. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased, positive, and focused on enhancing language skills.
# If a question does not make sense or is not factually coherent, explain why instead of answering something incorrect. If you don't know the answer to a question, please don't share false information.
# Your role is to guide users through various language exercises and challenges, helping them to practice and improve their English skills in a fun and engaging way. Always encourage users to try different approaches and provide constructive feedback to help them progress.
# {conversation}
# """)
# pipeline = transformers.pipeline(
#     "text2text-generation",  # FLAN-T5 is an encoder-decoder model; "text-generation" targets decoder-only models
#     model=model,
#     tokenizer=tokenizer,
#     torch_dtype=torch.bfloat16,
#     trust_remote_code=True,
#     device_map="auto",
#     max_length=15,  # note: 15 tokens is a very tight output budget
#     do_sample=True,
#     top_k=10,
#     top_p=0.95,  # top_p must lie in (0, 1]; the original value of 5 is invalid
#     num_return_sequences=1,
#     eos_token_id=tokenizer.eos_token_id
# )
# llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={'temperature': 0.1})
# chain = LLMChain(llm=llm, prompt=prompt_template, verbose=True)
##########################
# from transformers import T5Tokenizer
# from transformers import T5ForConditionalGeneration
# finetuned_model = T5ForConditionalGeneration.from_pretrained("MD1998/chating_beginner_v2")
# tokenizer = T5Tokenizer.from_pretrained("MD1998/chating_beginner_v2")
# # Initialize conversation history
# conversation_history = "System_prompt: You establish that the assistant is intelligent and helpful, and that you want to have an engaging conversation.\n"
# generation_params = {
#     "max_length": 100,
#     "repetition_penalty": 1.2,
#     "temperature": 0.2,
#     "top_p": 0.99,
#     "top_k": 1
# }
# # Function to handle conversation
# def chat_with_model(input_text):
#     global conversation_history
#     # Combine the new input with the conversation history
#     my_inputs = conversation_history + input_text
#     # Encode the inputs
#     inputs = tokenizer(my_inputs, return_tensors="pt")
#     # Generate outputs using the model
#     outputs = finetuned_model.generate(**inputs, **generation_params)
#     # Decode the outputs to get the answer
#     answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
#     # Update conversation history (append the new input and answer)
#     conversation_history += f"\nUser: {input_text}\nAssistant: {answer}\n"
#     # Display the answer using text wrapping for readability (requires `from textwrap import fill`)
#     print(fill(answer, width=80))
#     # Return the answer for further use (if needed)
#     return answer
# # Example usage
# # user_input = "What is the weather like today?"
# # chat_with_model(user_input)
# def greet(user_input):
#     response = chat_with_model(user_input)
#     return response
#
# iface = gr.Interface(fn=greet, inputs="text", outputs="text")
# iface.launch()
##########################
# def greet(prompt):
#     response = chain.run(prompt)
#     return response
#
# iface = gr.Interface(fn=greet, inputs="text", outputs="text")
# iface.launch()
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
from textwrap import fill  # used below to wrap printed responses for readability

import gradio as gr
from transformers import pipeline, T5Tokenizer

# Load the tokenizer and the pipeline for text2text generation
tokenizer = T5Tokenizer.from_pretrained("MD1998/chating_beginner_v2")
chat_pipeline = pipeline("text2text-generation", model="MD1998/chating_beginner_v2", tokenizer=tokenizer)
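
# Quick sanity check (hypothetical prompt; uncomment to verify the model loads
# and generates before wiring it into the Gradio app):
# print(chat_pipeline("Correct this sentence: He go to school yesterday.")[0]["generated_text"])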
# Initialize conversation history with the system prompt
conversation_history = (
    "System_prompt: You are a helpful, respectful, and honest assistant designed to improve English "
    "language skills. Always provide accurate and helpful responses to language improvement tasks, "
    "while ensuring safety and ethical standards. Your answers should not include any harmful, "
    "unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses "
    "are socially unbiased, positive, and focused on enhancing language skills. If a question does not "
    "make sense or is not factually coherent, explain why instead of answering something incorrect. "
    "If you don't know the answer to a question, please don't share false information. Your role is to "
    "guide users through various language exercises and challenges, helping them to practice and "
    "improve their English skills in a fun and engaging way. Always encourage users to try different "
    "approaches and provide constructive feedback to help them progress.\n"
)
generation_params = {
    "max_length": 100,          # cap on total output length in tokens
    "repetition_penalty": 1.2,  # discourage repeated phrases
    "do_sample": True,          # temperature/top_p/top_k only take effect when sampling is enabled
    "temperature": 0.2,
    "top_p": 0.99,
    "top_k": 1,
}
# Function to handle conversation
def chat_with_model(input_text):
    global conversation_history
    # Combine the new input with the conversation history
    prompt = conversation_history + input_text
    # Generate a response using the pipeline with the provided prompt and generation parameters
    response = chat_pipeline(prompt, **generation_params)[0]["generated_text"]
    # Update conversation history (append the new input and answer)
    conversation_history += f"\nUser: {input_text}\nAssistant: {response}\n"
    # Display the answer using text wrapping for readability
    print(fill(response, width=80))
    # Return the answer for further use (if needed)
    return response
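
# conversation_history grows without bound, and prompts longer than the model's
# input limit (512 tokens for most T5 checkpoints) get truncated. A minimal
# trimming sketch (hypothetical helper, not part of the original Space) that
# keeps the system prompt and only the most recent dialogue:
def trim_history(history, system_prompt, max_chars=1500):
    """Drop the oldest turns, keeping the system prompt plus recent text."""
    dialogue = history[len(system_prompt):]
    return system_prompt + dialogue[-max_chars:]
# Usage sketch: conversation_history = trim_history(conversation_history, SYSTEM_PROMPT),
# where SYSTEM_PROMPT holds the initial value of conversation_history.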
# Example usage
# user_input = "What is the weather like today?"
# chat_with_model(user_input)
def greet(user_input):
    response = chat_with_model(user_input)
    return response

# Launch a Gradio interface (gradio is imported at the top of the file)
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()
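
# When running locally, launch(share=True) also prints a temporary public URL;
# the default launch() is sufficient on Hugging Face Spaces.
# iface.launch(share=True)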