# import gradio as gr
# from langchain.llms import HuggingFacePipeline
# from langchain.prompts import PromptTemplate
# from langchain.chains import LLMChain
# from transformers import AutoTokenizer
# import transformers
# import torch
# import warnings
#
# warnings.filterwarnings('ignore')
#
# model = 'MD1998/FLAN-T5-V1'
# tokenizer = AutoTokenizer.from_pretrained(model)
#
# prompt_template = PromptTemplate(
#     input_variables=["conversation"],
#     template="""\
# You are a helpful, respectful, and honest assistant designed to improve English language skills. Always provide accurate and helpful responses to language improvement tasks, while ensuring safety and ethical standards. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased, positive, and focused on enhancing language skills.
# If a question does not make sense or is not factually coherent, explain why instead of answering something incorrect. If you don't know the answer to a question, please don't share false information.
# Your role is to guide users through various language exercises and challenges, helping them to practice and improve their English skills in a fun and engaging way. Always encourage users to try different approaches and provide constructive feedback to help them progress.
# {conversation}
# """)
#
# pipeline = transformers.pipeline(
#     "text-generation",
#     model=model,
#     tokenizer=tokenizer,
#     torch_dtype=torch.bfloat16,
#     trust_remote_code=True,
#     device_map="auto",
#     max_length=15,
#     do_sample=True,
#     top_k=10,
#     top_p=0.95,  # top_p must be in (0, 1]; the original value of 5 was invalid
#     num_return_sequences=1,
#     eos_token_id=tokenizer.eos_token_id,
# )
#
# llm = HuggingFacePipeline(pipeline=pipeline, model_kwargs={'temperature': 0.1})
# chain = LLMChain(llm=llm, prompt=prompt_template, verbose=True)
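#
# Note: FLAN-T5 is an encoder-decoder (seq2seq) model, so the transformers library serves
# it through the "text2text-generation" pipeline rather than "text-generation"; the active
# code at the bottom of this file uses that task.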
##########################
# from transformers import T5Tokenizer
# from transformers import T5ForConditionalGeneration
# from textwrap import fill  # needed for the print() call below
#
# finetuned_model = T5ForConditionalGeneration.from_pretrained("MD1998/chating_beginner_v2")
# tokenizer = T5Tokenizer.from_pretrained("MD1998/chating_beginner_v2")
#
# # Initialize conversation history
# conversation_history = "System_prompt: You establish that the assistant is intelligent and helpful, and that you want to have an engaging conversation.\n"
#
# generation_params = {
#     "max_length": 100,
#     "repetition_penalty": 1.2,
#     "temperature": 0.2,
#     "top_p": 0.99,
#     "top_k": 1,
# }
#
# # Handle one turn of the conversation
# def chat_with_model(input_text):
#     global conversation_history
#     # Combine the new input with the conversation history
#     my_inputs = conversation_history + input_text
#     # Encode the inputs
#     inputs = tokenizer(my_inputs, return_tensors="pt")
#     # Generate outputs using the model
#     outputs = finetuned_model.generate(**inputs, **generation_params)
#     # Decode the outputs to get the answer
#     answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
#     # Update conversation history (append the new input and answer)
#     conversation_history += f"\nUser: {input_text}\nAssistant: {answer}\n"
#     # Display the answer using text wrapping for readability
#     print(fill(answer, width=80))
#     # Return the answer for further use (if needed)
#     return answer
#
# # Example usage
# # user_input = "What is the weather like today?"
# # chat_with_model(user_input)
#
# def greet(user_input):
#     response = chat_with_model(user_input)
#     return response
#
# iface = gr.Interface(fn=greet, inputs="text", outputs="text")
# iface.launch()
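#
# Note: the version above calls finetuned_model.generate() directly and decodes the output
# itself; the active code below wraps the same checkpoint in a "text2text-generation"
# pipeline, which handles tokenization and decoding internally.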
##########################
# def greet(prompt):
#     response = chain.run(prompt)
#     return response
#
# iface = gr.Interface(fn=greet, inputs="text", outputs="text")
# iface.launch()
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
from textwrap import fill

import gradio as gr
from transformers import pipeline, T5Tokenizer

# Load the tokenizer and the text2text-generation pipeline for the fine-tuned model
tokenizer = T5Tokenizer.from_pretrained("MD1998/chating_beginner_v2")
chat_pipeline = pipeline("text2text-generation", model="MD1998/chating_beginner_v2", tokenizer=tokenizer)

# Initialize the conversation history with the system prompt
conversation_history = "System_prompt: You are a helpful, respectful, and honest assistant designed to improve English language skills. Always provide accurate and helpful responses to language improvement tasks, while ensuring safety and ethical standards. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased, positive, and focused on enhancing language skills. If a question does not make sense or is not factually coherent, explain why instead of answering something incorrect. If you don't know the answer to a question, please don't share false information. Your role is to guide users through various language exercises and challenges, helping them to practice and improve their English skills in a fun and engaging way. Always encourage users to try different approaches and provide constructive feedback to help them progress.\n"

# Generation parameters forwarded to the pipeline on every call
generation_params = {
    "max_length": 100,
    "repetition_penalty": 1.2,
    "do_sample": True,  # temperature/top_p/top_k are ignored by generate() unless sampling is enabled
    "temperature": 0.2,
    "top_p": 0.99,
    "top_k": 1,
}
# Handle one turn of the conversation
def chat_with_model(input_text):
    global conversation_history
    # Combine the new input with the conversation history
    prompt = conversation_history + input_text
    # Generate a response using the pipeline with the shared generation parameters
    response = chat_pipeline(prompt, **generation_params)[0]["generated_text"]
    # Update the conversation history (append the new input and answer)
    conversation_history += f"\nUser: {input_text}\nAssistant: {response}\n"
    # Echo the answer wrapped to 80 columns for readability in the logs
    print(fill(response, width=80))
    # Return the answer for further use (if needed)
    return response
# Example usage
# user_input = "What is the weather like today?"
# chat_with_model(user_input)
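
# Optional sketch (not part of the original app): conversation_history grows without bound,
# while the underlying T5 checkpoint is typically configured for inputs of around 512 tokens,
# so an ever-growing prompt eventually exceeds what the model handles well. A helper like the
# one below could drop the oldest turns while keeping the system prompt; the character budget
# is an assumed stand-in for a real token count.
MAX_HISTORY_CHARS = 2000  # assumed budget; tune against the tokenizer's actual input limit

def trim_history(history: str, max_chars: int = MAX_HISTORY_CHARS) -> str:
    # Keep the first line (the system prompt) and drop the oldest turns until the rest fits.
    lines = history.splitlines(keepends=True)
    system_prompt, turns = lines[0], lines[1:]
    while turns and len(system_prompt) + sum(len(t) for t in turns) > max_chars:
        turns.pop(0)
    return system_prompt + "".join(turns)

# If used, chat_with_model could call trim_history(conversation_history) before building the prompt.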
# Gradio callback: forward the user's message to the model and return its reply
def greet(user_input):
    response = chat_with_model(user_input)
    return response

# Launch the Gradio interface
iface = gr.Interface(fn=greet, inputs="text", outputs="text")
iface.launch()