# app.py — Hugging Face Space (running). Earlier iterations are kept below as
# commented-out code; the live app (a BlenderBot chat UI) starts at the
# uncommented imports.
# --- Iteration 1: sentiment-analysis / summarization buttons (mostly disabled) ---
# import streamlit as st
# from transformers import pipeline
# summarizer = pipeline("summarization")  # was missing: t1 below was fed to an undefined name
# # pipe = pipeline("sentiment-analysis")
# # col1, col2 = st.columns(2)
# # with col1:
# #     x = st.button("Sentiment Analysis")
# # with col2:
# #     y = st.button("Text Summarization")
# # if x:
# #     t = st.text_input("Enter the Text")
# #     st.write(pipe(t))
# # if y:
# t1 = st.text_input("Enter the Text for Summarization")
# st.write(summarizer(t1))
# --- Iteration 2: summarization pipeline (llmware industry-BERT experiment partly disabled) ---
# from transformers import AutoTokenizer, AutoModel
# import streamlit as st
# tokenizer = AutoTokenizer.from_pretrained("llmware/industry-bert-insurance-v0.1")
# # model = AutoModel.from_pretrained("llmware/industry-bert-insurance-v0.1")
# # Use a pipeline as a high-level helper
# from transformers import pipeline
# # pipe = pipeline("feature-extraction")
# t = st.text_input("Enter the Text")
# pipe = pipeline("summarization")
# st.write(pipe(t))
# --- Iteration 3: synthetic tabular data with ydata-synthetic (CTGAN) ---
# import pandas as pd
# import numpy as np
# import streamlit as st
# from os import getcwd
# from ydata_synthetic.synthesizers.regular import RegularSynthesizer
# from ydata_synthetic.synthesizers import ModelParameters, TrainParameters
# text_file = st.file_uploader("Upload the Data File")
# st.write("-------------------------")
# if text_file is not None:
#     df = pd.read_csv(text_file)
#     dd_list = df.columns
#     cat_cols = st.multiselect("Select the Categorical Columns", dd_list)
#     num_cols = st.multiselect("Select the Numerical Columns", dd_list)
#     Output_file = st.text_input('Enter Output File Name')
#     s = st.number_input('Enter the Sample Size', value=1000)  # value=, not min_value
#     OP = Output_file + '.csv'
#     sub = st.button('Submit')
#     if sub:
#         batch_size = 50
#         epochs = 3
#         learning_rate = 2e-4
#         beta_1 = 0.5
#         beta_2 = 0.9
#         ctgan_args = ModelParameters(batch_size=batch_size,
#                                      lr=learning_rate,
#                                      betas=(beta_1, beta_2))
#         train_args = TrainParameters(epochs=epochs)
#         synth = RegularSynthesizer(modelname='ctgan', model_parameters=ctgan_args)
#         synth.fit(data=df, train_arguments=train_args, num_cols=num_cols, cat_cols=cat_cols)
#         df_syn = synth.sample(s)
#         df_syn.to_csv(OP)
#         c = getcwd() + '/' + OP
#         with open(c, "rb") as file:
#             st.download_button(label=':blue[Download]', data=file, file_name=OP, mime="text/csv")  # CSV mime, not image/png
#         st.success("Thanks for using the app !!!")
# --- Iteration 4: insurance Q&A with a phi-2 LoRA fine-tune ---
# import torch
# import streamlit as st
# from transformers import AutoModelForCausalLM, AutoTokenizer
# # torch.set_default_device("cuda")
# model = AutoModelForCausalLM.from_pretrained("soulhq-ai/phi-2-insurance_qa-sft-lora", torch_dtype="auto", trust_remote_code=True)
# tokenizer = AutoTokenizer.from_pretrained("soulhq-ai/phi-2-insurance_qa-sft-lora", trust_remote_code=True)
# i = st.text_input('Prompt', 'Life of Brian')
# # inputs = tokenizer('''### Instruction: What Does Basic Homeowners Insurance Cover?\n### Response: ''', return_tensors="pt", return_attention_mask=False)
# inputs = tokenizer(i, return_tensors="pt", return_attention_mask=False)
# outputs = model.generate(**inputs, max_length=1024)
# text = tokenizer.batch_decode(outputs)[0]
# st.write(text)  # print() only reaches the server log in a Streamlit app
# --- Iteration 5: BlenderBot chat with a module-level history list ---
# import torch
# import streamlit as st
# from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
# model_name = "facebook/blenderbot-400M-distill"
# model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
# tokenizer = AutoTokenizer.from_pretrained(model_name)
# ch = []  # reset on every rerun; the live version below uses st.session_state instead
# def chat():
#     h_s = "\n".join(ch)
#     i = st.text_input("enter")
#     i_s = tokenizer.encode_plus(h_s, i, return_tensors="pt")
#     outputs = model.generate(**i_s, max_length=60)
#     response = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
#     ch.append(i)
#     ch.append(response)
#     return response
# if __name__ == "__main__":
#     chat()
import streamlit as st
from streamlit_chat import message as st_message
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM


@st.cache_resource  # load the model once per process, not on every rerun
def get_models():
    # Load the model and the tokenizer
    tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    model = AutoModelForSeq2SeqLM.from_pretrained(
        "facebook/blenderbot_small-90M")
    return tokenizer, model


if "history" not in st.session_state:
    st.session_state.history = []  # chat history survives reruns here

st.title("Blenderbot")


def generate_answer():
    tokenizer, model = get_models()
    user_message = st.session_state.input_text
    inputs = tokenizer(st.session_state.input_text, return_tensors="pt")
    result = model.generate(**inputs)
    message_bot = tokenizer.decode(
        result[0], skip_special_tokens=True
    )  # decode the result to a string
    st.session_state.history.append({"message": user_message, "is_user": True})
    st.session_state.history.append({"message": message_bot, "is_user": False})


st.text_input("Tap to chat with the bot",
              key="input_text", on_change=generate_answer)

for idx, chat in enumerate(st.session_state.history):
    st_message(**chat, key=str(idx))  # unique key per message avoids duplicate-widget errors
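# To try the live app locally (assuming the Space's requirements include
# streamlit, streamlit-chat, transformers, and torch):
#   streamlit run app.py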