import streamlit as st
import langchain
import pandas as pd
import numpy as np
import os
import re
from langchain.chat_models import ChatOpenAI
import openai
from langchain import HuggingFaceHub, LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains import ConversationalRetrievalChain
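# Load the personality questionnaire CSV, drop the duplicated header row,
# and forward-fill merged Title/Question cells so every row is self-contained.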
trait_content_df=pd.read_csv(r'C:\codes\Dahila UI\AI Personality Chart trait_content.csv')
trait_content_df=trait_content_df.drop(0,axis=0)
trait_content_df.rename(columns={'Column 1':'Question','Column 2':'Options','Column 3':'Traits','Column 4':'Content'},inplace=True)
trait_content_df['Title'].fillna(method='ffill',inplace=True)
trait_content_df['Question'].fillna(method='ffill',inplace=True)
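# Prompt template for the bio generator; {history} and {human_input} are filled
# in by the chain's conversation memory and the caller, respectively.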
template = """
You are given the options a user selected for personality-related questions, together with the traits detected from them.
Your task is to create a personalized dating-app bio for the user. Do not quote the options in the answer; use them only as reference. Limit the answer to no more than 100 words.
{history}
Me: {human_input}
Jack:
"""
prompt = PromptTemplate(
    input_variables=["history", "human_input"],
    template=template
)
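# Chain the prompt with GPT-3.5 and a two-turn conversation window so only recent context is kept.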
llm_chain = LLMChain(
    llm=ChatOpenAI(temperature=0.0, model_name='gpt-3.5-turbo'),
    prompt=prompt,
    verbose=True,
    memory=ConversationBufferWindowMemory(k=2)
)
def extract_text_from_html(html):
    """Strip any HTML tags from the model output and trim surrounding whitespace."""
    cleanr = re.compile('<.*?>')
    cleantext = re.sub(cleanr, '', html)
    return cleantext.strip()
def conversational_chat(query, replacement_word=None):
    """Run the LLM chain on the query and optionally rename the 'Jack' persona to the user's name."""
    hist_dict['past'].append(query)
    output = llm_chain.predict(human_input=query)
    hist_dict['generated'].append(output)
    if replacement_word is not None:
        # Case-insensitive replacement of the persona name with the user-supplied name
        output = re.sub(r'\bjack\b', replacement_word, output, flags=re.IGNORECASE)
    return extract_text_from_html(output)
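# Seed the chat history with a default greeting exchange.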
hist_dict={}
hist_dict['generated']=["Hello ! Ask me anything about " + " 🤗"]
hist_dict['past'] = ["Hey ! 👋"]
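# ChatOpenAI reads the OpenAI key from the OPENAI_API_KEY environment variable set below.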
os.environ["OPENAI_API_KEY"] ='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'
trait_content_df_org=pd.read_csv(r'C:\codes\Dahila UI\AI Personality Chart trait_content.csv')
trait_content_df_org=trait_content_df_org.drop(0,axis=0)
trait_content_df_org.rename(columns={'Column 1':'Question','Column 2':'Options','Column 3':'Traits','Column 4':'Content'},inplace=True)
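# Streamlit UI: render each question as a pair of option checkboxes, collect the
# selections, build a prompt from the matching traits/content, and generate the bio on submit.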
def ui():
    # Initialize a dictionary to store responses
    responses = {}
    # Create checkboxes for each question and its pair of options
    index = 0
    while index < len(trait_content_df_org):
        question = trait_content_df_org.iloc[index]["Question"]
        st.write(question)
        option_a = st.checkbox(f"Option A: {trait_content_df_org.iloc[index]['Options']}", key=f"option_a_{index}")
        # Check that Option B exists and has a question value (missing CSV cells are NaN, not None)
        if index + 1 < len(trait_content_df_org) and pd.notna(trait_content_df_org.iloc[index + 1]["Question"]):
            option_b = st.checkbox(f"Option B: {trait_content_df_org.iloc[index + 1]['Options']}", key=f"option_b_{index + 1}")
        else:
            option_b = False
        st.write("")  # Add some spacing between questions
        # Store responses in the dictionary
        if option_a:
            responses[question] = f"{trait_content_df_org.iloc[index]['Options']}"
        if option_b:
            responses[question] = f"{trait_content_df_org.iloc[index + 1]['Options']}"
        index += 2  # Move to the next question and its options
st.write("Responses:")
for question, selected_option in responses.items():
st.write(question)
st.write(selected_option)
# Generate a prompt based on selected options
selected_traits = [responses[question] for question in responses]
options_list = []
traits_list = []
content_list = []
for trait_str in selected_traits:
matching_rows = trait_content_df_org[trait_content_df_org["Options"] == trait_str]
if not matching_rows.empty:
options_list.append(matching_rows["Options"].values[0])
traits_list.append(matching_rows["Traits"].values[0])
content_list.append(matching_rows["Content"].values[0])
prompt = f"Options selected are {', '.join(options_list)}. The following are Traits {{{', '.join(traits_list)}}}, and the content for the options is {', '.join(content_list)}"
# Display user input field
name_input = st.text_input("Enter your name:")
# Add a submit button
if st.button("Submit"):
# Generate a chatbot response
bio = conversational_chat(prompt, name_input)
st.write(bio)
if __name__ == '__main__':
    ui()