date_collected (stringclasses, 1 value) | repo_name (stringlengths, 6-116) | file_name (stringlengths, 2-220) | file_contents (stringlengths, 13-357k) | prompts (sequence)
---|---|---|---|---|
2024-01-10 | navhealth/llm-medicaid-eligibility | html_to_python_combine.py | if __name__ == "__main__":
from utils import create_llm
llm = create_llm()
category_urls = [
"https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/individual-adults",
"https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/parents-and-caretakers",
"https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/pregnant-individuals",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/children",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/noncitizens",
"https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/aged-blind-or-disabled",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/age-65-and-older-or-medicare-eligible",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/foster-care",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/long-term-care-and-hospice",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/medicare-savings-program",
]
from html_to_python_clean_example import url_to_python
python_snippets_joined = "\n\n".join(
url_to_python(url, llm=llm) for url in category_urls
)
from langchain.prompts import PromptTemplate
from langchain.chains import ConversationChain
conversational_llm_prompt = "The following is a conversation between a human and an AI. The AI is an expert on Medicaid eligibility and is able to write quality Python code. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:"
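# ConversationChain keeps the running chat history, so the follow-up prompt can ask the model to revise the combined program it produced for the first prompt.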
conversational_llm = ConversationChain(
llm=llm,
prompt=PromptTemplate(
input_variables=["history", "input"], template=conversational_llm_prompt
),
)
prompts = [
f"""Below are {len(category_urls)} Python code snippets. Combine all the snippets into a single Python program that determines whether the user is eligible for Medicaid. The code should not only executable but should also accurately encode eligibility rules for all programs. The code should incorporate all of the above code snippets. Write the code to determine whether a user is eligible for any of the above Medicaid programs:
{python_snippets_joined}""",
"Ensure that the code incorporates all information from the income tables, and not just a single value. Re-write the code: ",
]
for prompt in prompts:
output = conversational_llm({"input": prompt})["response"]
print(conversational_llm.memory.chat_memory.messages[-2].content)
print("=====================================================")
print(output)
print("=====================================================")
| [
"['Below are 1 Python code snippets. Combine all the snippets into a single Python program that determines whether the user is eligible for Medicaid. The code should not only executable but should also accurately encode eligibility rules for all programs. The code should incorporate all of the above code snippets. Write the code to determine whether a user is eligible for any of the above Medicaid programs: \\n\\nPLACEHOLDER', 'Ensure that the code incorporates all information from the income tables, and not just a single value. Re-write the code: ']",
"The following is a conversation between a human and an AI. The AI is an expert on Medicaid eligibility and is able to write quality Python code. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:"
] |
2024-01-10 | navhealth/llm-medicaid-eligibility | html_to_text_to_python_combine.py | if __name__ == "__main__":
from utils import create_llm
llm = create_llm()
category_urls = [
"https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/individual-adults",
"https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/parents-and-caretakers",
"https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/pregnant-individuals",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/children",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/noncitizens",
"https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/aged-blind-or-disabled",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/age-65-and-older-or-medicare-eligible",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/foster-care",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/long-term-care-and-hospice",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/medicare-savings-program",
]
from html_to_text_to_python import url_to_python
python_snippets_joined = "\n\n".join(
url_to_python(url, llm=llm) for url in category_urls
)
from langchain.prompts import PromptTemplate
from langchain.chains import ConversationChain
conversational_llm_prompt = "The following is a conversation between a human and an AI. The AI is an expert on Medicaid eligibility and is able to write quality Python code. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:"
conversational_llm = ConversationChain(
llm=llm,
prompt=PromptTemplate(
input_variables=["history", "input"], template=conversational_llm_prompt
),
)
prompts = [
f"""Below are {len(category_urls)} Python code snippets. Combine all the snippets into a single Python program that determines whether the user is eligible for Medicaid. The code should not only executable but should also accurately encode eligibility rules for all programs. The code should incorporate all of the above code snippets. Write the code to determine whether a user is eligible for any of the above Medicaid programs:
{python_snippets_joined}""",
"Ensure that the code incorporates all information from the income tables, and not just a single value. Re-write the code: ",
]
for prompt in prompts:
output = conversational_llm({"input": prompt})["response"]
print(conversational_llm.memory.chat_memory.messages[-2].content)
print("=====================================================")
print(output)
print("=====================================================")
| [
"['Below are 1 Python code snippets. Combine all the snippets into a single Python program that determines whether the user is eligible for Medicaid. The code should not only executable but should also accurately encode eligibility rules for all programs. The code should incorporate all of the above code snippets. Write the code to determine whether a user is eligible for any of the above Medicaid programs: \\n\\nPLACEHOLDER', 'Ensure that the code incorporates all information from the income tables, and not just a single value. Re-write the code: ']",
"The following is a conversation between a human and an AI. The AI is an expert on Medicaid eligibility and is able to write quality Python code. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:"
] |
2024-01-10 | navhealth/llm-medicaid-eligibility | html_to_text_combine_to_python.py | if __name__ == "__main__":
from utils import create_llm
llm = create_llm()
category_urls = [
"https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/individual-adults",
"https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/parents-and-caretakers",
"https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/pregnant-individuals",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/children",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/noncitizens",
"https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/aged-blind-or-disabled",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/age-65-and-older-or-medicare-eligible",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/foster-care",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/long-term-care-and-hospice",
# "https://www.hca.wa.gov/free-or-low-cost-health-care/i-need-medical-dental-or-vision-care/medicare-savings-program",
]
from html_to_text_to_python import url_to_rules
rules_joined = "\n\n".join(url_to_rules(url, llm=llm) for url in category_urls)
from langchain.prompts import PromptTemplate
from langchain.chains import ConversationChain
conversational_llm_prompt = "The following is a conversation between a human and an AI. The AI is an expert on Medicaid eligibility and is able to write quality Python code. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:"
conversational_llm = ConversationChain(
llm=llm,
prompt=PromptTemplate(
input_variables=["history", "input"], template=conversational_llm_prompt
),
)
prompts = [
f"""Below are descriptions of eligibility requirements for {len(category_urls)} different categories of Medicaid beneficiaries. Using all these of eligibility requirements, write a single Python program that determines whether the user is eligible for any Medicaid program. The Python program should prompt the user with questions and use their responses to determine if they are eligible:
{rules_joined}""",
"Ensure that the code incorporates all information from the income tables, and not just a single value. Re-write the code: ",
]
for prompt in prompts:
output = conversational_llm({"input": prompt})["response"]
print(conversational_llm.memory.chat_memory.messages[-2].content)
print("=====================================================")
print(output)
print("=====================================================")
| [
"['Below are descriptions of eligibility requirements for 1 different categories of Medicaid beneficiaries. Using all these of eligibility requirements, write a single Python program that determines whether the user is eligible for any Medicaid program. The Python program should prompt the user with questions and use their responses to determine if they are eligible: \\n\\nPLACEHOLDER', 'Ensure that the code incorporates all information from the income tables, and not just a single value. Re-write the code: ']",
"The following is a conversation between a human and an AI. The AI is an expert on Medicaid eligibility and is able to write quality Python code. If the AI does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n{history}\nHuman: {input}\nAI:"
] |
2024-01-10 | twahidin/workshop_final_bot | kb_module.py | import streamlit as st
import sqlite3
import streamlit_antd_components as sac
import pandas as pd
import os
import openai
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import LanceDB
from authenticate import return_api_key
import lancedb
import pickle
import configparser
import ast
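# ConfigHandler reads config.ini and converts values that look like Python literals (lists, dicts) back into objects.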
class ConfigHandler:
def __init__(self):
self.config = configparser.ConfigParser()
self.config.read('config.ini')
def get_config_values(self, section, key):
value = self.config.get(section, key)
try:
# Try converting the string value to a Python data structure
return ast.literal_eval(value)
except (SyntaxError, ValueError):
# If not a data structure, return the plain string
return value
config_handler = ConfigHandler()
TCH = config_handler.get_config_values('constants', 'TCH')
STU = config_handler.get_config_values('constants', 'STU')
SA = config_handler.get_config_values('constants', 'SA')
AD = config_handler.get_config_values('constants', 'AD')
# Create or check for the 'database' directory in the current working directory
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")
if not os.path.exists(WORKING_DIRECTORY):
os.makedirs(WORKING_DIRECTORY)
if st.secrets["sql_ext_path"] == "None":
WORKING_DATABASE= os.path.join(WORKING_DIRECTORY , st.secrets["default_db"])
else:
WORKING_DATABASE= st.secrets["sql_ext_path"]
def fetch_vectorstores_with_usernames():
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
query = '''
SELECT
Vector_Stores.vs_id,
Subject.subject_name,
Topic.topic_name,
Vector_Stores.vectorstore_name,
Users.username,
Vector_Stores.sharing_enabled
FROM Vector_Stores
JOIN Users ON Vector_Stores.user_id = Users.user_id
LEFT JOIN Subject ON Vector_Stores.subject = Subject.id
LEFT JOIN Topic ON Vector_Stores.topic = Topic.id;
'''
cursor.execute(query)
data = cursor.fetchall()
conn.close()
return data
def display_vectorstores():
data = fetch_vectorstores_with_usernames()
df = pd.DataFrame(data, columns=["vs_id", "subject_name", "topic_name", "vectorstore_name", "username", "sharing_enabled"])
# Convert the 'sharing_enabled' values
df["sharing_enabled"] = df["sharing_enabled"].apply(lambda x: '✔' if x == 1 else '')
st.dataframe(
df,
use_container_width=True,
column_order=["vs_id", "subject_name", "topic_name", "vectorstore_name", "username", "sharing_enabled"]
)
def fetch_all_files():
"""
Fetch all files either shared or based on user type
"""
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Construct the SQL query with JOINs for Subject, Topic, and Users tables
if st.session_state.user['profile_id'] == 'SA':
cursor.execute('''
SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username
FROM Files
JOIN Subject ON Files.subject = Subject.id
JOIN Topic ON Files.topic = Topic.id
JOIN Users ON Files.user_id = Users.user_id
''')
else:
cursor.execute('''
SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username
FROM Files
JOIN Subject ON Files.subject = Subject.id
JOIN Topic ON Files.topic = Topic.id
JOIN Users ON Files.user_id = Users.user_id
WHERE Files.sharing_enabled = 1
''')
files = cursor.fetchall()
formatted_files = [f"({file[0]}) {file[1]} ({file[4]})" for file in files]
conn.close()
return formatted_files
def fetch_file_data(file_id):
"""
Fetch file data given a file id
"""
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
cursor.execute("SELECT data, metadata FROM Files WHERE file_id = ?", (file_id,))
data = cursor.fetchone()
conn.close()
if data:
return data[0], data[1]
else:
return None, None
def insert_topic(org_id, topic_name):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
try:
cursor.execute('INSERT INTO Topic (org_id, topic_name) VALUES (?, ?);', (org_id, topic_name))
conn.commit()
return True # Indicates successful insertion
except sqlite3.IntegrityError:
# IntegrityError occurs if topic_name is not unique within the org
return False # Indicates topic_name is not unique within the org
finally:
conn.close()
def insert_subject(org_id, subject_name):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
try:
cursor.execute('INSERT INTO Subject (org_id, subject_name) VALUES (?, ?);', (org_id, subject_name))
conn.commit()
return True # Indicates successful insertion
except sqlite3.IntegrityError:
# IntegrityError occurs if subject_name is not unique within the org
return False # Indicates subject_name is not unique within the org
finally:
conn.close()
def select_organization():
with sqlite3.connect(WORKING_DATABASE) as conn:
cursor = conn.cursor()
# Org selection
org_query = "SELECT org_name FROM Organizations"
cursor.execute(org_query)
orgs = cursor.fetchall()
org_names = [org[0] for org in orgs]
# Use a Streamlit selectbox to choose an organization
selected_org_name = st.selectbox("Select an organization:", org_names)
# Retrieve the org_id for the selected organization
cursor.execute('SELECT org_id FROM Organizations WHERE org_name = ?;', (selected_org_name,))
result = cursor.fetchone()
if result:
org_id = result[0]
st.write(f"The org_id for {selected_org_name} is {org_id}.")
return org_id
else:
st.write(f"Organization '{selected_org_name}' not found in the database.")
return None
def fetch_subjects_by_org(org_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Check if the user is a super_admin (org_id is 0)
if org_id == 0:
cursor.execute('SELECT * FROM Subject;')
else:
cursor.execute('SELECT * FROM Subject WHERE org_id = ?;', (org_id,))
subjects = cursor.fetchall()
conn.close()
return subjects
def fetch_topics_by_org(org_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Check if the user is a super_admin (org_id is 0)
if org_id == 0:
cursor.execute('SELECT * FROM Topic;')
else:
cursor.execute('SELECT * FROM Topic WHERE org_id = ?;', (org_id,))
topics = cursor.fetchall()
conn.close()
return topics
def split_docs(file_path,meta):
#def split_meta_docs(file, source, tch_code):
loader = UnstructuredFileLoader(file_path)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
metadata = {"source": meta}
for doc in docs:
doc.metadata.update(metadata)
return docs
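# Create (or overwrite) a LanceDB table seeded with a single "Query Unsuccessful" placeholder row.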
def create_lancedb_table(embeddings, meta, table_name):
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
# LanceDB connection
db = lancedb.connect(lancedb_path)
table = db.create_table(
f"{table_name}",
data=[
{
"vector": embeddings.embed_query("Query Unsuccessful"),
"text": "Query Unsuccessful",
"id": "1",
"source": f"{meta}"
}
],
mode="overwrite",
)
return table
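# Pickle the vector store and save it to the Vector_Stores table, tagged with user, subject, topic and sharing flag.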
def save_to_vectorstores(vs, vstore_input_name, subject, topic, username, share_resource=False):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Fetch the user's details
cursor.execute('SELECT user_id FROM Users WHERE username = ?', (username,))
user_details = cursor.fetchone()
if not user_details:
st.error("Error: User not found.")
return
user_id = user_details[0]
# If Vector_Store instance exists in session state, then serialize and save
if vs:
serialized_db = pickle.dumps(vs)
# Check if the entry already exists
cursor.execute('SELECT 1 FROM Vector_Stores WHERE vectorstore_name LIKE ? AND user_id = ?', (f"%{vstore_input_name}%", user_id))
exists = cursor.fetchone()
if exists:
st.error("Error: An entry with the same vectorstore_name and user_id already exists.")
return
if subject is None:
st.error("Error: Subject is missing.")
return
if topic is None:
st.error("Error: Topic is missing.")
return
# Get the subject and topic IDs
cursor.execute('SELECT id FROM Subject WHERE subject_name = ?', (subject,))
subject_id = cursor.fetchone()[0]
cursor.execute('SELECT id FROM Topic WHERE topic_name = ?', (topic,))
topic_id = cursor.fetchone()[0]
# Insert the new row
cursor.execute('''
INSERT INTO Vector_Stores (vectorstore_name, data, user_id, subject, topic, sharing_enabled)
VALUES (?, ?, ?, ?, ?, ?)
''', (vstore_input_name, serialized_db, user_id, subject_id, topic_id, share_resource))
conn.commit()
conn.close()
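# Streamlit workflow: choose a subject and topic, select uploaded files, then build and save a LanceDB knowledge base from their contents.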
def create_vectorstore():
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
full_docs = []
st.subheader("Enter the topic and subject for your knowledge base")
embeddings = OpenAIEmbeddings()
if st.session_state.user['profile_id'] == SA:
org_id = select_organization()
if org_id is None:
return
else:
org_id = st.session_state.user["org_id"]
# Fetch all available subjects
subjects = fetch_subjects_by_org(st.session_state.user["org_id"])
subject_names = [sub[2] for sub in subjects] # Assuming index 2 holds the subject_name
selected_subject = st.selectbox("Select an existing subject or type a new one:", options=subject_names + ['New Subject'])
if selected_subject == 'New Subject':
subject = st.text_input("Please enter the new subject name:", max_chars=30)
if subject:
insert_subject(org_id, subject)
else:
subject = selected_subject
# Fetch all available topics
topics = fetch_topics_by_org(st.session_state.user["org_id"])
topic_names = [topic[2] for topic in topics] # Assuming index 2 holds the topic_name
selected_topic = st.selectbox("Select an existing topic or type a new one:", options=topic_names + ['New Topic'])
if selected_topic == 'New Topic':
topic = st.text_input("Please enter the new topic name:", max_chars=30)
if topic:
insert_topic(org_id, topic)
else:
topic = selected_topic
vectorstore_input = st.text_input("Please type in a name for your knowledge base:", max_chars=20)
vs_name = vectorstore_input + f"_({st.session_state.user['username']})"
share_resource = st.checkbox("Share this resource", value=True) # <-- Added this line
# Show the current build of files for the latest database
st.subheader("Select one or more files to build your knowledge base")
files = fetch_all_files()
if files:
selected_files = sac.transfer(items=files, label=None, index=None, titles=['Uploaded files', 'Select files for KB'], format_func='title', width='100%', height=None, search=True, pagination=False, oneway=False, reload=True, disabled=False, return_index=False)
# Alert to confirm the creation of knowledge base
st.warning("Building your knowledge base will take some time. Please be patient.")
build = sac.buttons([
dict(label='Build VectorStore', icon='check-circle-fill', color = 'green'),
dict(label='Cancel', icon='x-circle-fill', color='red'),
], label=None, index=1, format_func='title', align='center', position='top', size='default', direction='horizontal', shape='round', type='default', compact=False, return_index=False)
if build == 'Build VectorStore' and selected_files:
for s_file in selected_files:
file_id = int(s_file.split("(", 1)[1].split(")", 1)[0])
file_data, meta = fetch_file_data(file_id)
docs = split_docs(file_data, meta)
full_docs.extend(docs)
db = LanceDB.from_documents(full_docs, OpenAIEmbeddings(), connection=create_lancedb_table(embeddings, meta, vs_name))
save_to_vectorstores(db, vs_name, subject, topic, st.session_state.user["username"], share_resource) # Passing the share_resource to the function
st.success("Knowledge Base loaded")
else:
st.write("No files found in the database.")
def delete_lancedb_table(table_name):
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
# LanceDB connection
db = lancedb.connect(lancedb_path)
db.drop_table(f"{table_name}")
def fetch_vectorstores_by_user_id(user_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Fetch vectorstores based on user_id
cursor.execute('SELECT vectorstore_name FROM Vector_Stores WHERE user_id = ?;', (user_id,))
vectorstores = cursor.fetchall()
conn.close()
return vectorstores
def delete_vectorstores():
st.subheader("Delete VectorStores in Database:")
user_vectorstores = fetch_vectorstores_by_user_id(st.session_state.user["id"])
if user_vectorstores:
vectorstore_names = [vs[0] for vs in user_vectorstores]
selected_vectorstores = st.multiselect("Select vectorstores to delete:", options=vectorstore_names)
confirm_delete = st.checkbox("I understand that this action cannot be undone.", value=False)
if st.button("Delete VectorStore"):
if confirm_delete and selected_vectorstores:
delete_vectorstores_from_db(selected_vectorstores, st.session_state.user["id"], st.session_state.user["profile_id"])
st.success(f"Deleted {len(selected_vectorstores)} vectorstores.")
else:
st.warning("Please confirm the deletion action.")
else:
st.write("No vectorstores found in the database.")
def delete_vectorstores_from_db(vectorstore_names, user_id, profile):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
for vectorstore_name in vectorstore_names:
if profile in ['SA', 'AD']:
# Delete the corresponding LanceDB table
delete_lancedb_table(vectorstore_name)
# Delete vectorstore irrespective of the user_id associated with them
cursor.execute('DELETE FROM Vector_Stores WHERE vectorstore_name=?;', (vectorstore_name,))
else:
# Delete the corresponding LanceDB table
delete_lancedb_table(vectorstore_name)
# Delete only if the user_id matches
cursor.execute('DELETE FROM Vector_Stores WHERE vectorstore_name=? AND user_id=?;', (vectorstore_name, user_id))
# Check if the row was affected
if cursor.rowcount == 0:
st.error(f"Unable to delete vectorstore '{vectorstore_name}' that is not owned by you.")
conn.commit() # Commit the changes
conn.close() # Close the connection
| [] |
2024-01-10 | twahidin/workshop_final_bot | analytics_dashboard.py | import streamlit as st
from langchain.chat_models import ChatOpenAI
import pandas as pd
from authenticate import return_api_key
import openai
from pandasai import SmartDataframe
from pandasai.llm.openai import OpenAI
import matplotlib.pyplot as plt
import configparser
import ast
import os
from class_dash import fetch_data_by_username, fetch_data_by_school, fetch_data_by_sa
# Create or check for the 'database' directory in the current working directory
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")
if not os.path.exists(WORKING_DIRECTORY):
os.makedirs(WORKING_DIRECTORY)
if st.secrets["sql_ext_path"] == "None":
WORKING_DATABASE= os.path.join(WORKING_DIRECTORY , st.secrets["default_db"])
else:
WORKING_DATABASE= st.secrets["sql_ext_path"]
class ConfigHandler:
def __init__(self):
self.config = configparser.ConfigParser()
self.config.read('config.ini')
def get_config_values(self, section, key):
value = self.config.get(section, key)
try:
# Try converting the string value to a Python data structure
return ast.literal_eval(value)
except (SyntaxError, ValueError):
# If not a data structure, return the plain string
return value
config_handler = ConfigHandler()
SA = config_handler.get_config_values('constants', 'SA')
AD = config_handler.get_config_values('constants', 'AD')
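# Pull usage data scoped to the caller's role: super admins via fetch_data_by_sa, school admins via fetch_data_by_school, everyone else only their own rows via fetch_data_by_username.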
def download_data(user_id, sch_id, profile):
if profile == SA:#super admin
data, columns = fetch_data_by_sa(sch_id)
elif profile == AD:#administrator or super admin
data, columns = fetch_data_by_school(sch_id)
else:
data, columns = fetch_data_by_username(user_id)
df = pd.DataFrame(data, columns=columns)
return df
# PandasAI- A smart agent that can do visual analytics
def pandas_ai(user_id, sch_id, profile):
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
# Upload CSV file using st.file_uploader
uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
if "api_key" not in st.session_state:
st.session_state.api_key = return_api_key()
st.write("API key: ", st.session_state.api_key)
st.session_state.prompt_history = []
st.session_state.df = None
if uploaded_file is not None:
try:
df = pd.read_csv(uploaded_file)
st.session_state.df = df
except Exception as e:
st.write("There was an error processing the CSV file.")
st.write(e)
else:
st.session_state.df = download_data(user_id, sch_id, profile)
chart_dir = os.path.join("exports/charts")
with st.form("Question"):
question = st.text_input("Question", value="", type="default")
submitted = st.form_submit_button("Submit")
if submitted:
# Check if the file exists and remove it
chart_path = os.path.join("exports/charts", "temp_chart.png")
if os.path.exists(chart_path):
os.remove(chart_path)
with st.spinner():
llm =OpenAI(api_token=st.session_state.api_key)
df = SmartDataframe(
st.session_state.df,
config={
"llm": llm,
"save_charts_path": chart_dir,
"save_charts": True,
"verbose": True,
},
)
response = df.chat(
question
) # Using 'chat' method based on your context
# Display the textual response (if any):
if response:
st.write(response)
if os.path.exists(chart_path):
st.image(
chart_path, caption="Generated Chart", use_column_width=True
)
# Append the question to the history:
st.session_state.prompt_history.append(question)
if st.session_state.df is not None:
st.subheader("Data Table:")
st.write(st.session_state.df)
st.subheader("Prompt history:")
st.write(st.session_state.prompt_history)
if st.button("Clear"):
st.session_state.prompt_history = []
st.session_state.df = None
| [] |
2024-01-10 | twahidin/workshop_final_bot | prototype_application.py | import streamlit as st
from main_bot import basebot
from kb_module import display_vectorstores
from users_module import vectorstore_selection_interface
from datetime import datetime
from main_bot import insert_into_data_table
import openai
import os
from authenticate import return_api_key
from datetime import datetime
from langchain.memory import ConversationSummaryBufferMemory
from langchain.memory import ConversationBufferWindowMemory
from langchain.chat_models import ChatOpenAI
# if "form_title" not in st.session_state:
# st.session_state.form_title = "Message Generator"
# if "question_1" not in st.session_state:
# st.session_state.question_1 = "Name"
# if "question_2" not in st.session_state:
# st.session_state.question_2 = "Occupation"
# if "question_3" not in st.session_state:
# st.session_state.question_3 = "Subject"
# if "question_4" not in st.session_state:
# st.session_state.question_4 = "Message"
# if "question_5" not in st.session_state:
# st.session_state.question_5 = "Number of words"
# if "my_form_template" not in st.session_state:
# st.session_state.my_form_template = "To help you write your email, You may refer to this resources to answer your query,{resource},{source}"
# if "my_app_template" not in st.session_state:
# st.session_state.my_app_template = "Pretend you are a {q2}, your name is {q1}, I want you to write an email on {q4} on the subject {q3} , the number of words is {q5}"
# if "my_app_template_advance" not in st.session_state:
# st.session_state.my_app_template_advance = """Pretend you are a helpful assistant, Use the following pieces of context to answer the question at the end.
# If you don't know the answer, just say that you don't know, don't try to make up an answer. Search Result: {resource}, {source}.
# History of conversation: {mem}.You must quote the source of the Search Result if you are using the search result as part of the answer"""
def form_input():
with st.form("my_form"):
st.subheader(st.session_state.form_title)
q1 = st.text_input(st.session_state.question_1)
q2 = st.text_input(st.session_state.question_2)
q3 = st.text_input(st.session_state.question_3)
q4 = st.text_input(st.session_state.question_4)
q5 = st.text_input(st.session_state.question_5)
# Every form must have a submit button.
submitted = st.form_submit_button("Submit")
if submitted:
return q1, q2, q3, q4, q5
return False
def form_settings():
title = st.text_input("Form Title", value=st.session_state.form_title)
question_1 = st.text_input("Question 1:", value=st.session_state.question_1)
question_2 = st.text_input("Question 2:", value=st.session_state.question_2)
question_3 = st.text_input("Question 3:", value=st.session_state.question_3)
question_4 = st.text_input("Question 4:", value=st.session_state.question_4)
question_5 = st.text_input("Question 5:", value=st.session_state.question_5)
if st.button("Update Questions"):
st.session_state.form_title = title
st.session_state.question_1 = question_1
st.session_state.question_2 = question_2
st.session_state.question_3 = question_3
st.session_state.question_4 = question_4
st.session_state.question_5 = question_5
def chatbot_settings():
temp = st.number_input("Temperature", value=st.session_state.temp, min_value=0.0, max_value=1.0, step=0.1)
k_memory = st.number_input("K Memory", value=st.session_state.k_memory, min_value=0, max_value=4, step=1)
presence_penalty = st.number_input("Presence Penalty", value=st.session_state.presence_penalty, min_value=-2.0, max_value=2.0, step=0.1)
frequency_penalty = st.number_input("Frequency Penalty", value=st.session_state.frequency_penalty, min_value=-2.0, max_value=2.0, step=0.1)
if st.button("Update Chatbot Settings", key = 1):
st.session_state.temp = temp
st.session_state.k_memory = k_memory
st.session_state.presence_penalty = presence_penalty
st.session_state.frequency_penalty = frequency_penalty
def prompt_template_settings():
st.info("You can use the following variables which is link to your first 5 questions in your form prompt inputs: {q1}, {q2}, {q3}, {q4}, {q5}")
form_input = st.text_area("Enter your form prompt:", value = st.session_state.my_app_template, height=300 )
st.info("Enter your app prompt template here, you can add the following variables: {source}, {resource} ")
prompt_template = st.text_area("Enter your application prompt design", value = st.session_state.my_form_template, height=300)
if st.button("Update Prompt Template", key = 2):
st.session_state.my_app_template = form_input
st.session_state.my_form_template = prompt_template
def advance_prompt_template_settings():
st.info("You can use the following variables in your prompt template: {mem}, {source}, {resource}")
prompt_template = st.text_area("Enter your prompt template here:", value = st.session_state.my_app_template_advance, height=300)
if st.button("Update Prompt Template"):
st.session_state.my_app_template_advance = prompt_template
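# Fill the advanced prompt template with the conversation memory and the top search hit's source and content.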
def advance_prompt_template(memory, source, resource):
text = st.session_state.my_app_template_advance
return text.format( mem=memory, source=source, resource=resource)
def prompt_template(results):
text = st.session_state.my_app_template
return text.format(q1=results[0], q2=results[1], q3=results[2], q4=results[3], q5=results[4])
def form_template(source, resource):
text = st.session_state.my_form_template
return text.format(source=source, resource=resource)
def my_first_app(bot_name):
st.subheader("Protyping a chatbot")
with st.expander("Prototype Settings"):
st.write("Current Form Template: ", st.session_state.my_form_template)
st.write("Current Prompt Template: ", st.session_state.my_app_template)
results = ""
results = form_input()
if results != False:
form_output = prompt_template(results)
basic_bot(form_output , bot_name)
def my_first_app_advance(bot_name):
st.subheader("Protyping a chatbot")
with st.expander("Prototype Settings"):
st.write("Current Prompt Template: ", st.session_state.my_app_template_advance)
prototype_advance_bot(bot_name)
def prototype_settings():
tab1, tab2, tab3, tab4 = st.tabs(["Prototype Input Settings", "Template settings", "Prototype Chatbot Settings", "KB settings"])
with tab1:
st.subheader("Basic Prototype Chatbot template Settings")
form_settings()
prompt_template_settings()
with tab2:
st.subheader("Advance Prototype Chatbot Template settings")
advance_prompt_template_settings()
with tab3:
st.subheader("Prototype Chatbot Settings")
chatbot_settings()
with tab4:
st.subheader("KB settings")
st.write("KB settings")
display_vectorstores()
vectorstore_selection_interface(st.session_state.user['id'])
#below ------------------------------ QA base bot , K=2 memory for short term memory---------------------------------------------
#using the query from lanceDB and vector store , combine with memory
def prompt_template_prototype(prompt):
#st.write(type(st.session_state.vs))
if st.session_state.vs:
docs = st.session_state.vs.similarity_search(prompt)
resource = docs[0].page_content
source = docs[0].metadata
else:
source = ""
resource = ""
if "memory" not in st.session_state:
st.session_state.memory = ConversationBufferWindowMemory(k=st.session_state.k_memory)
mem = st.session_state.memory.load_memory_variables({})
#st.write(resource)
prompt = advance_prompt_template(mem, source, resource)
return prompt
#chat completion memory for streamlit using memory buffer
def chat_completion_prototype(prompt):
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
prompt_template = prompt_template_prototype(prompt)
response = openai.ChatCompletion.create(
model=st.session_state.openai_model,
messages=[
{"role": "system", "content":prompt_template },
{"role": "user", "content": prompt},
],
temperature=st.session_state.temp, #settings option
presence_penalty=st.session_state.presence_penalty, #settings option
frequency_penalty=st.session_state.frequency_penalty, #settings option
stream=True #settings option
)
return response
#integration API call into streamlit chat components with memory and qa
def prototype_advance_bot(bot_name):
greetings_str = f"Hi, I am {bot_name}"
help_str = "How can I help you today?"
# Check if st.session_state.msg exists, and if not, initialize with greeting and help messages
if 'msg' not in st.session_state:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
elif st.session_state.msg == []:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
for message in st.session_state.msg:
with st.chat_message(message["role"]):
st.markdown(message["content"])
try:
if prompt := st.chat_input("Enter your query"):
st.session_state.msg.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in chat_completion_prototype(prompt):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
#Response Rating
st.session_state.msg.append({"role": "assistant", "content": full_response})
st.session_state["memory"].save_context({"input": prompt},{"output": full_response})
# Insert data into the table
now = datetime.now() # Timestamp formatted below as dd/mm/yyyy HH:MM:SS
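# Rough token estimate: combined character count scaled by 1.3, not an exact tokenizer count.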
num_tokens = len(full_response + prompt)*1.3
#st.write(num_tokens)
insert_into_data_table(now.strftime("%d/%m/%Y %H:%M:%S"), full_response, prompt, num_tokens, bot_name)
except Exception as e:
st.exception(e)
#chat completion memory for streamlit using memory buffer
def template_prompt(prompt, prompt_template):
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
response = openai.ChatCompletion.create(
model=st.session_state.openai_model,
messages=[
{"role": "system", "content":prompt_template},
{"role": "user", "content": prompt},
],
temperature=st.session_state.temp, #settings option
presence_penalty=st.session_state.presence_penalty, #settings option
frequency_penalty=st.session_state.frequency_penalty, #settings option
stream=True #settings option
)
return response
def basic_bot(prompt, bot_name):
try:
if prompt:
if "memory" not in st.session_state:
st.session_state.memory = ConversationBufferWindowMemory(k=st.session_state.k_memory)
st.session_state.msg.append({"role": "user", "content": prompt})
message_placeholder = st.empty()
#check if there is any knowledge base
if st.session_state.vs:
docs = st.session_state.vs.similarity_search(prompt)
resource = docs[0].page_content
source = docs[0].metadata
else:
resource = ""
source = ""
st.session_state.my_form_template = form_template(source, resource)
full_response = ""
for response in template_prompt(prompt, st.session_state.my_form_template):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.msg.append({"role": "assistant", "content": full_response})
st.session_state["memory"].save_context({"input": prompt},{"output": full_response})
# This is to send the lesson_plan to the lesson design map
st.session_state.lesson_plan = full_response
# Insert data into the table
now = datetime.now() # Timestamp formatted below as dd/mm/yyyy HH:MM:SS
num_tokens = len(full_response + prompt)*1.3
#st.write(num_tokens)
insert_into_data_table(now.strftime("%d/%m/%Y %H:%M:%S"), full_response, prompt, num_tokens, bot_name)
except Exception as e:
st.error(e)
| [
"Enter your prompt template here:",
"Enter your application prompt design"
] |
2024-01-10 | twahidin/workshop_final_bot | main_bot.py | import streamlit as st
import openai
import sqlite3
from authenticate import return_api_key
from datetime import datetime
from langchain.memory import ConversationSummaryBufferMemory
from langchain.memory import ConversationBufferWindowMemory
from langchain.chat_models import ChatOpenAI
import streamlit_antd_components as sac
from k_map import (
map_prompter_with_plantuml,
generate_plantuml_mindmap,
render_diagram
)
import configparser
import os
config = configparser.ConfigParser()
config.read('config.ini')
NEW_PLAN = config['constants']['NEW_PLAN']
FEEDBACK_PLAN = config['constants']['FEEDBACK_PLAN']
PERSONAL_PROMPT = config['constants']['PERSONAL_PROMPT']
DEFAULT_TEXT = config['constants']['DEFAULT_TEXT']
# Create or check for the 'database' directory in the current working directory
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")
if not os.path.exists(WORKING_DIRECTORY):
os.makedirs(WORKING_DIRECTORY)
if st.secrets["sql_ext_path"] == "None":
WORKING_DATABASE= os.path.join(WORKING_DIRECTORY , st.secrets["default_db"])
else:
WORKING_DATABASE= st.secrets["sql_ext_path"]
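# Render up to five preset prompt buttons from a dict of sent_1..sent_5 labels and return the clicked label.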
def set_chat_prompts(dict_buttons, key):
# Extract values from the dictionary and store in a list
button_labels = [dict_buttons.get(f"sent_{i+1}", "disabled") for i in range(5)]
# Create button items using the extracted labels
button_items = [sac.ButtonsItem(label=label) for label in button_labels]
str = sac.buttons(button_items, index=None, format_func='title', align='left', size='small', key=key)
if str:
return str
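# Turn the latest chatbot answer into a PlantUML mind map and render it as an image in the chat.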
def metacognitive_prompter(full_response):
with st.status("Generating visuals..."):
input = map_prompter_with_plantuml(full_response)
uml = generate_plantuml_mindmap(input)
image = render_diagram(uml)
st.image(image, use_column_width=True)
#input = map_prompter_with_mermaid_syntax(full_response)
#generate_mindmap(input)
#response rating component
def rating_component():
rating_value = sac.rate(label='Response ratings:', position='left', clear=True, value=2.0, align='left', size=15, color='#25C3B0')
return rating_value
def insert_into_data_table(date, chatbot_ans,user_prompt, tokens, function_name, value=0):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Insert data into Data_Table using preloaded session state value
cursor.execute('''
INSERT INTO Data_Table (date, user_id, profile_id, chatbot_ans, user_prompt, function_name, tokens, response_rating)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
''', (date, st.session_state.data_profile["user_id"], st.session_state.data_profile["profile_id"], chatbot_ans, user_prompt, function_name, tokens, value))
conn.commit()
conn.close()
#clear messages and memory
def clear_session_states():
st.session_state.msg = []
if "memory" not in st.session_state:
pass
else:
del st.session_state["memory"]
#below ------------------------------ QA base bot , K=2 memory for short term memory---------------------------------------------
#using the query from lanceDB and vector store , combine with memory
def memory_buffer_qa_component(prompt):
#st.write(type(st.session_state.vs))
if st.session_state.vs:
docs = st.session_state.vs.similarity_search(prompt)
resource = docs[0].page_content
source = docs[0].metadata
if "memory" not in st.session_state:
st.session_state.memory = ConversationBufferWindowMemory(k=st.session_state.k_memory)
mem = st.session_state.memory.load_memory_variables({})
#st.write(resource)
prompt_template = st.session_state.chatbot + f"""
Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
Search Result:
{resource}
{source}
History of conversation:
{mem}
You must quote the source of the Search Result if you are using the search result as part of the answer"""
return prompt_template
#chat completion memory for streamlit using memory buffer
def chat_completion_qa_memory(prompt):
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
prompt_template = memory_buffer_qa_component(prompt)
response = openai.ChatCompletion.create(
model=st.session_state.openai_model,
messages=[
{"role": "system", "content":prompt_template },
{"role": "user", "content": prompt},
],
temperature=st.session_state.temp, #settings option
presence_penalty=st.session_state.presence_penalty, #settings option
frequency_penalty=st.session_state.frequency_penalty, #settings option
stream=True #settings option
)
return response
#integration API call into streamlit chat components with memory and qa
def basebot_qa_memory(bot_name):
greetings_str = f"Hi, I am {bot_name}"
help_str = "How can I help you today?"
# Check if st.session_state.msg exists, and if not, initialize with greeting and help messages
if 'msg' not in st.session_state:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
elif st.session_state.msg == []:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
for message in st.session_state.msg:
with st.chat_message(message["role"]):
st.markdown(message["content"])
try:
if prompt := st.chat_input("Enter your query"):
st.session_state.msg.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in chat_completion_qa_memory(prompt):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
#Response Rating
if st.session_state.rating == True:
feedback_value = rating_component()
else:
feedback_value = 0
st.session_state.msg.append({"role": "assistant", "content": full_response})
st.session_state["memory"].save_context({"input": prompt},{"output": full_response})
# Insert data into the table
now = datetime.now() # Timestamp formatted below as dd/mm/yyyy HH:MM:SS
num_tokens = len(full_response + prompt)*1.3
#st.write(num_tokens)
insert_into_data_table(now.strftime("%d/%m/%Y %H:%M:%S"), full_response, prompt, num_tokens, bot_name, feedback_value)
if st.session_state.visuals == True:
metacognitive_prompter(full_response)
#metacognitive_prompter(full_response)
except Exception as e:
st.exception(e)
#below ------------------------------ base bot , K=2 memory for short term memory---------------------------------------------
#faster and more precise but no summary
def memory_buffer_component():
if "memory" not in st.session_state:
st.session_state.memory = ConversationBufferWindowMemory(k=st.session_state.k_memory)
#st.write("Messages ", messages)
mem = st.session_state.memory.load_memory_variables({})
#For more customisation, this can be in the config.ini file
prompt_template = st.session_state.chatbot + f"""
History of conversation:
{mem}"""
return prompt_template
#chat completion memory for streamlit using memory buffer
def chat_completion_memory(prompt):
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
prompt_template = memory_buffer_component()
#st.write("Prompt Template ", prompt_template)
response = openai.ChatCompletion.create(
model=st.session_state.openai_model,
messages=[
{"role": "system", "content":prompt_template },
{"role": "user", "content": prompt},
],
temperature=st.session_state.temp, #settings option
presence_penalty=st.session_state.presence_penalty, #settings option
frequency_penalty=st.session_state.frequency_penalty, #settings option
stream=True #settings option
)
return response
#integration API call into streamlit chat components with memory
def basebot_memory(bot_name):
greetings_str = f"Hi, I am {bot_name}"
help_str = "How can I help you today?"
# Check if st.session_state.msg exists, and if not, initialize with greeting and help messages
if 'msg' not in st.session_state:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
elif st.session_state.msg == []:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
for message in st.session_state.msg:
with st.chat_message(message["role"]):
st.markdown(message["content"])
try:
if prompt := st.chat_input("What is up?"):
st.session_state.msg.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in chat_completion_memory(prompt):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
if st.session_state.rating == True:
feedback_value = rating_component()
else:
feedback_value = 0
st.session_state.msg.append({"role": "assistant", "content": full_response})
st.session_state["memory"].save_context({"input": prompt},{"output": full_response})
# Insert data into the table
now = datetime.now() # Timestamp formatted below as dd/mm/yyyy HH:MM:SS
num_tokens = len(full_response + prompt)*1.3
#st.write(num_tokens)
insert_into_data_table(now.strftime("%d/%m/%Y %H:%M:%S"), full_response, prompt, num_tokens, bot_name, feedback_value)
if st.session_state.visuals == True:
metacognitive_prompter(full_response)
except Exception as e:
st.error(e)
#below ------------------------------ Suitable for Q & A--------------------------------------------
#below ------------------------------ base bot , no memory ---------------------------------------------
#chat completion for streamlit function
def chat_completion(prompt):
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
response = openai.ChatCompletion.create(
model=st.session_state.openai_model,
messages=[
{"role": "system", "content": st.session_state.chatbot},
{"role": "user", "content": prompt},
],
temperature=st.session_state.temp, #settings option
presence_penalty=st.session_state.presence_penalty, #settings option
frequency_penalty=st.session_state.frequency_penalty, #settings option
stream=True #settings option
)
return response
#integration API call into streamlit chat components
def basebot(bot_name):
greetings_str = f"Hi, I am {bot_name}"
help_str = "How can I help you today?"
# Check if st.session_state.msg exists, and if not, initialize with greeting and help messages
if 'msg' not in st.session_state:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
elif st.session_state.msg == []:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
for message in st.session_state.msg:
with st.chat_message(message["role"]):
st.markdown(message["content"])
try:
if prompt := st.chat_input("What is up?"):
st.session_state.msg.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in chat_completion(prompt):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
#Response Rating
if st.session_state.rating == True:
feedback_value = rating_component()
else:
feedback_value = 0
now = datetime.now() # Timestamp formatted below as dd/mm/yyyy HH:MM:SS
num_tokens = len(full_response + prompt)*1.3
st.session_state.msg.append({"role": "assistant", "content": full_response})
insert_into_data_table(now.strftime("%d/%m/%Y %H:%M:%S"), full_response, prompt, num_tokens, bot_name, feedback_value)
if st.session_state.visuals == True:
metacognitive_prompter(full_response)
except Exception as e:
st.error(e)
#below ------------------------------ base bot , with vectorstore ---------------------------------------------
def qa_component(prompt):
#st.write(type(st.session_state.vs))
if st.session_state.vs:
docs = st.session_state.vs.similarity_search(prompt)
resource = docs[0].page_content
source = docs[0].metadata
#st.write(resource)
prompt_template = st.session_state.chatbot + f"""
Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
Search Result:
{resource}
{source}
You must quote the source of the Search Result if you are using the search result as part of the answer"""
return prompt_template
#chat completion with vectorstore for streamlit
def chat_completion_qa(prompt):
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
#show the qa component results in the prompt
prompt_template = qa_component(prompt)
response = openai.ChatCompletion.create(
model=st.session_state.openai_model,
messages=[
{"role": "system", "content":prompt_template },
{"role": "user", "content": prompt},
],
temperature=st.session_state.temp, #settings option
presence_penalty=st.session_state.presence_penalty, #settings option
frequency_penalty=st.session_state.frequency_penalty, #settings option
stream=True #settings option
)
return response
#chat completion with vectorstore for streamlit
def basebot_qa(bot_name):
greetings_str = f"Hi, I am {bot_name}"
help_str = "How can I help you today?"
# Check if st.session_state.msg exists, and if not, initialize with greeting and help messages
if 'msg' not in st.session_state:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
elif st.session_state.msg == []:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
for message in st.session_state.msg:
with st.chat_message(message["role"]):
st.markdown(message["content"])
try:
if prompt := st.chat_input("What is up?"):
st.session_state.msg.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in chat_completion_qa(prompt):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
#Response Rating
if st.session_state.rating == True:
feedback_value = rating_component()
else:
feedback_value = 0
st.session_state.msg.append({"role": "assistant", "content": full_response})
# Insert data into the table
now = datetime.now() # Timestamp formatted below as dd/mm/yyyy HH:MM:SS
num_tokens = len(full_response + prompt)*1.3
#st.write(num_tokens)
insert_into_data_table(now.strftime("%d/%m/%Y %H:%M:%S"), full_response, prompt, num_tokens, bot_name, feedback_value)
if st.session_state.visuals == True:
metacognitive_prompter(full_response)
#metacognitive_prompter(full_response)
except Exception as e:
st.exception(e)
#----------------------------------return search results--------------------------------------------
def return_search_raw_results(prompt):
if st.session_state.vs:
docs = st.session_state.vs.similarity_search(prompt)
ans = docs[0].page_content
source = docs[0].metadata.get('source', None)
return f"""{ans} \n\n Source: ({source})"""
def search_bot():
for message in st.session_state.msg:
with st.chat_message(message["role"]):
st.markdown(message["content"])
try:
if prompt := st.chat_input("Enter your search query"):
st.session_state.msg.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
full_response = return_search_raw_results(prompt)
message_placeholder.markdown(full_response)
if st.session_state.rating == True:
feedback_value = rating_component()
else:
feedback_value = 0
#message_placeholder.markdown(source)
st.session_state.msg.append({"role": "assistant", "content": full_response})
# Insert data into the table
now = datetime.now() # Timestamp formatted below as dd/mm/yyyy HH:MM:SS
num_tokens = len(full_response + prompt)*1.3
#st.write(num_tokens)
insert_into_data_table(now.strftime("%d/%m/%Y %H:%M:%S"), full_response, prompt, num_tokens, "search_bot", feedback_value) # "search_bot" assumed as the function_name for this logging call; without it the rating would be stored as the function name
if st.session_state.visuals == True:
metacognitive_prompter(full_response)
except Exception as e:
st.error(e)
#below ------------------------------ base bot , summary memory for long conversation---------------------------------------------
#summary of conversation , requires another LLM call for every input, useful for feedback and summarising what was spoken
def memory_summary_component(prompt): #currently not in use
if "memory" not in st.session_state:
llm = ChatOpenAI(model_name=st.session_state.openai_model,temperature=st.session_state.temp)
st.session_state.memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=1000)
messages = st.session_state["memory"].chat_memory.messages
#st.write("Messages ", messages)
previous_summary = ""
mem = st.session_state["memory"].predict_new_summary(messages, previous_summary)
prompt_template = st.session_state.chatbot + f"""
Summary of current conversation:
{mem}"""
return prompt_template
| [
"constants",
" \n\t\t\t\t\t\tHistory of conversation:\n\t\t\t\t\t\tPLACEHOLDER",
"\n\t\t\t\t\t\tUse the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. \n\t\t\t\t\t\tSearch Result:\n\t\t\t\t\t\tPLACEHOLDER\n\t\t\t\t\t\tPLACEHOLDER\n\t\t\t\t\t\tHistory of conversation:\n\t\t\t\t\t\tPLACEHOLDER\n\t\t\t\t\t\tYou must quote the source of the Search Result if you are using the search result as part of the answer",
"PERSONAL_PROMPT",
"\n\t\t\t\t\t\tSummary of current conversation:\n\t\t\t\t\t\tPLACEHOLDER",
"\n\t\t\t\t\t\tUse the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. \n\t\t\t\t\t\tSearch Result:\n\t\t\t\t\t\tPLACEHOLDER\n\t\t\t\t\t\tPLACEHOLDER\n\t\t\t\t\t\tYou must quote the source of the Search Result if you are using the search result as part of the answer"
] |
2024-01-10 | twahidin/workshop_final_bot | k_map.py | import streamlit as st
import openai
from plantuml import PlantUML
from authenticate import return_api_key
from streamlit.components.v1 import html
import os
import re
# Create or check for the 'database' directory in the current working directory
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")
if not os.path.exists(WORKING_DIRECTORY):
os.makedirs(WORKING_DIRECTORY)
if st.secrets["sql_ext_path"] == "None":
WORKING_DATABASE= os.path.join(WORKING_DIRECTORY , st.secrets["default_db"])
else:
WORKING_DATABASE= st.secrets["sql_ext_path"]
if "svg_height" not in st.session_state:
st.session_state["svg_height"] = 1000
if "previous_mermaid" not in st.session_state:
st.session_state["previous_mermaid"] = ""
def mermaid(code: str) -> None:
html(
f"""
<pre class="mermaid">
{code}
</pre>
<script type="module">
import mermaid from 'https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.esm.min.mjs';
mermaid.initialize({{ startOnLoad: true }});
</script>
""",
height=st.session_state["svg_height"] + 50,
)
def map_creation_form():
"""
Creates a Streamlit form to collect user input for the knowledge map.
Returns:
tuple: subject, topic, levels
"""
subject = st.text_input("Enter a subject:")
topic = st.text_input("Enter a topic to create a knowledge map:")
levels = st.slider("Enter the number of map levels:", 1, 5, 2)
if st.button('Step 1. Generate knowledge map syntax'):
if not topic:
st.error('Please input a topic')
else:
return subject, topic, levels
return None, None, None
def map_prompter(subject, topic, levels):
"""
Generates a prompt based on the provided subject, topic, and levels.
Args:
subject (str): Subject input by user.
topic (str): Topic input by user.
levels (int): Levels input by user.
Returns:
str: Generated prompt
"""
prompt = f"""Let's start by creating a diagram using the mermaid js syntax on the subject of {subject} on the topic of {topic}.
You must give a mindmap, class diagram or flowchart diagram in mermaid js syntax. Keep it structured from the core central topic branching out to other domains and sub-domains.
Let's go to {levels} levels to begin with.
    Expand the branch based on the complexity of each topic in terms of the time it takes to learn that topic for a beginner. You must output between these brackets with * and & as shown here for example: *(& MERMAID SYNTAX &)*"""
return prompt
def extract_mermaid_syntax(text):
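    """
    Extracts the Mermaid syntax from a model response.
    Looks for a fenced mermaid code block first, then falls back to the *(& ... &)* markers requested in the prompt.
    Args:
        text (str): Raw text returned by the model.
    Returns:
        str: The extracted Mermaid syntax, or a not-found message.
    """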
#st.text(text)
pattern = r"```\s*mermaid\s*([\s\S]*?)\s*```"
match = re.search(pattern, text)
if match:
return match.group(1).strip()
else:
pattern = r"\*\(&\s*([\s\S]*?)\s*&\)\*"
match = re.search(pattern, text)
if match:
return match.group(1).strip()
else:
return "Mermaid syntax not found in the provided text."
def map_prompter_with_mermaid_syntax(bot_response):
"""
Generates a prompt based on a response from a chatbot for Mermaid diagram.
Args:
bot_response (str): Response from a chatbot over a topic.
Returns:
str: Generated prompt
"""
prompt = f"""Given the insights from our chatbot: '{bot_response}',
let's create a visual representation. Generate a diagram using the Mermaid JS syntax.
This can be a mindmap, class diagram, or flowchart.
Structure it from the central topic, branching out to other domains and sub-domains.
Expand the branch based on the complexity of each topic in terms of the time it takes to learn that topic for a beginner.
You must output the mermaid syntax between these special brackets with * and &: *(& MERMAID SYNTAX &)*"""
return prompt
def generate_mindmap(prompt):
try:
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
# Generate response using OpenAI API
response = openai.ChatCompletion.create(
model=st.session_state.openai_model,
messages=[{"role": "user", "content": prompt}],
temperature=st.session_state.temp, #settings option
presence_penalty=st.session_state.presence_penalty, #settings option
frequency_penalty=st.session_state.frequency_penalty #settings option
)
if response['choices'][0]['message']['content'] != None:
msg = response['choices'][0]['message']['content']
st.text(msg)
extracted_code = extract_mermaid_syntax(msg)
st.write(extracted_code)
return extracted_code
except openai.APIError as e:
st.error(e)
st.error("Please type in a new topic or change the words of your topic again")
return False
except Exception as e:
st.error(e)
st.error("Please type in a new topic or change the words of your topic again")
return False
def output_mermaid_diagram(mermaid_code):
"""
Outputs the mermaid diagram in a Streamlit app.
Args:
mermaid_code (str): Mermaid code to be rendered.
"""
if mermaid_code:
mermaid(mermaid_code)
else:
st.error("Please type in a new topic or change the words of your topic again")
return False
def map_prompter_with_plantuml_form(subject, topic, levels):
"""
Generates a prompt based on a response from a chatbot for plantuml.
"""
prompt = prompt = f"""Let's start by creating a simple MindMap on the subject of {subject} with topic of {topic}.
Can you give the mindmap in PlantUML format. Keep it structured from the core central topic branching out to other domains and sub-domains.
Let's go to {levels} levels to begin with. Add the start and end mindmap tags and keep it expanding on one side for now.
Also, please add color codes to each node based on the complexity of each topic in terms of the time it takes to learn that topic for a beginner. Use the format *[#colour] topic.
"""
return prompt
def map_prompter_with_plantuml(response):
"""
Generates a prompt based on a response from a chatbot for plantuml.
"""
prompt = prompt = f"""Let's start by creating a simple MindMap on the chatbot response which is {response}.
Can you give the mindmap in PlantUML format. Keep it structured from the core central topic branching out to other domains and sub-domains.
Let's go to 3 levels to begin with and up to 6 at most. Add the start and end mindmap tags and keep it expanding on one side for now.
Also, please add color codes to each node based on the complexity of each topic in terms of the time it takes to learn that topic for a beginner. Use the format *[#colour] topic.
"""
return prompt
def generate_plantuml_mindmap(prompt):
try:
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
# Generate response using OpenAI API
response = openai.ChatCompletion.create(
model=st.session_state.openai_model,
messages=[{"role": "user", "content": prompt}],
temperature=st.session_state.temp, #settings option
presence_penalty=st.session_state.presence_penalty, #settings option
frequency_penalty=st.session_state.frequency_penalty #settings option
)
if response['choices'][0]['message']['content'] != None:
msg = response['choices'][0]['message']['content']
p_syntax = re.search(r'@startmindmap.*?@endmindmap', msg, re.DOTALL).group()
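            # remove the space between the leading asterisks and '[' so colour annotations like *[#colour] are valid PlantUML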
modified_syntax = re.sub(r'(\*+) \[', r'\1[', p_syntax)
return modified_syntax
except openai.APIError as e:
st.error(e)
st.error("Please type in a new topic or change the words of your topic again")
return False
except Exception as e:
st.error(e)
st.error("Please type in a new topic or change the words of your topic again")
return False
# Define a function to render the PlantUML diagram
def render_diagram(uml):
p = PlantUML("http://www.plantuml.com/plantuml/img/")
image = p.processes(uml)
return image
| [
"Given the insights from our chatbot: 'PLACEHOLDER', \n let's create a visual representation. Generate a diagram using the Mermaid JS syntax. \n This can be a mindmap, class diagram, or flowchart. \n Structure it from the central topic, branching out to other domains and sub-domains.\n Expand the branch based on the complexity of each topic in terms of the time it takes to learn that topic for a beginner.\n You must output the mermaid syntax between these special brackets with * and &: *(& MERMAID SYNTAX &)*",
"Let's start by creating a diagram using the mermaid js syntax on the subject of PLACEHOLDER on the topic of PLACEHOLDER.\n You must give a mindmap, class diagram or flowchart diagram in mermaid js syntax. Keep it structured from the core central topic branching out to other domains and sub-domains.\n Let's go to PLACEHOLDER levels to begin with. \n Expand the branch based on the complexity of each topic in terms of the time it takes to learn that topic for a beginner.You must output between these brackets with * and & as shown here for example: *(& MERMAID SYNTAY &)*",
"Let's start by creating a simple MindMap on the chatbot response which is PLACEHOLDER. \n Can you give the mindmap in PlantUML format. Keep it structured from the core central topic branching out to other domains and sub-domains. \n Let's go to 3 levels to begin with and up to 6 at most. Add the start and end mindmap tags and keep it expanding on one side for now. \n Also, please add color codes to each node based on the complexity of each topic in terms of the time it takes to learn that topic for a beginner. Use the format *[#colour] topic. \n ",
"Let's start by creating a simple MindMap on the subject of PLACEHOLDER with topic of PLACEHOLDER. \n Can you give the mindmap in PlantUML format. Keep it structured from the core central topic branching out to other domains and sub-domains. \n Let's go to PLACEHOLDER levels to begin with. Add the start and end mindmap tags and keep it expanding on one side for now. \n Also, please add color codes to each node based on the complexity of each topic in terms of the time it takes to learn that topic for a beginner. Use the format *[#colour] topic. \n "
] |
2024-01-10 | Hackerismydream/GLOBAL-CS-CVHelper | application~interview.py | """
Module for generating interview questions from a PDF file.
"""
import re
import PyPDF2
from application.prompts import QUESTION_PROMPT,GLOBAL_PROMPT
from application.utils import OpenAIConfig, query_ai
from typing import Union, IO
class InterviewQuestionMaker:
"""
Class to create interview questions based on a PDF resume.
"""
def __init__(self, config: OpenAIConfig = OpenAIConfig(), prompt: str = QUESTION_PROMPT):
"""Initialize the InterviewQuestionMaker with the specified configuration."""
self.config = config
self.prompt = prompt
self.global_prompt = GLOBAL_PROMPT
def create_questions(self, pdf_stream: Union[str, IO]) -> str:
"""
Create interview questions for the given PDF resume file.
Args:
pdf_stream (IO): The PDF file as a stream.
"""
pdf_str = self.pdf_to_str(pdf_stream)
prompt = self.complete_prompt(pdf_str)
return query_ai(self.config, prompt)
def make_global(self, pdf_stream: Union[str, IO]) -> str:
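        """
        Query the model with the GLOBAL_PROMPT for the given PDF resume.
        Args:
            pdf_stream (IO): The PDF file as a stream.
        """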
pdf_str = self.pdf_to_str(pdf_stream)
prompt = self.complete_global_prompt(pdf_str)
print(prompt)
return query_ai(self.config, prompt)
def complete_prompt(self, pdf_str: str) -> str:
"""
Complete the prompt with the given PDF string.
Args:
pdf_str (str): PDF content as a string.
"""
return self.prompt.format(resume=pdf_str)
def complete_global_prompt(self, pdf_str: str) -> str:
"""
Complete the prompt with the given PDF string.
Args:
pdf_str (str): PDF content as a string.
"""
return self.global_prompt.format(resume=pdf_str)
def pdf_to_str(self, pdf_stream: Union[str, IO]) -> str:
"""
Convert the given PDF file to a string.
Args:
pdf_stream (IO): The PDF file as a stream.
"""
pdf = PyPDF2.PdfReader(pdf_stream)
pages = [self.format_pdf(p.extract_text()) for p in pdf.pages]
return "\n\n".join(pages)
def format_pdf(self, pdf_str: str) -> str:
"""
Format the given PDF string by applying pattern replacements.
Args:
pdf_str (str): PDF content as a string.
"""
pattern_replacements = {
r"\s[,.]": ",",
r"[\n]+": "\n",
r"[\s]+": " ",
r"http[s]?(://)?": "",
}
for pattern, replacement in pattern_replacements.items():
pdf_str = re.sub(pattern, replacement, pdf_str)
return pdf_str
| [] |
2024-01-10 | xorsuyash/LUMOS | src~backend%20~dataset.py | import os
import sys
import tqdm
from langchain.text_splitter import CharacterTextSplitter
# Function that takes a path and parses the files present in that folder
# Subject - Units and then topics
# If it's a book, then the whole book in general
# web parser
# pdf parser
# book parser
# After parsing, store the result in JSON format
# text data for GFG
class Vocabulary:
    def __init__(self):
        pass
    def ch_to_index(self):
        pass
    def index_to_character(self):
        pass
class TextDataset:
def __init__(self,parent_folder):
self.parent_folder=parent_folder
def path_loader(self):
return self._find_files(self.parent_folder)
def _find_files(self,root_dir):
file_list=[]
for root, _, files in os.walk(root_dir):
for file in files:
file_list.append(os.path.join(root, file))
return file_list
def text_splitter(self):
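        # concatenate every file's text (prefixed with <SOS>) and split it into 1000-character chunks with 200-character overlap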
file_list=self.path_loader()
text_splitter=CharacterTextSplitter(
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
text='<SOS>'
for file in file_list:
with open(file,'r') as f:
raw_text=f.read()
text=text+' '+raw_text
chunks = text_splitter.split_text(text)
return chunks
def text_loader(self):
return self.text_splitter()
def vocabulary(self):
pass
| [] |
2024-01-10 | xorsuyash/LUMOS | src~backend%20~vectorize.py | import langchain
from langchain.vectorstores import Qdrant
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
import dotenv
import os
import qdrant_client
import sys
from dataset import TextDataset
import openai
# loading environment variables for Qdrant
dotenv.load_dotenv()
quadrant_host=os.getenv('QUADRANT_HOST')
quadrant_api_key=os.getenv('QUADRANT_API_KEY')
# setting up the Qdrant client
client = qdrant_client.QdrantClient(
quadrant_host,
api_key=quadrant_api_key
)
#collection name
#os.environ["QDRANT_COLLECTION_NAME"] = "DSA-collection"
#creating_collection
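# vector size 1536 matches OpenAI's text-embedding-ada-002 embeddings (the default model for OpenAIEmbeddings)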
vector_config=qdrant_client.http.models.VectorParams(
size=1536,
distance=qdrant_client.http.models.Distance.COSINE
)
client.recreate_collection(
collection_name="DSA-collection",
vectors_config=vector_config,
)
#creating a vector store
openai_api_key=os.getenv('OPENAI_API_KEY')
embeddings=OpenAIEmbeddings()
vectorstore=Qdrant(
client=client,
collection_name="DSA-collection",
embeddings=embeddings
)
dataset=TextDataset('/home/xorsuyash/Desktop/LUMOS/GFG')
text_chunks=dataset.text_loader()
print("Chunks created")
vectorstore.add_texts(text_chunks[:100])
| [] |
2024-01-10 | xorsuyash/LUMOS | src~backend%20~app_streamlit.py | from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
import streamlit as st
from PyPDF2 import PdfReader
def get_pdf_text(pdf_docs):
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text()
return text
def pdf_text(file_path):
    with open(file_path, 'r') as f:
        return f.read()
st.header("LUMOS :books:")
user_question = st.text_input("Ask a question about your subject:")
with st.sidebar:
st.subheader("Your Documents")
pdf_docs = st.file_uploader(
"Upload your PDFs here and click on 'Process'", accept_multiple_files=True)
if st.button("Process"):
with st.spinner("Processing"):
raw_text = get_pdf_text(pdf_docs) | [] |
2024-01-10 | xta0/CodeBase | openai~hello.py | import os
from openai import OpenAI
# token
import tiktoken
encoding = tiktoken.encoding_for_model("gpt-4")
chinese = "宛如宿命一般,如今他再次被卷入“命运转盘”的争夺,而纳粹的残余势力也卷土重来,觊觎着这件宝物"
tokens = encoding.encode(chinese)
print(tokens)
print(len(tokens))
api_key = os.environ.get("OPEN_AI_API_KEY")
print(api_key)
client = OpenAI(
api_key = api_key
)
def test():
    response = client.chat.completions.create(
        messages=[
            {"role": "system", "content": "你是一个AI助理"},
            {"role": "user", "content": "你好!你叫什么名字?"}
        ],
        temperature = 0.9, # (0~2), lower values give more stable output
        max_tokens = 200,
        model="gpt-4",
        # n = 3, # number of replies
)
for choice in response.choices:
print(choice.message.content)
# test()
| [
"你好!你叫什么名字?",
"你是一个AI助理"
] |
2024-01-10 | PrefectHQ/langchain-prefect | examples~openai~qa_with_sources.py | """Example observing LLM calls made by querying `VectorstoreIndexWrapper`."""
from langchain import document_loaders
from langchain.indexes import VectorstoreIndexCreator
from langchain_prefect.plugins import RecordLLMCalls
loader = document_loaders.TextLoader("context/state_of_the_union.txt")
index = VectorstoreIndexCreator().from_loaders([loader])
query = "What did the president say about Ketanji Brown Jackson?"
with RecordLLMCalls(tags={index.vectorstore.__class__.__name__}):
# defaults to OpenAI llm if not specified
index.query_with_sources(query)
| [] |
2024-01-10 | PrefectHQ/langchain-prefect | tests~test_plugins.py | from langchain.llms import OpenAI
from langchain_prefect.utilities import NotAnArtifact, llm_invocation_summary
class TestParseInvocationSummary:
def test_parse_callable_llm(self):
"""Test that LLM invocation summary is parsed correctly using a callable LLM."""
llm_input = "What would be a good name for a company that makes colorful socks?"
artifact = llm_invocation_summary(
OpenAI(), llm_input, invocation_fn=lambda x: None
)
assert isinstance(artifact, NotAnArtifact)
assert artifact.content["llm_endpoint"] == "langchain.llms.openai"
assert artifact.content["prompts"] == llm_input
| [] |
2024-01-10 | PrefectHQ/langchain-prefect | examples~openai~llm_agenerate.py | """Example observing LLM calls made by `OpenAI.agenerate`."""
import asyncio
from langchain.llms import OpenAI
from langchain_prefect.plugins import RecordLLMCalls
llm = OpenAI(temperature=0.9)
async def record_call_using_LLM_agenerate():
"""async func"""
await llm.agenerate(
[
"What would be a good name for a company that makes colorful socks?",
"What would be a good name for a company that sells carbonated water?",
]
)
with RecordLLMCalls():
asyncio.run(record_call_using_LLM_agenerate())
| [] |
2024-01-10 | PrefectHQ/langchain-prefect | examples~openai~github_issue_finder.py | """Ingesting GitHub issues and finding similar ones using OpenAI."""
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms.openai import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain_prefect.loaders import GithubIssueLoader
from langchain_prefect.plugins import RecordLLMCalls
loader = GithubIssueLoader("prefecthq/prefect", n_issues=20)
chain = load_qa_with_sources_chain(OpenAI(temperature=0))
splitter = CharacterTextSplitter(separator=" ", chunk_size=1024, chunk_overlap=0)
source_chunks = [
Document(
page_content=chunk,
metadata=source.metadata,
)
for source in loader.load()
for chunk in splitter.split_text(source.page_content)
]
index = Chroma.from_documents(
documents=source_chunks,
embedding=OpenAIEmbeddings(),
)
def answer(question: str, k: int = 5):
"""Answer a question using the index."""
return chain(
{
"input_documents": index.similarity_search(question, k=k),
"question": question,
},
return_only_outputs=True,
)["output_text"]
with RecordLLMCalls(tags={index.__class__.__name__}):
print(answer("Are there open issues related to large mapped tasks?"))
| [] |
2024-01-10 | PrefectHQ/langchain-prefect | tests~test_utilities.py | import pytest
from prefect import Flow
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from langchain_prefect import utilities as utils
@pytest.mark.parametrize(
"text, expected_num_tokens",
[
("", 0),
(" ", 1),
("Hello, world!", 4),
("Foo bar baz", 3),
("Foo bar baz".split(), 3),
],
)
def test_num_tokens(text, expected_num_tokens):
"""Test that num_tokens returns the correct number of tokens."""
assert utils.num_tokens(text) == expected_num_tokens
def test_flow_wrapped_fn():
"""Test that flow_wrapped_fn returns a flow."""
def fn():
pass
async def async_fn():
pass
wrapped_sync_fn = utils.flow_wrapped_fn(fn)
wrapped_async_fn = utils.flow_wrapped_fn(async_fn)
assert isinstance(wrapped_sync_fn, Flow)
assert isinstance(wrapped_async_fn, Flow)
@pytest.mark.parametrize(
"text, max_length, expected_truncated_text",
[
("123", 4, "123"),
("123", 3, "123"),
("ICE ICE BABY", 10, "ICE...ABY"),
],
)
def test_truncate(text, max_length, expected_truncated_text):
"""Test that truncate returns the correct truncated text."""
assert utils.truncate(text, max_length) == expected_truncated_text
@pytest.mark.parametrize(
"prompts, expected_prompt_content",
[
(
[
"You should speak like a pirate.",
"I don't care about frogs.",
"What did I just say?",
],
[
"You should speak like a pirate.",
"I don't care about frogs.",
"What did I just say?",
],
),
(
[
SystemMessage(content="You should speak like a pirate."),
HumanMessage(content="I don't care about frogs."),
HumanMessage(content="What did I just say?"),
],
[
"You should speak like a pirate.",
"I don't care about frogs.",
"What did I just say?",
],
),
(
[
[
SystemMessage(content="You should speak like a pirate."),
HumanMessage(content="I don't care about frogs."),
HumanMessage(content="What did I just say?"),
]
],
[
"You should speak like a pirate.",
"I don't care about frogs.",
"What did I just say?",
],
),
],
)
def test_get_prompt_content(prompts, expected_prompt_content):
"""Test that get_prompt_content returns the correct content."""
assert utils.get_prompt_content(prompts) == expected_prompt_content
| [
"What did I just say?",
"You should speak like a pirate.",
"I don't care about frogs."
] |
2024-01-10 | PrefectHQ/langchain-prefect | langchain_prefect~loaders.py | """Loaders for Prefect."""
import asyncio
import httpx
import os
import shutil
import tempfile
from pathlib import Path
from typing import List
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
from langchain_prefect.types import GitHubComment, GitHubIssue
from prefect.utilities.asyncutils import sync_compatible
class GithubIssueLoader(BaseLoader):
"""Loader for GitHub issues for a given repository."""
def __init__(self, repo: str, n_issues: int):
"""
Initialize the loader with the given repository.
Args:
repo: The name of the repository, in the format "<owner>/<repo>"
"""
self.repo = repo
self.n_issues = n_issues
self.request_headers = {
"Accept": "application/vnd.github.v3+json",
}
# If a GitHub token is available, use it to increase the rate limit
if token := os.environ.get("GITHUB_TOKEN"):
self.request_headers["Authorization"] = f"Bearer {token}"
def _get_issue_comments(
self, issue_number: int, per_page: int = 100
) -> List[GitHubComment]:
"""
Get a list of all comments for the given issue.
Returns:
A list of dictionaries, each representing a comment.
"""
url = f"https://api.github.com/repos/{self.repo}/issues/{issue_number}/comments"
comments = []
page = 1
while True:
response = httpx.get(
url=url,
headers=self.request_headers,
params={"per_page": per_page, "page": page},
)
response.raise_for_status()
if not (new_comments := response.json()):
break
comments.extend([GitHubComment(**comment) for comment in new_comments])
page += 1
return comments
def _get_issues(self, per_page: int = 100) -> List[GitHubIssue]:
"""
Get a list of all issues for the given repository.
Returns:
A list of `GitHubIssue` objects, each representing an issue.
"""
url = f"https://api.github.com/repos/{self.repo}/issues"
issues = []
page = 1
while True:
if len(issues) >= self.n_issues:
break
remaining = self.n_issues - len(issues)
response = httpx.get(
url=url,
headers=self.request_headers,
params={
"per_page": remaining if remaining < per_page else per_page,
"page": page,
"include": "comments",
},
)
response.raise_for_status()
if not (new_issues := response.json()):
break
issues.extend([GitHubIssue(**issue) for issue in new_issues])
page += 1
return issues
def load(self) -> List[Document]:
"""
Load all issues for the given repository.
Returns:
A list of `Document` objects, each representing an issue.
"""
issues = self._get_issues()
documents = []
for issue in issues:
text = f"{issue.title}\n{issue.body}"
if issue.comments:
for comment in self._get_issue_comments(issue.number):
text += f"\n\n{comment.user.login}: {comment.body}\n\n"
metadata = {
"source": issue.html_url,
"title": issue.title,
"labels": ",".join([label.name for label in issue.labels]),
}
documents.append(Document(page_content=text, metadata=metadata))
return documents
class GitHubRepoLoader(BaseLoader):
"""Loader for files on GitHub that match a glob pattern."""
def __init__(self, repo: str, glob: str):
"""Initialize with the GitHub repository and glob pattern.
Attrs:
repo: The organization and repository name, e.g. "prefecthq/prefect"
glob: The glob pattern to match files, e.g. "**/*.md"
"""
self.repo = f"https://github.com/{repo}.git"
self.glob = glob
@sync_compatible
async def load(self) -> List[Document]:
"""Load files from GitHub that match the glob pattern."""
tmp_dir = tempfile.mkdtemp()
try:
            process = await asyncio.create_subprocess_exec(
                *["git", "clone", "--depth", "1", self.repo, tmp_dir],
                stderr=asyncio.subprocess.PIPE,
            )
            # capture stderr so a failed clone can report git's error message
            _, stderr = await process.communicate()
            if process.returncode != 0:
                raise OSError(
                    f"Failed to clone repository:\n {stderr.decode()}"
                )
# Read the contents of each file that matches the glob pattern
documents = []
for file in Path(tmp_dir).glob(self.glob):
with open(file, "r") as f:
text = f.read()
metadata = {
"source": os.path.join(self.repo, file.relative_to(tmp_dir))
}
documents.append(Document(page_content=text, metadata=metadata))
return documents
finally:
shutil.rmtree(tmp_dir)
| [] |
2024-01-10 | PrefectHQ/langchain-prefect | langchain_prefect~utilities.py | """Utilities for the langchain_prefect package."""
from typing import Any, Callable, List
import tiktoken
from langchain.schema import BaseMessage, LLMResult
from prefect import Flow, flow
from prefect.utilities.asyncutils import is_async_fn
from prefect.utilities.collections import listrepr
from pydantic import BaseModel
def get_prompt_content(prompts: Any) -> List[str]:
"""Return the content of the prompts."""
if isinstance(prompts[0], str):
return prompts
elif isinstance(prompts[0], BaseMessage):
return [p.content for p in prompts]
else:
return [p.content for msg_list in prompts for p in msg_list]
def num_tokens(text: str | List[str], encoding_name: str = "cl100k_base") -> int:
"""Returns the number of tokens in a text string."""
if isinstance(text, list):
text = "".join(text)
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(text))
return num_tokens
def truncate(text: str, max_length: int = 300) -> str:
"""Truncate text to max_length."""
if len(text) > 3 and len(text) >= max_length:
i = (max_length - 3) // 2
return f"{text[:i]}...{text[-i:]}"
return text
class NotAnArtifact(BaseModel):
"""Placeholder class for soon-to-come `Artifact`."""
name: str
description: str
content: Any
def llm_invocation_summary(*args, **kwargs) -> NotAnArtifact:
"""Will eventually return an artifact."""
subcls, prompts, *rest = args
invocation_fn = kwargs["invocation_fn"]
llm_endpoint = subcls.__module__
prompt_content = get_prompt_content(prompts)
summary = (
f"Sending {listrepr([truncate(p) for p in prompt_content])} "
f"to {llm_endpoint} via {invocation_fn!r}"
)
return NotAnArtifact(
name="LLM Invocation Summary",
description=f"Query {llm_endpoint} via {invocation_fn.__name__}",
content={
"llm_endpoint": llm_endpoint,
"prompts": prompts,
"summary": summary,
"args": rest,
**kwargs,
},
)
def parse_llm_result(llm_result: LLMResult) -> NotAnArtifact:
"""Will eventually return an artifact."""
return NotAnArtifact(
name="LLM Result",
description="The result of the LLM invocation.",
content=llm_result,
)
def flow_wrapped_fn(
func: Callable[..., LLMResult],
flow_kwargs: dict | None = None,
*args,
**kwargs,
) -> Flow:
"""Define a function to be wrapped in a flow depending
on whether the original function is sync or async."""
flow_kwargs = flow_kwargs or dict(name="Execute LLM Call", log_prints=True)
if is_async_fn(func):
async def execute_async_llm_call(llm_input: NotAnArtifact) -> LLMResult:
"""async flow for async LLM calls via `SubclassofBaseLLM.agenerate`"""
print(llm_input.content["summary"])
llm_result = await func(*args, **kwargs)
print(f"Recieved: {parse_llm_result(llm_result)!r}")
return llm_result
return flow(**flow_kwargs)(execute_async_llm_call)
else:
def execute_llm_call(llm_input: NotAnArtifact) -> LLMResult:
"""sync flow for sync LLM calls via `SubclassofBaseLLM.generate`"""
print(llm_input.content["summary"])
llm_result = func(*args, **kwargs)
print(f"Recieved: {parse_llm_result(llm_result)!r}")
return llm_result
return flow(**flow_kwargs)(execute_llm_call)
| [] |
2024-01-10 | PrefectHQ/langchain-prefect | examples~openai~conversation_memory.py | """Example observing LLM calls made by `ConversationChain`."""
from langchain.chains import ConversationChain
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain_prefect.plugins import RecordLLMCalls
conversation = ConversationChain(llm=OpenAI(), memory=ConversationBufferMemory())
with RecordLLMCalls():
conversation.predict(input="What is the meaning of life?")
conversation.predict(input="What did I just ask?")
| [] |
2024-01-10 | PrefectHQ/langchain-prefect | langchain_prefect~plugins.py | """Module for defining Prefect plugins for langchain."""
from contextlib import ContextDecorator
from functools import wraps
from typing import Callable
from langchain.schema import LLMResult
from langchain.base_language import BaseLanguageModel
from prefect import Flow
from prefect import tags as prefect_tags
from langchain_prefect.utilities import (
flow_wrapped_fn,
get_prompt_content,
llm_invocation_summary,
num_tokens,
)
def record_llm_call(
func: Callable[..., LLMResult],
tags: set | None = None,
max_prompt_tokens: int | None = int(1e4),
flow_kwargs: dict | None = None,
) -> Callable[..., Flow]:
"""Decorator for wrapping a Langchain LLM call with a prefect flow."""
tags = tags or set()
@wraps(func)
def wrapper(*args, **kwargs):
"""wrapper for LLM calls"""
invocation_artifact = llm_invocation_summary(
invocation_fn=func, *args, **kwargs
)
llm_endpoint = invocation_artifact.content["llm_endpoint"]
prompts = invocation_artifact.content["prompts"]
if max_prompt_tokens and (
(N := num_tokens(get_prompt_content(prompts))) > max_prompt_tokens
):
raise ValueError(
f"Prompt is too long: it contains {N} tokens"
f" and {max_prompt_tokens=}. Did not call {llm_endpoint!r}. "
"If desired, increase `max_prompt_tokens`."
)
llm_generate = flow_wrapped_fn(func, flow_kwargs, *args, **kwargs)
with prefect_tags(*[llm_endpoint, *tags]):
return llm_generate.with_options(
flow_run_name=f"Calling {llm_endpoint}" # noqa: E501
)(llm_input=invocation_artifact)
return wrapper
class RecordLLMCalls(ContextDecorator):
"""Context decorator for patching LLM calls with a prefect flow."""
def __init__(self, **decorator_kwargs):
"""Context decorator for patching LLM calls with a prefect flow.
Args:
tags: Tags to apply to flow runs created by this context manager.
flow_kwargs: Keyword arguments to pass to the flow decorator.
max_prompt_tokens: The maximum number of tokens allowed in a prompt.
Example:
Create a flow with `a_custom_tag` upon calling `OpenAI.generate`:
>>> with RecordLLMCalls(tags={"a_custom_tag"}):
>>> llm = OpenAI(temperature=0.9)
>>> llm(
>>> "What would be a good company name "
>>> "for a company that makes carbonated water?"
>>> )
Track many LLM calls when using a langchain agent
>>> llm = OpenAI(temperature=0)
>>> tools = load_tools(["llm-math"], llm=llm)
>>> agent = initialize_agent(tools, llm)
>>> @flow
>>> def my_flow(): # noqa: D103
>>> agent.run(
>>> "How old is the current Dalai Lama? "
>>> "What is his age divided by 2 (rounded to the nearest integer)?"
>>> )
>>> with RecordLLMCalls():
>>> my_flow()
Create an async flow upon calling `OpenAI.agenerate`:
>>> with RecordLLMCalls():
>>> llm = OpenAI(temperature=0.9)
>>> await llm.agenerate(
>>> [
>>> "Good name for a company that makes colorful socks?",
>>> "Good name for a company that sells carbonated water?",
>>> ]
>>> )
Create flow for LLM call and enforce a max number of tokens in the prompt:
>>> with RecordLLMCalls(max_prompt_tokens=100):
>>> llm = OpenAI(temperature=0.9)
>>> llm(
>>> "What would be a good company name "
>>> "for a company that makes carbonated water?"
>>> )
"""
self.decorator_kwargs = decorator_kwargs
def __enter__(self):
"""Called when entering the context manager.
This is what would need to be changed if Langchain started making
LLM api calls in a different place.
"""
self.patched_methods = []
for subcls in BaseLanguageModel.__subclasses__():
if subcls.__name__ == "BaseChatModel":
for subsubcls in subcls.__subclasses__():
# patch `BaseChatModel` generate methods when used as callable
self._patch_method(subsubcls, "_generate", record_llm_call)
self._patch_method(subsubcls, "_agenerate", record_llm_call)
self._patch_method(subcls, "generate", record_llm_call)
self._patch_method(subcls, "agenerate", record_llm_call)
def __exit__(self, exc_type, exc_val, exc_tb):
"""Reset methods when exiting the context manager."""
for cls, method_name, original_method in self.patched_methods:
setattr(cls, method_name, original_method)
def _patch_method(self, cls, method_name, decorator):
"""Patch a method on a class with a decorator."""
original_method = getattr(cls, method_name)
modified_method = decorator(original_method, **self.decorator_kwargs)
setattr(cls, method_name, modified_method)
self.patched_methods.append((cls, method_name, original_method))
| [] |
2024-01-10 | PrefectHQ/langchain-prefect | examples~openai~chroma_docs_ingest.py | """This example shows how to use the ChatGPT API
with LangChain to answer questions about Prefect."""
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains import ChatVectorDBChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain_prefect.loaders import GitHubRepoLoader
from langchain_prefect.plugins import RecordLLMCalls
documents = GitHubRepoLoader("PrefectHQ/prefect", glob="**/*.md").load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
system_template = """Use the following pieces of context to answer the users question.
If you don't know the answer, just say that you don't know, don't make up an answer.
----------------
{context}"""
prompt = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
)
qa = ChatVectorDBChain.from_llm(
llm=ChatOpenAI(temperature=0),
vectorstore=Chroma.from_documents(documents, embeddings),
qa_prompt=prompt,
)
with RecordLLMCalls(
tags={qa.vectorstore.__class__.__name__}, max_prompt_tokens=int(1e4)
):
chat_history = []
query = "What infrastructures does Prefect support?"
result = qa({"question": query, "chat_history": chat_history})
print(result["answer"])
chat_history = [(query, result["answer"])]
query = "Can I use Prefect with AWS?"
result = qa({"question": query, "chat_history": chat_history})
print(result["answer"])
| [
"Use the following pieces of context to answer the users question. \nIf you don't know the answer, just say that you don't know, don't make up an answer.\n----------------\n{context}",
"{question}"
] |
2024-01-10 | PrefectHQ/langchain-prefect | examples~openai~callable_llm.py | """Example of observing LLM calls made by via callable OpenAI LLM."""
from langchain.llms import OpenAI
from langchain_prefect.plugins import RecordLLMCalls
llm = OpenAI(temperature=0.9)
with RecordLLMCalls():
llm("What would be a good name for a company that makes colorful socks?")
| [] |
2024-01-10 | PrefectHQ/langchain-prefect | examples~hugging_face~hugging_face_hub.py | """Example observing LLM calls made by `HuggingFaceHub` LLM."""
from langchain.llms import HuggingFaceHub
from langchain_prefect.plugins import RecordLLMCalls
hf = HuggingFaceHub(repo_id="gpt2")
with RecordLLMCalls():
hf("How are you today?")
| [] |
2024-01-10 | PrefectHQ/langchain-prefect | docs~gen_examples_catalog.py | """
Locates all the examples in the Collection and puts them in a single page.
"""
import re
from collections import defaultdict
from inspect import getmembers, isclass, isfunction
from pathlib import Path
from pkgutil import iter_modules
from textwrap import dedent
from types import ModuleType
from typing import Callable, Set, Union
import mkdocs_gen_files
from griffe.dataclasses import Docstring
from griffe.docstrings.dataclasses import DocstringSectionKind
from griffe.docstrings.parsers import Parser, parse
from prefect.logging.loggers import disable_logger
from prefect.utilities.importtools import load_module, to_qualified_name
import langchain_prefect
COLLECTION_SLUG = "langchain_prefect"
def skip_parsing(name: str, obj: Union[ModuleType, Callable], module_nesting: str):
"""
Skips parsing the object if it's a private object or if it's not in the
module nesting, preventing imports from other libraries from being added to the
examples catalog.
"""
try:
wrong_module = not to_qualified_name(obj).startswith(module_nesting)
except AttributeError:
wrong_module = False
return obj.__doc__ is None or name.startswith("_") or wrong_module
def skip_block_load_code_example(code_example: str) -> bool:
"""
Skips the code example if it's just showing how to load a Block.
"""
return re.search(r'\.load\("BLOCK_NAME"\)\s*$', code_example.rstrip("`"))
def get_code_examples(obj: Union[ModuleType, Callable]) -> Set[str]:
"""
Gathers all the code examples within an object.
"""
code_examples = set()
with disable_logger("griffe.docstrings.google"):
with disable_logger("griffe.agents.nodes"):
docstring = Docstring(obj.__doc__)
parsed_sections = parse(docstring, Parser.google)
for section in parsed_sections:
if section.kind == DocstringSectionKind.examples:
code_example = "\n".join(
(part[1] for part in section.as_dict().get("value", []))
)
if not skip_block_load_code_example(code_example):
code_examples.add(code_example)
if section.kind == DocstringSectionKind.admonition:
value = section.as_dict().get("value", {})
if value.get("annotation") == "example":
code_example = value.get("description")
if not skip_block_load_code_example(code_example):
code_examples.add(code_example)
return code_examples
code_examples_grouping = defaultdict(set)
for _, module_name, ispkg in iter_modules(langchain_prefect.__path__):
module_nesting = f"{COLLECTION_SLUG}.{module_name}"
module_obj = load_module(module_nesting)
# find all module examples
if skip_parsing(module_name, module_obj, module_nesting):
continue
code_examples_grouping[module_name] |= get_code_examples(module_obj)
# find all class and method examples
for class_name, class_obj in getmembers(module_obj, isclass):
if skip_parsing(class_name, class_obj, module_nesting):
continue
code_examples_grouping[module_name] |= get_code_examples(class_obj)
for method_name, method_obj in getmembers(class_obj, isfunction):
if skip_parsing(method_name, method_obj, module_nesting):
continue
code_examples_grouping[module_name] |= get_code_examples(method_obj)
# find all function examples
for function_name, function_obj in getmembers(module_obj, callable):
if skip_parsing(function_name, function_obj, module_nesting):
continue
code_examples_grouping[module_name] |= get_code_examples(function_obj)
examples_catalog_path = Path("examples_catalog.md")
with mkdocs_gen_files.open(examples_catalog_path, "w") as generated_file:
generated_file.write(
dedent(
"""
# Examples Catalog
Below is a list of examples for `langchain-prefect`.
"""
)
)
for module_name, code_examples in code_examples_grouping.items():
if len(code_examples) == 0:
continue
module_title = module_name.replace("_", " ").title()
generated_file.write(
f"## [{module_title} Module][{COLLECTION_SLUG}.{module_name}]\n"
)
for code_example in code_examples:
generated_file.write(code_example + "\n")
| [] |
2024-01-10 | steinskeeper/judgy-backend | agents~marketagent.py | import asyncio
from fastapi import APIRouter
from langchain import OpenAI, SerpAPIWrapper
from langchain.agents import initialize_agent, Tool, AgentType
from langchain.tools import DuckDuckGoSearchRun
from langchain.llms import VertexAI
from pymongo import MongoClient
from bson.objectid import ObjectId
import vertexai
from vertexai.language_models import TextGenerationModel
search = DuckDuckGoSearchRun()
router = APIRouter()
client = MongoClient("mongodb://localhost:27017/")
db = client["judgy"]
@router.get("/market-agent")
async def marketAgent_endpoint():
await asyncio.sleep(5)
return {"message": "Hello from Market Agent"}
async def invoke_market_agent(project_id: str, idea: str):
vertexai.init(project="lofty-bolt-383703", location="us-central1")
parameters = {
"temperature": 0.2,
"max_output_tokens": 1000,
"top_p": 0.8,
"top_k": 40
}
for x in db.hackathons.find():
technologies = x["technologies"]
theme = x["theme"]
break
llm = VertexAI()
tools = [
Tool(
name="Intermediate Answer",
func=search.run,
description="useful for when you need to ask with search",
)
]
marketQuestion = [
"Who is the target audience of this idea?",
"What is the potential of this idea?",
"What is the market size of this idea?",
"What are the pitfalls of this idea?",
"Are there any platforms like the idea, that already exist?"]
agentAnswers = []
def getAnswer(question):
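        # build a fresh self-ask-with-search agent (DuckDuckGo as the search tool) for each market-research question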
self_ask_with_search = initialize_agent(
tools, llm, agent=AgentType.SELF_ASK_WITH_SEARCH, verbose=True
)
prompt = """
You are a market researcher. You have to answer the question about the idea.
Idea: {idea}
Question: {question}
Rules for answering:
1. Use statistical data where ever possible.
2. Remember to answer like a market researcher.
3. Answer the question as best you can, in a paragraph.
4. You must answer in one paragraph. Do not use formatting.
5. Your paragraph must not have more than 70 words.
"""
prompt = prompt.format(question=question, idea=idea)
resp = self_ask_with_search.run(prompt)
agentAnswers.append(resp)
for i in marketQuestion:
getAnswer(i)
final = []
for i in range(len(marketQuestion)):
newval = {"question": marketQuestion[i], "answer": agentAnswers[i]}
final.append(newval)
print(final)
model = TextGenerationModel.from_pretrained("text-bison@001")
theme_prompt = """
Themes : {theme}
Idea: {idea}
Task : To which of the above themes does the idea belong to?
Rules:
1. The theme must be from the above list
2. Just give the matched theme and nothing more.
3. If the idea does not match any of the themes, just say "None"
"""
theme_prompt = theme_prompt.format(theme=theme, idea=idea)
response = model.predict(
theme_prompt,
**parameters
)
finalTheme = response.text
print("project_id", project_id)
query = {"_id": ObjectId(project_id)}
newvalues = {"$set": {"marketAgentAnalysis": final, "theme": finalTheme}}
temp = db.projects.update_one(query, newvalues)
print("Market Agent : Task Complete")
| [
"\n Themes : PLACEHOLDER\n Idea: PLACEHOLDER\n Task : To which of the above themes does the idea belong to?\n Rules: \n 1. The theme must be from the above list\n 2. Just give the matched theme and nothing more.\n 3. If the idea does not match any of the themes, just say \"None\"\n ",
"\n You are a market researcher. You have to answer the question about the idea.\n Idea: PLACEHOLDER\n Question: PLACEHOLDER\n Rules for answering: \n 1. Use statistical data where ever possible.\n 2. Remember to answer like a market researcher.\n 3. Answer the question as best you can, in a paragraph.\n 4. You must answer in one paragraph. Do not use formatting.\n 5. Your paragraph must not have more than 70 words.\n ",
"\n You are a market researcher. You have to answer the question about the idea.\n Idea: {idea}\n Question: {question}\n Rules for answering: \n 1. Use statistical data where ever possible.\n 2. Remember to answer like a market researcher.\n 3. Answer the question as best you can, in a paragraph.\n 4. You must answer in one paragraph. Do not use formatting.\n 5. Your paragraph must not have more than 70 words.\n ",
"\n Themes : {theme}\n Idea: {idea}\n Task : To which of the above themes does the idea belong to?\n Rules: \n 1. The theme must be from the above list\n 2. Just give the matched theme and nothing more.\n 3. If the idea does not match any of the themes, just say \"None\"\n "
] |
2024-01-10 | steinskeeper/judgy-backend | agents~chatagent.py | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.prompts import PromptTemplate
from langchain.chains import ConversationChain
from bson import ObjectId
from fastapi import APIRouter, Request
from langchain.document_loaders import GitLoader
from dotenv import load_dotenv
from langchain.document_loaders import DirectoryLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chat_models import ChatVertexAI
import os
from langchain.memory import VectorStoreRetrieverMemory
from pymongo import MongoClient
load_dotenv()
router = APIRouter()
client = MongoClient("mongodb://localhost:27017/")
db = client["judgy"]
@router.get("/chat-agent")
def chatAgent_endpoint():
return {"message": "Hello from Chat Agent"}
@router.post("/chat-agent")
async def invoke_chat_agent(request: Request):
data = await request.json()
technologies = ""
theme = ""
isAllowed = False
for x in db.hackathons.find():
technologies = x["technologies"]
theme = x["theme"]
if "isAllowed" in x:
isAllowed = x["isAllowed"]
else:
isAllowed = False
break
if isAllowed == False:
return {"answer": "Sorry, We have reached our credit limit.", "chathistory": []}
print("Data",data)
project = db.projects.find_one({"_id": ObjectId(data["project_id"])})
DIRECTORY = "projects_source_code/"+data["project_id"]
loader = DirectoryLoader(DIRECTORY, silent_errors=True)
print("loader", loader)
llm = ChatVertexAI()
index = VectorstoreIndexCreator().from_loaders([loader])
print("index creation", index)
retriever = index.vectorstore.as_retriever()
memory = VectorStoreRetrieverMemory(retriever=retriever)
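    # memory backed by the project's own vector index, so only the most relevant prior context is injected into the prompt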
print("before context memory", memory)
memory.save_context(
{"input": "Idea : "+project["shortDescription"]}, {"output": "..."})
memory.save_context(
{"input": "Theme for the Hackathon : "+theme}, {"output": "..."})
memory.save_context(
{"input": "Technologies that must be used for the hackathon project : "+technologies}, {"output": "..."})
print("after context memory", memory)
_DEFAULT_TEMPLATE = """The following is a conversation between a hackathon judge and an AI.
The AI is a market researcher and a code reviewer.
Rules for answering:
1. Use statistical data where ever possible.
2. Remember to answer like a market researcher.
3. Answer the question as best you can, in a paragraph.
4. You must answer in one paragraph. Do not use formatting.
5. Your paragraph must not have more than 70 words.
6. You must analyze all the files in the project when a code related question is asked.
Relevant pieces of previous conversation:
{history}
(You do not need to use these pieces of information if not relevant)
Current conversation:
Human: {input}
AI:"""
PROMPT = PromptTemplate(
input_variables=["history", "input"], template=_DEFAULT_TEMPLATE
)
conversation_with_summary = ConversationChain(
llm=llm,
prompt=PROMPT,
memory=memory,
verbose=True
)
aiResp = conversation_with_summary.predict(input=data["question"])
chatHistory = data["chathistory"]
chatHistory.append(
{"input": data["question"], "output": aiResp})
return {
"answer": aiResp,
"chathistory": chatHistory
}
| [
"The following is a conversation between a hackathon judge and an AI. \n The AI is a market researcher and a code reviewer. \n Rules for answering: \n 1. Use statistical data where ever possible.\n 2. Remember to answer like a market researcher.\n 3. Answer the question as best you can, in a paragraph.\n 4. You must answer in one paragraph. Do not use formatting.\n 5. Your paragraph must not have more than 70 words.\n 6. You must analyze all the files in the project when a code related question is asked.\n Relevant pieces of previous conversation:\n {history}\n\n (You do not need to use these pieces of information if not relevant)\n\n Current conversation:\n Human: {input}\n AI:",
"input"
] |
2024-01-10 | steinskeeper/judgy-backend | agents~crudagent.py | from bson import ObjectId
from fastapi import APIRouter, Request
from pymongo import MongoClient
from agents.codeagent import invoke_code_agent
import asyncio
from agents.marketagent import invoke_market_agent
from annoy import AnnoyIndex
import cohere
import numpy as np
import pandas as pd
import os
router = APIRouter()
client = MongoClient("mongodb://localhost:27017/")
db = client["judgy"]
@router.get("/crud-agent")
def crudAgent_endpoint():
return {"message": "Hello from Crud Agent, Okay I'm not really an agent"}
@router.post("/create-project")
async def create_project(request: Request):
data = await request.json()
print(data)
data["isReviewed"] = False
new_project = db.projects.insert_one(data)
print(new_project.inserted_id)
asyncio.create_task(invoke_market_agent(
str(new_project.inserted_id), data["shortDescription"]))
asyncio.create_task(invoke_code_agent(
data["githubLink"], str(new_project.inserted_id)))
return {"message": "Project created", "project_id": str(new_project.inserted_id)}
@router.post("/create-hackathon")
async def create_hackathon(request: Request):
data = await request.json()
print(data)
new_hackathon = db.hackathons.insert_one(data)
print(new_hackathon.inserted_id)
return {"message": "Hackathon created", "hackathon_id": str(new_hackathon.inserted_id)}
@router.get("/get-project/{project_id}")
async def get_project(project_id: str):
project = db.projects.find_one({"_id": ObjectId(project_id)})
project["_id"] = str(project["_id"])
return {"message": "successful", "project": project}
@router.get("/get-all")
async def get_all_projects():
projects = db.projects.find({})
final = []
for project in projects:
project["_id"] = str(project["_id"])
final.append(project)
return {"message": "successful", "projects": final.reverse()}
@router.post("/review")
async def review_project(request: Request):
data = await request.json()
project_id = data["project_id"]
query = {"_id": ObjectId(project_id)}
new_values = {"$set": {"isReviewed": data["isReviewed"]}}
db.projects.update_one(query, new_values)
return {"message": "successful", "project_id": project_id}
@router.post("/search")
async def search_projects(request: Request):
data = await request.json()
cohere_api_key = os.getenv("COHERE_API_KEY")
co = cohere.Client(cohere_api_key)
projects = []
for x in db.projects.find({}):
x["_id"] = str(x["_id"])
projects.append(x)
docs = []
for y in projects:
text = "Project Description: " + \
y["longDescription"] + " Hackathon Theme: " + y["theme"]
docs.append(text)
df = pd.DataFrame({'docs': docs})
embeds = co.embed(texts=list(df["docs"]),
model="large",
truncate="RIGHT").embeddings
embeds = np.array(embeds)
search_index = AnnoyIndex(embeds.shape[1], 'angular')
for i in range(len(embeds)):
search_index.add_item(i, embeds[i])
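    # build the approximate nearest-neighbour index with 10 trees; more trees improve recall at the cost of build time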
search_index.build(10)
query_embed = co.embed(texts=[data["query"]],
model="large",
truncate="RIGHT").embeddings
similar_item_ids = search_index.get_nns_by_vector(
query_embed[0], 10, include_distances=True)
result = similar_item_ids[0]
ress = []
for i in range(len(result)):
ress.append(projects[result[i]])
return {"message": "successful", "projects": ress}
| [] |
2024-01-10 | steinskeeper/judgy-backend | agents~codeagent.py | from langchain.chat_models import ChatVertexAI
from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders import DirectoryLoader
from git import Repo
from langchain.document_loaders import GitLoader
from fastapi import APIRouter
import asyncio
from bson.objectid import ObjectId
from pymongo import MongoClient
router = APIRouter()
client = MongoClient("mongodb://localhost:27017/")
db = client["judgy"]
@router.get("/code-agent")
def codeAgent_endpoint():
return {"message": "Hello from Code Agent"}
async def invoke_code_agent(repolink: str, project_id: str):
DIRECTORY = "./projects_source_code/"+project_id
repo = Repo.clone_from(
repolink, to_path=DIRECTORY
)
branch = repo.head.reference
loader = GitLoader(repo_path=DIRECTORY, branch=branch)
llm = ChatVertexAI()
index = VectorstoreIndexCreator().from_loaders([loader])
# Get theme from hackathon collection
technologies = ""
for x in db.hackathons.find():
technologies = x["technologies"]
break
prompt = """
You are a code reviewer. This is a hackathon project. You have to answer the question about the project.
Question: {question}
Rules for answering:
1. Remember to answer like a code reviewer.
2. Answer the question as best you can. If you are unable to answer, say 'I am unsure, I need human assistance' .
3. You must answer in one paragraph. Do not use formatting.
4. Your paragraph must not have more than 70 words.
5. You must analyze all the files in the project.
6. If you don't know the answer, you must research and answer.
"""
questionToAsk = [
"What are the technologies and programming language used in this project?",
"Explain the project in brief",
"How is the code quality of this project?",
"Does the project import and use any of the following dependencies/packages/APIs/libraries : "+technologies + "? ",
]
agentAnswers = []
for question in questionToAsk:
        response = index.query(prompt.format(question=question), llm)
agentAnswers.append(response)
# Save the answers to the database
final = []
for i in range(len(questionToAsk)):
newval = {"question": questionToAsk[i], "answer": agentAnswers[i]}
final.append(newval)
query = {"_id": ObjectId(project_id)}
newvalues = {"$set": {"codeAgentAnalysis": final}}
db.projects.update_one(query, newvalues)
print("Code Agent : Task Complete")
| [
"\n You are a code reviewer. This is a hackathon project. You have to answer the question about the project.\n Question: {question}\n Rules for answering: \n 1. Remember to answer like a code reviewer.\n 2. Answer the question as best you can. If you are unable to answer, say 'I am unsure, I need human assistance' .\n 3. You must answer in one paragraph. Do not use formatting.\n 4. Your paragraph must not have more than 70 words.\n 5. You must analyze all the files in the project.\n 6. If you don't know the answer, you must research and answer.\n "
] |
2024-01-10 | hasans30/qnabot | aiutils.py | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain import OpenAI
from langchain.chains import RetrievalQA
from langchain.document_loaders import DirectoryLoader, TextLoader
from decouple import config
import sys
import os
openai_api_key = config("OPENAI_API_KEY")
# it loads a directory of documents and return vector db
def load_documents():
if not os.path.exists('data/'):
print('data folder does not exist')
return None
loader = DirectoryLoader('data/', glob='**/*.txt', loader_cls=TextLoader)
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
return text_splitter.split_documents(documents)
def get_qa():
embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
texts = load_documents()
if texts is None:
print('texts is none possibly due to data folder does not exist')
return None
docsearch = FAISS.from_documents(texts, embeddings)
llm = OpenAI(temperature=0, openai_api_key=openai_api_key)
# Create your Retriever
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=docsearch.as_retriever(), return_source_documents=True)
return qa
def query_my_question(queryText):
qa=get_qa()
if qa is None:
        print('qa is None, possibly because the data folder does not exist')
return 'unable to answer your question'
query={"query": queryText}
result=qa(query)
return result['result']
# Compare this snippet from app.py:
if __name__ == '__main__':
    if len(sys.argv) < 2:
print('not enough arguments')
sys.exit(1)
print(f'querying {sys.argv[1]}')
print(query_my_question(sys.argv[1])) | [] |
2024-01-10 | Garryofc/HyperAI | HyperAI.py | from ctypes import FormatError
import os
import openai
import random
import logging
import discord
import time
from discord.ext import commands
from asyncio import sleep
import asyncio
import colorama
from discord import app_commands
from colorama import Fore
colorama.init()
client = discord.Client(intents = discord.Intents.all())
tree = app_commands.CommandTree(client)
openai.api_key = ''
logging.basicConfig(filename="assets/log.txt", level=logging.INFO,
format="%(asctime)s %(message)s")
@client.event
async def on_ready():
activity = discord.Game(name="HyperAI", type=3)
await client.change_presence(status=discord.Status.online, activity=activity)
await tree.sync(guild=discord.Object(id="1054463932317835285"))
print("Ready!")
@client.event
async def on_message(message):
if message.author == client.user:
return
elif message.content == '!creator':
try:
await message.channel.send('HyperAI creator is Garry')
except:
            await message.channel.send('HyperAI is under maintenance (error)')
            print('An error occurred in the 4th slot')
else:
try:
response = openai.Completion.create(
engine="text-davinci-003",
prompt=f"{message.content}\n",
max_tokens=500,
temperature=1,
top_p=1,
frequency_penalty=0,
presence_penalty=0
).choices[0].text
print(f'{Fore.BLUE}Author: {message.author}')
print(f'{Fore.CYAN}Message: {message.content}')
print(f'{Fore.GREEN}Response: {response}{Fore.RESET}')
logging.info(f" Author = {message.author} ; Message: {message.content} ; Response: {response}")
print('')
await message.channel.send(response)
except:
            await message.channel.send('HyperAI is under maintenance (error)')
            print('An error occurred in the 4th slot')
@tree.command(name = "creator", description = "Shows who created HyperAI", guild=discord.Object(id=1054463932317835285)) #Add the guild ids in which the slash command will appear. If it should be in all, remove the argument, but note that it will take some time (up to an hour) to register the command if it's for all guilds.
async def first_command(interaction):
await interaction.response.send_message("HyperAI founder is Garry")
client.run('')
| [] |
2024-01-10 | DevGauge/PursuitProphet | backend~welcome.py | import os
import sys
from flask import Flask, jsonify, render_template, request, redirect, url_for, flash, session, after_this_request
sys.path.insert(0, '../')
sys.path.insert(0, '/app')
# from app.models import ChatBot
from langchain_m.langchain_module import TaskChatBot, GoalChatBot
import app.app
from app.models import Goal, Task, User
from app.app import app_instance
from app.pp_logging.db_logger import db
from flask_socketio import SocketIO, send, emit
from flask_security import roles_required, login_required, login_user, user_registered, current_user
from flask_security.confirmable import confirm_user, confirm_email_token_status
from LangChainAgentFactory import AgentFactory
from urllib.parse import quote
# import feature blueprints
import features.demo.demo_blueprint
import features.dream.dream_blueprint
import features.task.task_blueprint
import features.subtask.subtask_blueprint
import features.google_oauth.google_blueprint
import features.profile.profile_blueprint
from shared.blueprints.blueprints import register_blueprints
app = app_instance.app
socketio = SocketIO(app, debug=True)
register_blueprints(app)
print(app.url_map)
@user_registered.connect_via(app)
def user_registered_sighandler(sender, user, confirm_token, confirmation_token, form_data):
@after_this_request
def transfer_goals_after_request(response):
if 'temp_user_id' in session:
temp_user_id = session['temp_user_id']
temp_user = User.query.get(temp_user_id)
if temp_user is not None:
# Transfer the goals from the temporary user to the registered user
for goal in temp_user.goals:
user.goals.append(goal)
# Ensure that goals are persisted in the database before deleting temp_user
db.session.flush()
# Delete the temporary user
db.session.delete(temp_user)
db.session.commit()
return response
@app.teardown_appcontext
def shutdown_session(exception=None):
db.session.remove()
@socketio.on('message')
def handle_message(data):
print('received message: ' + data)
send(data, broadcast=True)
from sqlalchemy import exc
@app.errorhandler(exc.SQLAlchemyError)
def handle_db_exceptions(error):
db.session.rollback()
@app.route('/error/<string:error_message>')
def error_page(error_message):
print(f'error message: {error_message}')
# traceback.print_exc()
return render_template('error.html', error_message=error_message, pageTitle='Error')
@app.errorhandler(500)
def handle_500(error):
error_message = "Internal Server Error"
return redirect(url_for('error_page', error_message=error_message))
@app.route('/admin')
@login_required
@roles_required('admin')
def admin_home():
return "Hello, Admin!"
@app.route('/thank_you')
def thank_you():
return render_template('thank_you.html')
@app.route('/confirm/<token>')
def confirm_email(token):
# Check the token status first
expired, invalid, user = confirm_email_token_status(token)
if not expired and not invalid:
# confirm the user
if confirm_user(token):
# if successful, log the user in
login_user(user)
return redirect(url_for('dashboard'))
return 'The confirmation link is invalid or has expired.'
@app.route('/feature/<feature_key>', methods=['GET'])
def get_user_feature(feature_key):
user_id = request.args.get('user_id', None)
if user_id is None:
user = current_user
else:
user = User.query.get(user_id)
feature_value = getattr(user, feature_key, None)
if feature_value is not None:
print(f'feature key value: {feature_value}')
return jsonify({feature_key: feature_value})
else:
print(f'feature key {feature_key} not found. user attribs: {user.__dict__}')
return jsonify({feature_key: False})
@app.route('/feature/<feature_key>', methods=['PUT'])
def update_feature_value(feature_key):
user_id = request.args.get('user_id', None)
print(f'user id from request: {user_id}')
if user_id is None:
user = current_user
else:
user = User.query.get(user_id)
if hasattr(user, feature_key):
print(f'feature value: {feature_key} found, setting false')
setattr(user, feature_key, False)
db.session.commit()
return jsonify({'success': True})
else:
print(f'feature value {feature_key} not found. user attribs: {user.__dict__}')
app.logger.error(f'feature value {feature_key} not found. user attribs: {user.__dict__}')
return jsonify({'success': False})
@login_required
@app.route('/')
def dashboard():
    if isinstance(current_user, User):  # current_user can be anonymous and would need to log in
try:
user_id = current_user.id
user = app_instance.user_datastore.find_user(id=user_id)
if user:
goals = Goal.query.filter_by(user_id=user_id).all()
return render_template('dream-home.html', goals=goals)
except Exception as e:
app.logger.error(f'Dashboard loading error: {e}')
return redirect(url_for('error_page', error_message='Error logging you in.'))
return redirect(url_for('security.login'))
@app.route('/chat')
def chat():
return render_template('chat.html')
@app.route('/chat_api/<string:goal_id>', methods=['POST'])
def chat_api(goal_id):
print("a POST request was made to chat")
# task = Task.query.filter_by(id=task_id).first()
goal = Goal.query.filter_by(id=goal_id).first()
message = request.json['message']
chat_bot = GoalChatBot(goal)
response = chat_bot.get_response(message)
return jsonify({'response': response})
@app.route('/task_chat_api/<string:task_id>', methods=['POST'])
def task_chat_api(task_id):
task = Task.query.filter_by(id=task_id).first()
goal = Goal.query.filter_by(id=task.goal_id).first()
chat_bot = TaskChatBot(task, goal, [task.get_task() for task in task.subtasks])
| [] |
2024-01-10 | adamattar123/Quiz-Application | MCQ.py | # Import necessary libraries
import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
# OpenAI API key for authentication
api = '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'
# Function to call OpenAI and generate quiz questions
def callOpenAi(topic, numOfQ):
# Define the expected response schema for OpenAI output
response_schema = ResponseSchema(
name="questions",
subschemas=[
ResponseSchema(
name="question",
description="The text of the question",
type="str"
),
ResponseSchema(
name="options",
description="Multiple choice options",
type="str",
subschemas=[
ResponseSchema(name="option", description="A multiple choice option", type="str")
]
),
ResponseSchema(
name="answer",
description="The correct answer option",
type="str"
)
],
description="The list of questions",
type="list of dict"
)
# Initialize output parser based on the response schema
output_parser = StructuredOutputParser.from_response_schemas([response_schema])
format_instructions = output_parser.get_format_instructions()
# Define the prompt template for OpenAI
prompt = """
You are an MCQ quiz guru, you have to generate {num} mcq questions about {topic}.
Provide the right answer for each question and return the response in JSON format.
Here is an example of how the JSON structure should be in this format {format_instructions}.
All the values in the JSON should not be prefixed with anything like "A.", "A)", "A:", "A:-".
"""
# Create a ChatPromptTemplate object from the template
prompt = ChatPromptTemplate.from_template(prompt)
# Initialize ChatOpenAI model with the OpenAI API key
model = ChatOpenAI(openai_api_key=api)
# Define the processing chain: prompt -> model -> output_parser
chain = prompt | model | output_parser
# Invoke the processing chain with the input parameters
answer = chain.invoke({"num": numOfQ, "topic": topic, "format_instructions": format_instructions})
return answer
# Streamlit application title
st.title("Question Generator")
# Input fields for topic and number of questions
topic = st.text_input("Enter the topic of the questions:")
num_questions = st.number_input("Enter the number of questions:", min_value=1)
# Button to start the quiz
start_quiz_button = st.button("Start Quiz")
# Check if the quiz has started
if start_quiz_button:
if topic and num_questions > 0:
# Check if quiz data is not stored in session state
        if 'quiz_data' not in st.session_state:
# Retrieve the selected topic and number of questions
selected_topic = topic
selected_num_questions = num_questions
            # Call OpenAI to generate quiz questions and pull out the question list
            quiz_JSON = callOpenAi(selected_topic, selected_num_questions)
            questions = quiz_JSON.get('questions', [])
            # Initialize question index for tracking progress
            question_index = 0
# Store quiz data in session state
st.session_state['quiz_data'] = {
'selected_topic': selected_topic,
'selected_num_questions': selected_num_questions,
'questions': questions,
'question_index': question_index,
}
else:
# Subsequent interactions, use stored state
selected_topic = st.session_state['quiz_data']['selected_topic']
selected_num_questions = st.session_state['quiz_data']['selected_num_questions']
questions = st.session_state['quiz_data']['questions']
question_index = st.session_state['quiz_data']['question_index']
        # Make sure the quiz data actually contains a list of questions
        if isinstance(questions, list):
# Check if there are questions available
if len(questions) > 0:
# Get the current question
current_question = questions[question_index]
st.write(current_question['question'])
# Display radio buttons for answer options
user_input = st.radio(f"Select an answer for Question {question_index + 1}:", current_question['options'], key=f"question_{question_index}")
# Check the answer on submit
if st.button("Submit"):
if user_input == current_question['answer']:
result = "Success! That's the correct answer."
else:
result = f"Wrong answer. The correct answer is '{current_question['answer']}'."
st.write(result)
# Check if there are more questions
if question_index < len(questions) - 1:
question_index += 1
current_question = questions[question_index]
st.write(current_question['question'])
user_input = st.radio(f"Select an answer for Question {question_index + 1}:", current_question['options'], key=f"question_{question_index}")
else:
st.write("No questions found for this topic.")
else:
st.write("Invalid quiz data.")
else:
# Display errors for missing or invalid input
if not topic:
st.error("Please enter a valid topic.")
if num_questions <= 0:
st.error("Please enter a valid number of questions (minimum 1).")
| [
"\n You are an MCQ quiz guru, you have to generate {num} mcq questions about {topic}.\n Provide the right answer for each question and return the response in JSON format. \n Here is an example of how the JSON structure should be in this format {format_instructions}.\n All the values in the JSON should not be prefixed with anything like \"A.\", \"A)\", \"A:\", \"A:-\".\n "
] |
2024-01-10 | HyperUpscale/Talk-To-ChatGPT | talk.py | import sounddevice as sd
import soundfile as sf
import playsound
import numpy as np
import openai
import os
import requests
import re
from colorama import Fore, Style, init
import datetime
import base64
#from pydub import AudioSegment
#from pydub.playback import play
init()
def open_file(filepath):
output_folder = os.path.dirname(os.path.abspath(__file__))
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
api_key = open_file('openaiapikey2.txt')
elapikey = open_file('elabapikey.txt')
conversation1 = []
chatbot1 = open_file('chatbot1.txt')
def chatgpt(api_key, conversation, chatbot, user_input, temperature=0.9, frequency_penalty=0.2, presence_penalty=0):
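    # Append the user message, prepend the system prompt, and return the assistant's reply.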
openai.api_key = api_key
conversation.append({"role": "user","content": user_input})
messages_input = conversation.copy()
prompt = [{"role": "system", "content": chatbot}]
messages_input.insert(0, prompt[0])
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
temperature=temperature,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
messages=messages_input)
chat_response = completion['choices'][0]['message']['content']
conversation.append({"role": "assistant", "content": chat_response})
return chat_response
def text_to_speech(text, voice_id, api_key):
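    # Send the text to the ElevenLabs TTS endpoint, save the returned MP3, play it, then delete it.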
url = f'https://api.elevenlabs.io/v1/text-to-speech/{voice_id}'
headers = {
'Accept': 'audio/mpeg',
'xi-api-key': api_key,
'Content-Type': 'application/json'
}
data = {
'text': text,
'model_id': 'eleven_monolingual_v1',
'voice_settings': {
'stability': 0.6,
'similarity_boost': 0.85
}
}
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
# Generate a unique filename based on the current timestamp
timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
filename = f'output_{timestamp}.mp3'
# Save the audio content to a unique MP3 file
with open(filename, 'wb') as f:
f.write(response.content)
# Play the audio file
playsound.playsound(filename)
# Clean up: delete the audio file after playing
os.remove(filename)
else:
print('Error:', response.text)
def print_colored(agent, text):
agent_colors = {
"Angel:": Fore.YELLOW,
}
color = agent_colors.get(agent, "")
print(color + f"{agent}: {text}" + Style.RESET_ALL, end="")
voice_id1 = '21m00Tcm4TlvDq8ikWAM'
def record_and_transcribe(duration=8, fs=44100):
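    # Record `duration` seconds from the default microphone, save a WAV, and transcribe it with Whisper.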
print('Recording...')
myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=2)
sd.wait()
print('Recording complete.')
filename = 'myrecording.wav'
sf.write(filename, myrecording, fs)
with open(filename, "rb") as file:
openai.api_key = api_key
result = openai.Audio.transcribe("whisper-1", file)
transcription = result['text']
return transcription
while True:
user_message = record_and_transcribe()
response = chatgpt(api_key, conversation1, chatbot1, user_message)
print_colored("Julie:", f"{response}\n\n")
user_message_without_generate_image = re.sub(r'(Response:|Narration:|Image: generate_image:.*|)', '', response).strip()
text_to_speech(user_message_without_generate_image, voice_id1, elapikey)
| [] |
2024-01-10 | mazharm/openaichatbot | tools~tune.py | """
This tool uses a genetic algorithm to tune the hyperparameters of the GPT-3 model.
It uses two methods to evaluate the fitness of a candidate hyperparameter set:
(1) the perplexity of the generated text and
(2) the cosine similarity between the generated text and the target response.
The fitness function is a weighted sum of the two methods. The tool invokes the algorithm with
different weights to find the optimal hyperparameters. The final judgement of the optimal
hyperparameters is based on the human judgement of the generated text.
"""
import random
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
from .openaicli import OpenAICli

# Assumed layout of training_data.json: a list of {"prompt": ..., "response": ...} records.
# Load it as a DataFrame so the column lookups in evaluate_population() work.
training_data = pd.read_json('training_data.json')
oac = OpenAICli()
# Define the hyperparameters
temperature_range = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
top_p_range = [0.5, 0.7, 0.9, 1]
frequency_penalty_range = [0, 0.2, 0.4, 0.6, 0.8, 1.2, 1.4, 1.6, 1.8, 2]
presence_penalty_range = [0, 0.2, 0.4, 0.6, 0.8, 1.2, 1.4, 1.6, 1.8, 2]
def fitness(hyperparameters, messages, target_response, p_w, s_w):
"""
Define the fitness function
"""
generated_text, perplexity = oac.get_response(messages, hyperparameters)
generated_vector = oac.get_embedding(generated_text)
target_vector = oac.get_embedding(target_response)
similarity = cosine_similarity(
generated_vector.reshape(1, -1), target_vector.reshape(1, -1))
score = p_w * \
(1.0 / float(perplexity)) + s_w * similarity[0][0]
return score
def generate_individual():
"""
Generate a random individual
"""
temperature = random.choice(temperature_range)
top_p = random.choice(top_p_range)
frequency_penalty = random.choice(frequency_penalty_range)
presence_penalty = random.choice(presence_penalty_range)
return {'temperature': temperature, 'top_p': top_p, 'frequency_penalty': \
frequency_penalty, 'presence_penalty': presence_penalty}
def generate_population(population_size=100):
"""
Generate a population of random individuals
"""
population = [generate_individual() for _ in range(population_size)]
return population
def evaluate_population(population, p_w, s_w):
"""
Evaluate the fitness of each individual in the population
"""
fitness_scores = []
for individual in population:
prompt = random.choice(training_data['prompt'])
target_response = training_data.loc[training_data['prompt']
== prompt, 'response'].values[0]
score = fitness(individual, prompt, target_response, p_w, s_w)
fitness_scores.append(score)
return fitness_scores
def alpha(population, population_size, alpha_parent):
"""
Breed the population by having the alpha seed the next generation
"""
new_population = []
while len(new_population) < population_size:
parent = random.choice(population)
child = {}
for key in parent.keys():
if random.random() < 0.5:
child[key] = parent[key]
else:
child[key] = alpha_parent[key]
new_population.append(child)
return new_population
def crossover(population, population_size):
"""
Breed the population by random crossover
"""
new_population = []
while len(new_population) < population_size:
parent1 = random.choice(population)
parent2 = random.choice(population)
child = {}
for key in parent1.keys():
if random.random() < 0.5:
child[key] = parent1[key]
else:
child[key] = parent2[key]
new_population.append(child)
return new_population
def breed_popuplation(population, population_size, strategy, alpha_parent):
"""
Breed the population using the specified strategy
"""
if strategy == 'crossover':
return crossover(population, population_size)
elif strategy == 'alpha':
return alpha(population, population_size, alpha_parent)
else:
raise ValueError('Unknown strategy')
def mutate_population(population):
"""
Mutate the population
"""
for individual in population:
if random.random() < 0.1:
individual['temperature'] = random.choice(temperature_range)
if random.random() < 0.1:
individual['top_p'] = random.choice(top_p_range)
if random.random() < 0.1:
individual['frequency_penalty'] = random.choice(
frequency_penalty_range)
if random.random() < 0.1:
individual['presence_penalty'] = random.choice(
presence_penalty_range)
return population
def evolve_population(strategy, population, population_size, fitness_scores, alpha_parent):
"""
Evolve the population
"""
num_selected = int(population_size * 0.2)
selected_indices = sorted(range(
population_size), key=lambda i: fitness_scores[i], reverse=True)[:num_selected]
selected_population = [population[i] for i in selected_indices]
new_population = breed_popuplation(
selected_population, population_size, strategy, alpha_parent)
new_population = mutate_population(new_population)
return new_population
def run_genetic_algorithm(strategy, population_size, num_generations, p_w, s_w):
"""
Run the genetic algorithm
"""
population = generate_population(population_size)
fitness_scores = evaluate_population(population, p_w, s_w)
alpha_parent = population[fitness_scores.index(max(fitness_scores))]
    # Evolution loop (use the num_generations argument rather than overriding it)
for generation in range(num_generations):
print('Generation:', generation)
new_population = evolve_population(strategy,
population, population_size, fitness_scores, alpha_parent)
# Evaluate the fitness of the new population
new_fitness_scores = evaluate_population(new_population, p_w, s_w)
# Replace the old population with the new population
population = new_population
fitness_scores = new_fitness_scores
alpha_parent = population[fitness_scores.index(max(fitness_scores))]
print(f"Generation:{generation}, Best individual:{alpha_parent}")
return alpha_parent
def run_with_strategy(strategy, population_size, generations):
"""
Run the genetic algorithm with the specified strategy
"""
weights = {(0, 1), (1, 0), (0.25, 0.75), (0.75, 0.25), (0.5, 0.5)}
for weight in weights:
perplexity_weight, similarity_weight = weight
best_individual = run_genetic_algorithm(strategy,
population_size, generations, perplexity_weight, similarity_weight)
print(f'perplexity_weight:{perplexity_weight},\
similarity_weight:{similarity_weight},\
Best individual:{best_individual}')
if __name__ == '__main__':
run_with_strategy('crossover', 100, 10)
run_with_strategy('alpha', 100, 10)
| [] |
2024-01-10 | hellomikelo/hackathon-cohere-qdrant | pycord.py | import discord
import os # default module
from dotenv import load_dotenv
from discord.ext import commands
import requests
import numpy as np
import cohere
API_KEY = "u4uSutHawHYkDkfWHZ0TL0ETVmE1G6lGrLlFYnHW"
co = cohere.Client('u4uSutHawHYkDkfWHZ0TL0ETVmE1G6lGrLlFYnHW')
BASE_MESSAGE_URL = "https://discord.com/channels/{guild_id}/{channel_id}/{message_id}"
def get_embeddings(text):
embeddings = co.embed(text).embeddings
return embeddings
def cosine_similarity(a, b):
return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
load_dotenv() # load all the variables from the env file
bot = discord.Bot()
@bot.event
async def on_ready():
print(f"{bot.user} is ready and online!")
@bot.slash_command(name="get_messages", description="Get the last 100 messages in the current channel")
async def get_messages(ctx: commands.Context):
channel = ctx.channel
messages = []
async for message in channel.history(limit=100):
messages.append(message.content)
messages_str = "\n".join(messages)
await ctx.send(f"Recent messages:\n{messages_str}")
@bot.command()
async def embed(ctx):
embed = discord.Embed(
title="My Amazing Embed",
description="Embeds are super easy, barely an inconvenience.",
# Pycord provides a class with default colors you can choose from
color=discord.Colour.blurple(),
)
embed.add_field(name="A Normal Field",
value="A really nice field with some information. **The description as well as the fields support markdown!**")
embed.add_field(name="Inline Field 1", value="Inline Field 1", inline=True)
embed.add_field(name="Inline Field 2", value="Inline Field 2", inline=True)
embed.add_field(name="Inline Field 3", value="Inline Field 3", inline=True)
# footers can have icons too
embed.set_footer(text="Footer! No markdown here.")
embed.set_author(name="Pycord Team",
icon_url="https://example.com/link-to-my-image.png")
embed.set_thumbnail(url="https://example.com/link-to-my-thumbnail.png")
embed.set_image(url="https://example.com/link-to-my-banner.png")
# Send the embed with some text
await ctx.respond("Hello! Here's a cool embed.", embed=embed)
@bot.slash_command(name="search3", description="Search for messages containing a keyword")
async def search_messages(ctx: commands.Context, keyword: str):
channel = ctx.channel
messages = []
async for message in channel.history(limit=100):
if keyword in message.content:
messages.append(f"{message.content}\nURL: {message.jump_url}")
if messages:
messages_str = "\n".join(messages)
        # Split the messages into chunks of at most 1000 characters each
message_parts = [messages_str[i:i+1000]
for i in range(0, len(messages_str), 1000)]
for part in message_parts:
await ctx.send(f"Matching messages:\n{part}")
else:
await ctx.send(f"No messages found containing '{keyword}'")
@bot.slash_command(name="search_embedding", description="Search for messages by embedding")
async def search(ctx, query: str):
search_results = []
async for msg in ctx.channel.history(limit=100):
embeddings1 = co.embed([query]).embeddings[0]
embeddings2 = co.embed([msg.content]).embeddings[0]
similarity = cosine_similarity(embeddings1, embeddings2)
if similarity > 0.5:
search_results.append(msg)
print(search_results)
if len(search_results) > 0:
result_str = "Search results:\n\n"
for result in search_results:
message = result
result_str += f"{message.author}: [{message.content}]({message.jump_url})\n"
await ctx.send(content=result_str)
else:
await ctx.send("No matching messages found.")
bot.run(
os.environ['TOKEN'])
| [] |
2024-01-10 | hellomikelo/hackathon-cohere-qdrant | fetcher.py | import discord
from discord.ext.commands import slash_command
from discord.ext import commands
from discord.ext.commands import Cog
import os
import regex as re
from dotenv import load_dotenv
import requests
import numpy as np
import pandas as pd
import cohere
from qdrant_client import QdrantClient
from qdrant_client import models
from qdrant_client.http import models as rest
from qdrant_client.models import Filter
load_dotenv() # load all the variables from the env file
CHAT_HISTORY_PATH = '/content/drive/MyDrive/career/projects/hackathons/lablab-cohere-qdrant-hackathon/discord-chat-history.csv'
BASE_MESSAGE_URL = "https://discord.com/channels/{guild_id}/{channel_id}/{message_id}"
QDRANT_CLOUD_HOST = "19531f2c-0717-4706-ac90-bd8dd1a6b0cc.us-east-1-0.aws.cloud.qdrant.io"
QDRANT_COLLECTION_NAME = 'discord'
co = cohere.Client(os.getenv('COHERE_API_KEY'))
qdrant_client = QdrantClient(
host=QDRANT_CLOUD_HOST,
prefer_grpc=False,
api_key=os.getenv('QDRANT_API_KEY'),
)
discord_client = discord.Client()
bot = discord.Bot()
def embed_text(text: list, model='multilingual-22-12'):
"""Generate text embeddings."""
if type(text) is str:
text = [text]
embeddings = co.embed(text, model=model)
vectors = [list(map(float, vector)) for vector in embeddings.embeddings]
return vectors
@bot.event
async def on_ready():
print(f"{bot.user} is ready and online!")
# Define a search filter for author
ignore_author = "Fetcher"
author_filter = Filter(**{"must_not": [{"key": "author", "match": {"value": "Fetcher"}},
{"key": "author", "match": {"value": "Findio"}},
{"key": "author", "match": {"value": "Chatter"}},
],
"must": [{ "key": "word_count", "range": { "gte": 3 }}]
})
@bot.slash_command(name="fetch", description="Search for messages by embedding")
async def fetch(ctx, query: str, k_max=5):
min_words = 20
vectors = embed_text(query)
for vector in vectors:
response = qdrant_client.search(
collection_name=QDRANT_COLLECTION_NAME,
query_vector=vector,
query_filter=author_filter,
limit=k_max,
)
results = [record.payload for record in response]
# def get_plain(content: str):
# plain = re.sub(r'\n', ' \n', content)
# plain = re.sub(r'(>.*\n|```(.|\n)*```|`?|\n)', '', plain.lower().strip())
# return plain
if len(results) > 0:
output = []
# result_str = f'Search query: "{query}":\nSearch results:\n'
# for result in results:
# TODO: summarize by thread not single messages
# if len(result['content'].split()) >= min_words:
# summary = co.summarize(
# text=result['content'],
# model='summarize-xlarge',
# length='medium',
# extractiveness='low',
# temperature=0.3,
# additional_command="to remember a conversation"
# ).summary
# else:
# summary = result['content']
# result_message = result['content']
# if len(result_message) > 100:
# result_message = result_message[:100] + '...'
# result_str += f"""
# * {result['author']} wrote [{result['created_at'][:16]}]:
# [{result_message}]({result['jump_url']})
# {result['guild_id']}/{result['channel_id']}/{result['msg_id']}
# """
embed=discord.Embed(color=0x1eff00)
for result in results:
embed.add_field(name=f"{result['author']} at {result['created_at'][:16]}\n{result['channel_id']}.{result['msg_id']}",
value=f"[{result['content'][:200]}...]({result['jump_url']})",
inline=False)
embed.set_footer(text="Use `/discuss` to message user on this topic.")
# await ctx.respond(content=result_str)
await ctx.respond(f':wave: Your search results for "{query}"', embed=embed)
else:
await ctx.respond("No matching messages found.")
@bot.slash_command(name="revise", description="Revise sentence for clarity")
async def revise(ctx, sentence: str):
"""Use generate API to revise sentence."""
prompt = f"Give me a better version of the following sentence that is more concise and clear, in a polite, fun, yet professional tone: {sentence}"
response = co.generate(model='command-xlarge-beta',
prompt = prompt,
max_tokens=90,
temperature=0.5,
)
revised = response.generations[0].text
await bot.wait_until_ready()
if revised:
embed=discord.Embed(color=0x1eff00)
embed.add_field(name="Original", value=sentence, inline=False)
embed.add_field(name="Revised", value=revised, inline=False)
await ctx.respond(":wave: Here you go.", embed=embed)
# await ctx.respond(content=f"__Old__:\n{sentence}\n__Revised__: {revised}")
else:
await ctx.respond(content="No revision available.")
@bot.slash_command(name="discuss", description="Start a conversation on a topic.")
async def start_convo(ctx: discord.ApplicationContext, user, id: str):
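    # `id` is expected in the "<channel_id>.<msg_id>" form produced by the /fetch results above.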
try:
channel_id, msg_id = id.split('.')
try:
# Get the message
msg = await ctx.fetch_message(int(msg_id))
except:
# Try to get the thread's parent channel
msg = await ctx.fetch_message(int(channel_id))
def get_plain_thread(content: str):
plain = re.sub(r'\n', ' \n', content)
plain = re.sub(r'(>.*\n|```(.|\n)*```|`?|\n)', '', plain.lower().strip())
return plain
plain_thread = get_plain_thread(msg.content) + ' '
thread_summary = ''
if msg.flags.has_thread:
async for m in msg.thread.history(limit=100, oldest_first=True):
formatted_content = m.content
plain_thread += get_plain_thread(formatted_content) + ' '
else:
pass
thread_summary = co.summarize(
text=plain_thread,
model='summarize-xlarge',
length='medium',
extractiveness='low',
temperature=0.3,
additional_command="to remember a conversation"
).summary
embed=discord.Embed(color=0x1eff00)
embed.add_field(name=f"Original message thread", value=f"[{msg.content[:200]}...]({msg.jump_url})", inline=False)
embed.add_field(name=f"TL;DR", value=thread_summary, inline=False)
await ctx.respond(f':wave: {user}, <@{ctx.author.id}> wants to chat with you about below.', embed=embed)
except:
await ctx.respond("No message found.")
@bot.slash_command(name="getthread", description="Summarize a thread or message.")
async def get_thread(ctx: discord.ApplicationContext, id):
try:
channel_id, msg_id = id.split('.')
try:
# Get the message
msg = await ctx.fetch_message(int(msg_id))
except:
# Try to get the thread's parent channel
msg = await ctx.fetch_message(int(channel_id))
def get_plain_thread(content: str):
plain = re.sub(r'\n', ' \n', content)
plain = re.sub(r'(>.*\n|```(.|\n)*```|`?|\n)', '', plain.lower().strip())
return plain
plain_thread = get_plain_thread(msg.content) + ' '
if msg.flags.has_thread:
async for m in msg.thread.history(limit=100, oldest_first=True):
formatted_content = m.content
plain_thread += get_plain_thread(formatted_content) + ' '
await ctx.respond(plain_thread)
except:
await ctx.respond("No message found.")
@bot.slash_command(name="keyword_search", description="Search for messages containing a keyword")
async def search_messages(ctx: commands.Context, keyword: str):
channel = ctx.channel
messages = []
async for message in channel.history(limit=100):
if keyword in message.content:
messages.append(f"{message.content}\nURL: {message.jump_url}")
if messages:
messages_str = "\n".join(messages)
        # Split the messages into chunks of at most 1000 characters each
message_parts = [messages_str[i:i+1000] for i in range(0, len(messages_str), 1000)]
for part in message_parts:
await ctx.send(f"Matching messages:\n{part}")
else:
await ctx.send(f"No messages found containing '{keyword}'")
@bot.slash_command(name="savehistory", description="Save chat history")
async def save_history(ctx: discord.ApplicationContext):
def is_command(msg):
"""Checking if the message is a command call"""
if len(msg.content) == 0:
return False
elif msg.content.split()[0] == '_scan':
return True
else:
return False
data = []
async for msg in ctx.channel.history(limit=10000, oldest_first=True):
# if msg.author != ctx.user:
# if not is_command(msg):
# Get root message
data.append({'content': msg.content,
'created_at': msg.created_at,
'author': msg.author.name,
'jump_url': msg.jump_url,
'author_id': msg.author.id,
'msg_id': msg.id,
'channel_id': msg.channel.id,
'guild_id': msg.guild.id,
})
# Get thread messages (if any)
if msg.flags.has_thread:
async for thread_msg in msg.thread.history(limit=100, oldest_first=True):
data.append({'content': thread_msg.content,
'created_at': thread_msg.created_at,
'author': thread_msg.author.name,
'jump_url': thread_msg.jump_url,
'author_id': thread_msg.author.id,
'msg_id': thread_msg.id,
'channel_id': thread_msg.channel.id,
'guild_id': thread_msg.guild.id,
})
# if len(data) == limit:
# break
data = pd.DataFrame(data)
data.to_csv(CHAT_HISTORY_PATH)
await ctx.respond("Chat history saved!")
print(f'Chat history saved to {CHAT_HISTORY_PATH}')
bot.run(os.getenv('DISCORD_TOKEN'))
| [
"Give me a better version of the following sentence that is more concise and clear, in a polite, fun, yet professional tone: PLACEHOLDER"
] |
2024-01-10 | hellomikelo/hackathon-cohere-qdrant | embed_chat_history.py | import discord
import pandas as pd
import os
import time_uuid
import cohere
from qdrant_client import QdrantClient
from qdrant_client import models
from qdrant_client.http import models as rest
from dotenv import load_dotenv
QDRANT_CLOUD_HOST = "19531f2c-0717-4706-ac90-bd8dd1a6b0cc.us-east-1-0.aws.cloud.qdrant.io"
QDRANT_COLLECTION_NAME = 'discord'
# Google Drive path
CHAT_HISTORY_PATH = '/content/drive/MyDrive/career/projects/hackathons/lablab-cohere-qdrant-hackathon/discord-chat-history.csv'
load_dotenv()
cohere_client = cohere.Client(os.getenv('COHERE_API_KEY'))
qdrant_client = QdrantClient(
host=QDRANT_CLOUD_HOST,
prefer_grpc=False,
api_key=os.getenv('QDRANT_API_KEY'),
)
def clean_chat(df):
"""Clean chat history to keep only alphanums and Han Ideographs."""
_df = df.copy()
_df['content'] = (_df['content']
.str.replace('[^a-zA-Z\u4E00-\u9FFF\s]', '', regex=True)
.str.replace('(http\w+|\n)', '', regex=True)
.str.replace('<.*>', '', regex=True)
.str.lower()
.str.strip()
.fillna('')
)
_df['id'] = _df.created_at.apply(lambda x: str(time_uuid.TimeUUID.with_utc(pd.to_datetime(x))))
_df['word_count'] = _df.content.apply(lambda x: len(x.split(' ')))
return _df
def create_embeddings(dataset: pd.DataFrame):
# Embed chat messages
embeddings = cohere_client.embed(
texts=dataset.content.tolist(),
model='multilingual-22-12',
)
vector_size = len(embeddings.embeddings[0])
vectors = [list(map(float, vector)) for vector in embeddings.embeddings]
ids = dataset.id.tolist()
# Create Qdrant vector database collection
qdrant_client.recreate_collection(
collection_name=QDRANT_COLLECTION_NAME,
vectors_config=models.VectorParams(
size=vector_size,
distance=rest.Distance.DOT # for multilingual model
# distance=rest.Distance.COSINE # for large model
),
)
# Upsert new embeddings into vector search engine
qdrant_client.upsert(
collection_name=QDRANT_COLLECTION_NAME,
points=rest.Batch(
ids=ids,
vectors=vectors,
payloads=dataset.to_dict(orient='records'),
)
)
print('Vector database created.')
def test_embed():
# Test query embeddings
new_embeddings = cohere_client.embed(
texts=["discussions on horses", "discussions on asian countries", "interesting dog facts"],
# model="large",
model='multilingual-22-12',
)
results = []
k_max = 5
new_vectors = [list(map(float, vector)) for vector in new_embeddings.embeddings]
for embedding in new_vectors:
response = qdrant_client.search(
collection_name=QDRANT_COLLECTION_NAME,
query_vector=embedding,
limit=k_max,
)
results.append([record.payload['content'] for record in response])
print(results)
if __name__ == '__main__':
df = pd.read_csv(CHAT_HISTORY_PATH, index_col=0)
dataset = clean_chat(df)
embed = True
if embed:
create_embeddings(dataset)
| [] |
2024-01-10 | benthecoder/code_interpreter | code_interpreter.py | import ast
import json
import os
from inspect import signature, Parameter
import openai
from dotenv import load_dotenv
from pydantic import create_model
from fastcore.utils import nested_idx
import logging
import click
# Load environment variables
load_dotenv()
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Configure OpenAI API key
openai.api_key = os.getenv("OPENAI_API_KEY")
def askgpt(user, system=None, model="gpt-3.5-turbo", **kwargs):
"""Queries the OpenAI GPT model."""
msgs = [{"role": "user", "content": user}]
if system:
msgs.insert(0, {"role": "system", "content": system})
return openai.ChatCompletion.create(model=model, messages=msgs, **kwargs)
def response(compl):
"""Prints the content of the message from the given response."""
print(nested_idx(compl, "choices", 0, "message", "content"))
def sum(a: int, b: int = 1) -> int:
"""Adds two numbers together."""
return a + b
def schema(f):
"""Generates a schema for a given function using pydantic and inspect."""
params = signature(f).parameters
kw = {
n: (o.annotation, ... if o.default == Parameter.empty else o.default)
for n, o in params.items()
}
s = create_model(f"Input for `{f.__name__}`", **kw).model_json_schema()
return {"name": f.__name__, "description": f.__doc__, "parameters": s}
def run(code: str):
"""Executes the given Python code and returns the result."""
tree = ast.parse(code)
last_node = tree.body[-1] if tree.body else None
# If the last node is an expression, modify the AST to capture the result
if isinstance(last_node, ast.Expr):
tgts = [ast.Name(id="_result", ctx=ast.Store())]
assign = ast.Assign(targets=tgts, value=last_node.value)
tree.body[-1] = ast.fix_missing_locations(assign)
ns = {}
exec(compile(tree, filename="<ast>", mode="exec"), ns)
return ns.get("_result", None)
def python(code: str):
"""Prompts the user to execute a Python code and returns the result."""
if click.confirm(f"Do you want to run this code?\n```\n{code}\n```"):
return run(code)
return "#FAIL#"
def call_func(c, verbose=False):
"""Calls a function based on a message choice."""
fc = nested_idx(c, "choices", 0, "message", "function_call")
if not fc:
return "No function created, try again..."
if fc.name not in {"python", "sum"}:
return f"Not allowed: {fc.name}"
try:
args = json.loads(fc.arguments)
if verbose:
logger.info(args["code"])
f = globals()[fc.name]
return f(**args)
except json.JSONDecodeError:
if verbose:
logger.info(fc.arguments)
return run(fc.arguments)
def code_interpreter(query, verbose=True):
"""Interprets the given query."""
c = askgpt(
query,
system="Use Python for any required computation",
functions=[schema(python)],
)
if nested_idx(c, "choices", 0, "message", "function_call"):
return call_func(c, verbose)
return response(c)
@click.group()
def cli():
pass
@cli.command()
@click.argument("query")
@click.option("--verbose", is_flag=True, help="Enable verbose output.")
def q(query, verbose):
"""Interprets the given query."""
result = code_interpreter(query, verbose)
click.echo(result)
if __name__ == "__main__":
cli()
| [] |
2024-01-10 | numbertheory/ai-curses | ai_curses~handle_messages.py | import json
from ai_curses import openai
def initialize(args):
messages = initialize_messages(
history=args.load_history_json,
super_command=args.super
)
initialize_output(args)
return messages
def show_meta_help(app):
add_to_chat_output(
app,
"Type \":help\" and then enter or Return"
" to show additional commands",
"green_on_black"
)
def quit_program(messages, args):
json_dump_text = json.dumps(messages, indent=4)
if args.output_dir:
with open(args.output_json, 'w', encoding='utf-8') as f:
f.write(json_dump_text)
f.close()
if args.load_history_json and args.output_md:
with open(args.output_md, 'a', encoding='utf-8') as f:
f.write(
"\n\n## History\n\n"
"This history was loaded with the `-l` option.\n\n"
f"```json{json.dumps(messages, indent=4)}```\n\n")
f.close()
exit(0)
def process_request(messages, timeout, model):
return openai.chatgpt(messages, timeout=timeout, model=model)
def add_to_chat_output(app, text, color):
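    # Wrap the text to the panel width, then print and scroll each line into the chat panel.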
new_line_split = text.split('\n')
paragraphs = []
for line in new_line_split:
if len(line) < app.cols:
paragraphs.append(line)
else:
chunks, chunk_size = len(line), app.cols
y = [line[i:i+chunk_size] for i in range(0, chunks, chunk_size)]
for i in range(0, len(y)):
paragraphs.append(y[i])
for para in paragraphs:
chunks, chunk_size = len(para), app.cols
scroller = [para[i:i+chunk_size] for i in range(0, chunks, chunk_size)]
for line in scroller:
app.print(
x=0,
y=app.rows - 5,
content="{}".format(f"{str(line):<{app.cols}}"),
panel="layout.0",
color=color
)
app.panels["layout"][0].scroll(1)
app.screen.refresh()
def initialize_messages(
history=None,
super_command="You are a helpful assistant."
):
if not history:
return [{"role": "system", "content": f"{super_command}"}]
else:
with open(history, 'r') as f:
messages = json.load(f)
return messages
def initialize_output(args):
if args.output_dir:
with open(args.output_md, 'a', encoding='utf-8') as f:
f.write(
"## Prompt\n\n {} \n\n## Conversation\n\n".format(
args.super.replace('"""', '')
)
)
f.close()
def message_handler(messages, response, status_code, output_file):
while len(messages) > 25:
messages.pop(1)
if status_code == 200:
command = messages[-1].get('content')
messages.append({"role": "assistant", "content": response.strip()})
if output_file:
with open(output_file, 'a', encoding='utf-8') as f:
f.write("Human> {} \n\n".format(command))
f.write("AI> {} \n\n".format(response))
f.close()
return messages
def process_helper(messages, app, command, args):
response, status_code = process_request(
messages, args.timeout, args.model
)
messages = message_handler(
messages, response, status_code, args.output_md
)
add_to_chat_output(
app, f"Human> {command}", "aqua_on_navy"
)
add_to_chat_output(
app, f"AI> {response}", "black_on_silver"
)
| [
"PLACEHOLDER"
] |
2024-01-10 | numbertheory/ai-curses | ai_curses~meta_commands.py | from ai_curses import handle_messages as hm
from ai_curses import openai
import requests
import shutil
import ai_curses.command_prompt as command_prompt
def blank_line():
return " \n"
def main_help(app, args, command, messages):
return f"""
{blank_line()}
:help - Show this help.
:history - Show current history stats.
:settings - Show the current settings of this session.
:prompt - Show the current system prompt (super command).
:forget - Remove the oldest message from the message list,
if your chat has gotten too big.
:image [PROMPT] - Create an image with a prompt. You must provide a prompt,
and there must be an output directory set in the config or with the -o flag.
{blank_line()}
Type 'quit' or 'exit' to exit the program.
{blank_line()}
"""
def prompt(app, args, command, messages):
prompt = args.super.replace('"""', '').replace('\n', '')
return f"""
{blank_line()}
Prompt:
{prompt}
{blank_line()}
"""
def image(app, args, command, messages):
image_prompt = command.split(":image")[1].strip()
if len(image_prompt) < 1 or not args.output_dir:
return f"""
{blank_line()}
You must provide a prompt, and an output directory must be set.
{blank_line()}
example: :image a pretty swan
{blank_line()}
your prompt: :image {image_prompt}
output directory: {args.output_dir}
"""
else:
app.panels["prompt"].clear()
app.screen.refresh()
command_prompt.title(app, processing=True)
response = openai.get_image(image_prompt, timeout=args.timeout)
if response.status_code == 200:
image_url = response.json().get('data')[0]['url']
res = requests.get(image_url, stream=True)
dest = f"{args.output_dir}/{image_prompt}.png"
if res.status_code == 200:
with open(dest, 'wb') as f:
shutil.copyfileobj(res.raw, f)
f.close()
else:
return f"""
Image successfully generated, but could not be downloaded:
Image URL: {image_url}
"""
with open(args.output_md, 'a', encoding='utf-8') as f:
f.write("Human> Image Prompt: {} \n\n".format(image_prompt))
f.write("AI> \n\n ![[{}]] \n\n".format(f"{image_prompt}.png"))
f.close()
return f"Image saved to: {args.output_dir}/{image_prompt}.png"
else:
return f"""Image generation failed: \n{response.text}"""
def settings(app, args, command, messages):
if not args.output_md:
output_md = "none output file set"
else:
output_md = f"\"{args.output_md}\""
if not args.output_json:
output_json = "no output json file set"
else:
output_json = f"\"{args.output_json}\""
return f"""
{blank_line()}
Settings:
{blank_line()}
Output folder - {args.output_dir}
Output Markdown File - {output_md}
Output JSON file - {output_json}
Timeout - {args.timeout} seconds
{blank_line()}
"""
def history(app, args, command, messages):
role_counts = {"system": 0, "assistant": 0, "user": 0, "unknown": 0}
for msg in messages:
role_counts[msg.get('role', 'unknown')] += 1
return f"""
{blank_line()}
History:
{blank_line()}
# of messages - {len(messages)}
- system: {role_counts.get('system')}
- assistant: {role_counts.get('assistant')}
- user: {role_counts.get('user')}
- unknown: {role_counts.get('unknown')}
{blank_line()}
"""
def help_menu():
return {
"help": main_help,
"history": history,
"settings": settings,
"prompt": prompt,
"image": image
}
def handler(app, args, command, messages):
meta_command = command.split(" ")[0][1:]
if help_menu().get(meta_command, None):
hm.add_to_chat_output(
app, help_menu().get(meta_command, lambda: 'Invalid')(
app, args, command, messages
),
"green_on_black"
)
else:
hm.add_to_chat_output(
app,
f"{blank_line()}You tried a meta-command called "
f"\"{meta_command}\".\n"
f"{blank_line()}Unfortunately, I don't know that "
f"one!\n{blank_line()}",
"green_on_black"
)
| [
"\n"
] |
2024-01-10 | rkist/website | app~chatgpt~gepeto.py | #Note: The openai-python library support for Azure OpenAI is in preview.
import os
import openai
class Gepeto:
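    """Wrap Azure OpenAI chat completions to build an HTML page for whatever URL path is requested."""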
def __init__(self):
openai.api_type = "azure"
openai.api_base = "https://gepeto.openai.azure.com/"
openai.api_version = "2023-03-15-preview"
openai.api_key = os.getenv("OPENAI_API_KEY")
self.base_prompt = [
{"role":"system","content":"You are an AI that creates HTML documents with content that matches the URL path and adds relevant href links to the content."},
{"role":"user","content":"/actors/top10"},
{"role":"assistant","content":"<!DOCTYPE html>\n<html>\n<head>\n\t<title>Top 10 Actors</title>\n</head>\n<body>\n\t<h1>Top 10 Actors</h1>\n\t<p>Here are the top 10 actors of all time:</p>\n\t<ol>\n\t\t<li><a href=\"/actors/marlon-brando\">Marlon Brando</a></li>\n\t\t<li><a href=\"/actors/al-pacino\">Al Pacino</a></li>\n\t\t<li><a href=\"/actors/robert-de-niro\">Robert De Niro</a></li>\n\t\t<li><a href=\"/actors/meryl-streep\">Meryl Streep</a></li>\n\t\t<li><a href=\"/actors/tom-hanks\">Tom Hanks</a></li>\n\t\t<li><a href=\"/actors/denzel-washington\">Denzel Washington</a></li>\n\t\t<li><a href=\"/actors/jack-nicholson\">Jack Nicholson</a></li>\n\t\t<li><a href=\"/actors/dustin-hoffman\">Dustin Hoffman</a></li>\n\t\t<li><a href=\"/actors/leonardo-dicaprio\">Leonardo DiCaprio</a></li>\n\t\t<li><a href=\"/actors/anthony-hopkins\">Anthony Hopkins</a></li>\n\t</ol>\n</body>\n</html> \n\nThis HTML document displays the top 10 actors of all time."},
{"role":"user","content":"/incredible/worldplaces"},
{"role":"assistant","content":"<!DOCTYPE html>\n<html>\n<head>\n\t<title>Incredible World Places</title>\n</head>\n<body>\n\t<h1>Incredible World Places</h1>\n\t<p>There are many incredible places in the world that are worth visiting. Here are some of them:</p>\n\t<ul>\n\t\t<li><a href=\"/incredible/worldplaces/machu-picchu\">Machu Picchu</a></li>\n\t\t<li><a href=\"/incredible/worldplaces/great-wall-of-china\">Great Wall of China</a></li>\n\t\t<li><a href=\"/incredible/worldplaces/pyramids-of-giza\">Pyramids of Giza</a></li>\n\t\t<li><a href=\"/incredible/worldplaces/taj-mahal\">Taj Mahal</a></li>\n\t\t<li><a href=\"/incredible/worldplaces/colosseum\">Colosseum</a></li>\n\t\t<li><a href=\"/incredible/worldplaces/petra\">Petra</a></li>\n\t\t<li><a href=\"/incredible/worldplaces/mount-everest\">Mount Everest</a></li>\n\t\t<li><a href=\"/incredible/worldplaces/niagara-falls\">Niagara Falls</a></li>\n\t\t<li><a href=\"/incredible/worldplaces/grand-canyon\">Grand Canyon</a></li>\n\t\t<li><a href=\"/incredible/worldplaces/santorini\">Santorini</a></li>\n\t</ul>\n</body>\n</html> \n\nThis HTML document displays a list of incredible world places that are worth visiting."}
]
self.current_prompt = self.base_prompt
def get_response(self, path: str):
content = f"/{path}"
self.current_prompt += [{"role":"user","content":content}]
response = openai.ChatCompletion.create(
engine="poc-GPT35",
messages = self.current_prompt,
temperature=0.7,
max_tokens=800,
top_p=0.95,
frequency_penalty=0,
presence_penalty=0,
stop=None)
content = response.get('choices')[0].get('message').get('content')
self.current_prompt += [{"role":"assistant","content":content}]
return content | [
"/incredible/worldplaces",
"/actors/top10",
"/PLACEHOLDER",
"You are an AI that creates HTML documents with content that matches the URL path and adds relevant href links to the content."
] |
2024-01-10 | iojw/RouteLLM | classification~classifiers.py | import math
import os
import random
from abc import abstractmethod
from collections import Counter
from enum import Enum
from typing import List
import openai
import samples
import torch
from datasets import load_dataset
from sentence_transformers import SentenceTransformer
from torch import nn
class Label(Enum):
CODING = 0
MATH = 1
NONE = 2
FAILED = 3
class Classifier:
@abstractmethod
def is_code_prompt(self, prompt: str) -> bool:
pass
class RandomClassifier(Classifier):
def __init__(self, model=None, api_base=None, api_key=None):
self.model = "Random"
def is_code_prompt(self, prompt: str) -> bool:
return bool(random.getrandbits(1))
def classify_prompt(self, prompt: str) -> bool:
return random.choice([Label.CODING, Label.MATH, Label.NONE])
class NgramClassifier:
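    """Character n-gram likelihood model that separates code prompts from natural-language prompts."""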
def __init__(self, ngram_size=2):
self.model = "Ngram"
self.ngram_size = ngram_size
self.code_ngrams = Counter()
self.language_ngrams = Counter()
self.total_code_ngrams = 0
self.total_language_ngrams = 0
self.train(samples.code_samples, samples.language_samples)
def _preprocess(self, text):
return text.lower().strip()
def _extract_ngrams(self, text):
text = self._preprocess(text)
ngrams = []
for i in range(len(text) - self.ngram_size + 1):
ngrams.append(text[i : i + self.ngram_size])
return ngrams
def train(self, code_samples, language_samples):
for sample in code_samples:
ngrams = self._extract_ngrams(sample)
self.code_ngrams.update(ngrams)
self.total_code_ngrams += len(ngrams)
for sample in language_samples:
ngrams = self._extract_ngrams(sample)
self.language_ngrams.update(ngrams)
self.total_language_ngrams += len(ngrams)
def _calculate_ngram_probability(self, ngram, is_code):
if is_code:
return (self.code_ngrams[ngram] + 1) / (self.total_code_ngrams + 1)
else:
return (self.language_ngrams[ngram] + 1) / (self.total_language_ngrams + 1)
def is_code_prompt(self, prompt):
ngrams = self._extract_ngrams(prompt)
code_prob = 0
lang_prob = 0
for ngram in ngrams:
code_prob += math.log(self._calculate_ngram_probability(ngram, True))
lang_prob += math.log(self._calculate_ngram_probability(ngram, False))
return code_prob > lang_prob
def classify_prompt(self, prompt):
raise NotImplementedError("NgramClassifier does not support classify_prompt")
class LLMClassifier(Classifier):
def __init__(self, model=None, api_base=None, api_key=None):
assert model is not None, "Please specify a model name"
self.model = model
self.api_base = "https://api.openai.com/v1" if api_base is None else api_base
self.api_key = os.environ["OPENAI_API_KEY"] if api_key is None else api_key
def is_code_prompt(self, prompt: str) -> bool:
openai.api_key = self.api_key
openai.api_base = self.api_base
prompt_template = """
Please determine whether the following user prompt is related to code or not:\n\n"
{prompt}\n\n
====
If it's related to code, output "[[Y]]", if not, output "[[N]]". Please carefully follow this format.
"""
convs = [
{"role": "user", "content": prompt_template.format(prompt=prompt)},
]
response = openai.ChatCompletion.create(
model=self.model,
messages=convs,
temperature=0,
max_tokens=512,
)
output = response["choices"][0]["message"]["content"]
if "[[Y]]" in output:
return True
elif "[[N]]" in output:
return False
else:
raise ValueError("Invalid response.", output)
def classify_prompt(self, prompt: str) -> bool:
openai.api_key = self.api_key
openai.api_base = self.api_base
prompt_template = """
Determine whether the user query falls into one of the following categories:
1. Coding: Queries about coding, programming languages, libraries, and tools.
2. Math: Queries about math problem solving.
3. None: Anything that does not fall into the above categories.
Your output should be wrapped by "[[" and "]]". For example, "[[3. None]]".
[USER QUERY] {prompt!r}
[ANSWER]
"""
convs = [
{"role": "user", "content": prompt_template.format(prompt=prompt)},
]
response = openai.ChatCompletion.create(
model=self.model,
messages=convs,
temperature=0,
max_tokens=512,
)
output = response["choices"][0]["message"]["content"]
# regex to extract the answer
import re
m = re.search(r"\[\[(.*)\]\]", output)
if m is None:
print("Invalid response.", output)
return "format_error"
output = m.group(1)
if "Coding" in output:
return Label.CODING
elif "Math" in output:
return Label.MATH
elif "None" in output:
return Label.NONE
else:
print("Invalid response.", output)
return Label.FAILED
class EmbeddingClassifier(nn.Module):
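    """Frozen sentence-transformer embeddings with a trainable linear head over coding / math / none."""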
def __init__(self, model_path=None):
super().__init__()
self.embed_model = SentenceTransformer("all-mpnet-base-v2")
self.embed_model.requires_grad_(False)
self.classifier = nn.Linear(768, 3)
if model_path is not None:
if os.path.exists(model_path):
print(f"Loading {model_path}")
self.classifier.load_state_dict(torch.load(model_path))
else:
print("No trained model found.")
else:
print("No model provided, use a random initialized model")
self.map_name = ["coding", "math", "none"]
self.model = f"Embedding Classifier {model_path}"
def forward(self, prompts: List[str]) -> bool:
embeddings = self.embed_model.encode(prompts)
embeddings_tensor = torch.vstack([torch.Tensor(embedding) for embedding in embeddings])
return self.classifier(embeddings_tensor)
def classify_prompt(self, prompt: str) -> str:
_, idx = torch.max(self.forward([prompt])[0], 0)
pred_class = self.map_name[idx.item()]
if pred_class == "coding":
return Label.CODING
elif pred_class == "math":
return Label.MATH
elif pred_class == "none":
return Label.NONE
def train(self, dataloader, optimizer, epochs, loss_fn=torch.nn.CrossEntropyLoss()):
for epoch in range(epochs):
for batch in dataloader:
prompts = list(batch["Prompt"][0])
labels = batch["Label"][0].to(torch.long)
optimizer.zero_grad()
output = self(prompts)
assert output.shape[0] == labels.shape[0] and output.shape[1] == 3
loss = loss_fn(output, labels)
loss.backward()
optimizer.step()
print(loss.item())
torch.save(self.classifier.state_dict(), f"embedding_model_{epoch + 1}.pt")
| [
"\nDetermine whether the user query falls into one of the following categories:\n1. Coding: Queries about coding, programming languages, libraries, and tools.\n2. Math: Queries about math problem solving.\n3. None: Anything that does not fall into the above categories.\nYour output should be wrapped by \"[[\" and \"]]\". For example, \"[[3. None]]\".\n\n[USER QUERY] {prompt!r}\n\n[ANSWER]\n",
"['P']",
"\nPlease determine whether the following user prompt is related to code or not:\n\n\"\n{prompt}\n\n\n====\nIf it's related to code, output \"[[Y]]\", if not, output \"[[N]]\". Please carefully follow this format.\n"
] |
2024-01-10 | LT7T/chadgpt-backend | start2.py | # Imports
import os
from dotenv import load_dotenv
import textwrap
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import DeepLake
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
# Set the maximum line width
max_line_width = 80
# Load environment variables from .env file
load_dotenv()
embeddings = OpenAIEmbeddings()
# Initialize OpenAI wrapper
llm = OpenAI(temperature=0.9)
root_dir = './clone-nayms'
docs = []
for dirpath, dirnames, filenames in os.walk(root_dir):
for file in filenames:
try:
loader = TextLoader(os.path.join(dirpath, file), encoding='utf-8')
docs.extend(loader.load_and_split())
except Exception as e:
pass
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.split_documents(docs)
db = DeepLake.from_documents(texts, embeddings)
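# Configure retrieval over the indexed code: cosine distance, fetch 100 candidates,
# re-rank with maximal marginal relevance, and return the top 20 chunks per query.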
retriever = db.as_retriever()
retriever.search_kwargs['distance_metric'] = 'cos'
retriever.search_kwargs['fetch_k'] = 100
retriever.search_kwargs['maximal_marginal_relevance'] = True
retriever.search_kwargs['k'] = 20
model = ChatOpenAI(temperature=0)
qa = ConversationalRetrievalChain.from_llm(model, retriever=retriever)
def ask_questions(qa, questions, chat_history=None, max_line_width=80):
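    # Run each question through the retrieval-QA chain, carrying the rolling chat history,
    # and print each answer wrapped to max_line_width characters.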
if chat_history is None:
chat_history = []
for question in questions:
result = qa({"question": question, "chat_history": chat_history})
chat_history.append((question, result['answer']))
wrapped_answer = textwrap.fill(result['answer'], width=max_line_width)
print(f"-> **Question**: {question}\n")
print(f"**Answer**:\n{wrapped_answer}\n")
questions = [
"Give me a list of all of the methods in AdminFacet.",
]
ask_questions(qa, questions, max_line_width=max_line_width)
| [] |
2024-01-10 | Kaydonbob03/kaydonbotv2 | kaydonbotv2.py | import os
import discord
import openai
import json
import requests
import random
import asyncio
import dateparser
import datetime
from discord.ext import commands, tasks
from discord import app_commands
# =======================================================================================================================================
# =========================================================={GUILD/TESTING ID BLOCK}=====================================================
# =======================================================================================================================================
# insert the script in the text file here if the global script below is broken
# =======================================================================================================================================
# =========================================================={NO GUILD ID BLOCK}==========================================================
# =======================================================================================================================================
# --------------------------------------------------INITIALIZATION------------------------------------------------------
# Set your OpenAI API key (ensure this is set in your environment variables)
openai.api_key = os.getenv('OPENAI_API_KEY')
# Create a bot instance
intents = discord.Intents.default()
intents.members = True
intents.message_content = True  # needed in discord.py 2.x to read message text for prefix commands and the welcome setup flow
bot = commands.Bot(command_prefix=';', intents=intents)
# Global dictionary to store welcome channel configurations
welcome_channels = {}
# Global dictionary to store temporary configuration data
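# (tracks in-progress /welcomeconfig sessions per guild: stage 1 = enable/disable,
#  stage 2 = pick the channel, stage 3 = set the custom welcome message)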
temp_config = {}
# Status'
@tasks.loop(hours=1) # Change status every hour
async def change_status():
await bot.wait_until_ready()
# Get the number of servers the bot is in
num_servers = len(bot.guilds)
# Define the statuses
statuses = [
discord.Activity(type=discord.ActivityType.watching, name="/commands"),
discord.Game(f"in {num_servers} servers")
]
# Choose a random status and set it
current_status = random.choice(statuses)
await bot.change_presence(activity=current_status)
# Event listener for when the bot is ready
@bot.event
async def on_ready():
# Sync the command tree globally
await bot.tree.sync()
global welcome_channels
welcome_channels = await load_welcome_channels()
print(f'Logged in as {bot.user.name} (ID: {bot.user.id})')
print('------')
    if not change_status.is_running():
        change_status.start()
@bot.event
async def on_guild_join(guild):
# Create an embed message
embed = discord.Embed(
title="Hello! I'm Kaydonbot",
description="Thanks for inviting me to your server!",
color=discord.Color.gold()
)
embed.add_field(name="Prefix", value="; for non-slash commands", inline=False)
embed.add_field(name="Commands", value="Use `/commands` to see all my commands", inline=False)
embed.set_footer(text="Kaydonbot - Copyright (c) Kayden Cormier -- K-GamesMedia")
# List of preferred channel names
preferred_channels = ["welcome", "general", "mod-chat", "mods-only"]
# Try to find a preferred channel
for channel_name in preferred_channels:
channel = discord.utils.get(guild.text_channels, name=channel_name)
if channel and channel.permissions_for(guild.me).send_messages:
await channel.send(embed=embed)
return
# If no preferred channel is found, send in any channel where the bot has permission
for channel in guild.text_channels:
if channel.permissions_for(guild.me).send_messages:
await channel.send(embed=embed)
break
# -------------------------------------------------INITIALIZATION ENDS--------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
# --------------------------------------------------COMMANDS LIST-------------------------------------------------------
# Define a slash command for 'commands'
@bot.tree.command(name="commands", description="Get a list off all commands")
async def commands(interaction: discord.Interaction):
await interaction.response.defer()
message = await interaction.followup.send(embed=get_general_commands_embed())
# Add reactions for navigation
await message.add_reaction("⏪") # Fast rewind to first page
await message.add_reaction("⬅️") # Previous page
await message.add_reaction("➡️") # Next page
await message.add_reaction("⏩") # Fast forward to last page
def get_general_commands_embed():
embed = discord.Embed(
title="Kaydonbot General Commands",
description="Commands available for all users. Default prefix is ';'",
color=discord.Color.gold()
)
embed.add_field(name="/commands", value="Displays list of all commands", inline=False)
embed.add_field(name="/hello", value="Bot will say hello", inline=False)
embed.add_field(name="/chat [prompt]", value="Sends a prompt to the GPT API and returns a response", inline=False)
embed.add_field(name="/image [prompt]", value="Uses DALL-E 3 to generate an image based on your prompt", inline=False)
embed.add_field(name="/quote", value="Get an inspirational quote", inline=False)
embed.add_field(name="/joke", value="Tell a random joke", inline=False)
embed.add_field(name="/weather [location]", value="Get the current weather for a location", inline=False)
embed.add_field(name="/reminder [time] [reminder]", value="Set a reminder", inline=False)
embed.add_field(name="/poll [question] [options]", value="Create a poll", inline=False)
embed.add_field(name="/random [choices]", value="Make a random choice", inline=False)
embed.set_footer(text="Page 1/4")
return embed
def get_mod_commands_embed():
embed = discord.Embed(
title="Kaydonbot Moderator Commands",
description="Commands available for moderators and administrators.",
color=discord.Color.green()
)
# Add fields for each moderator command
embed.add_field(name="/welcomeconfig", value="Configuration for user welcome message", inline=False)
embed.add_field(name="/msgclear [channel] [number]", value="Clear a specified number of messages in a channel", inline=False)
embed.add_field(name="/mute [member] [duration] [reason]", value="Mute a member", inline=False)
embed.add_field(name="/unmute [member]", value="Unmute a member", inline=False)
embed.add_field(name="/lock [channel]", value="Lock a channel", inline=False)
embed.add_field(name="/unlock [channel]", value="Unlock a channel", inline=False)
embed.add_field(name="/slowmode [channel] [seconds]", value="Set slowmode in a channel", inline=False)
embed.add_field(name="/purgeuser [channel] [member] [number]", value="Clear messages by a specific user", inline=False)
embed.add_field(name="/announce [channel] [message]", value="Send an announcement", inline=False)
embed.add_field(name="/addrole [member] [role]", value="Add a role to a member", inline=False)
embed.add_field(name="/removerole [member] [role]", value="Remove a role from a member", inline=False)
embed.set_footer(text="Page 3/4")
return embed
def get_bot_games_commands_embed():
embed = discord.Embed(
title="Kaydonbot Bot Games Commands",
description="Fun games you can play with the bot.",
color=discord.Color.blue()
)
embed.add_field(name="/battle", value="Start a battle game", inline=False)
embed.add_field(name="/blackjack", value="Play a game of blackjack", inline=False)
embed.add_field(name="/wouldyourather", value="Play a round of Would You Rather", inline=False)
embed.add_field(name="/truthordare", value="Play a fun little Truth or Dare game", inline=False)
# Add more bot game commands here
embed.set_footer(text="Page 2/4")
return embed
def get_suggestions_commands_embed():
embed = discord.Embed(
title="Kaydonbot Suggestions Commands",
description="Commands to suggest new features or content for the bot.",
color=discord.Color.purple()
)
embed.add_field(name="/cmdsuggestion [Suggestion]", value="Suggest a new command.", inline=False)
embed.add_field(name="/tdsuggestion [option] {truth/dare} [suggestion]", value="Suggest a SFW Truth or Dare.", inline=False)
embed.add_field(name="/wyrsuggestion [suggestion]", value="Suggest a 'Would You Rather' question.", inline=False)
embed.set_footer(text="Page 4/4")
return embed
@bot.event
async def on_reaction_add(reaction, user):
    if user != bot.user and reaction.message.author == bot.user and reaction.message.embeds \
            and reaction.message.embeds[0].footer.text and reaction.message.embeds[0].footer.text.startswith("Page"):
embeds = [
get_general_commands_embed(),
get_bot_games_commands_embed(),
get_mod_commands_embed(),
get_suggestions_commands_embed()
]
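        # The /commands embeds carry a "Page X/4" footer; parse out the current page number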
current_page = int(reaction.message.embeds[0].footer.text.split('/')[0][-1]) - 1
if reaction.emoji == "➡️":
next_page = (current_page + 1) % len(embeds)
await reaction.message.edit(embed=embeds[next_page])
elif reaction.emoji == "⬅️":
next_page = (current_page - 1) % len(embeds)
await reaction.message.edit(embed=embeds[next_page])
elif reaction.emoji == "⏩":
await reaction.message.edit(embed=embeds[-1]) # Go to last page
elif reaction.emoji == "⏪":
await reaction.message.edit(embed=embeds[0]) # Go to first page
await reaction.remove(user)
# --------------------------------------------------COMMANDS LIST ENDS-------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------SUGGESTIONS CMDS--------------------------------------------------------
# Ensure the suggestions directory exists ("~" has to be expanded; os.makedirs does not do it)
SUGGESTIONS_DIR = os.path.expanduser("~/hosting/suggestions")
os.makedirs(SUGGESTIONS_DIR, exist_ok=True)
@bot.tree.command(name="cmdsuggestion", description="Suggest a new command")
async def cmdsuggestion(interaction: discord.Interaction, suggestion: str):
with open("~/hosting/suggestions/cmd_suggestions.txt", "a") as file:
file.write(f"{suggestion}\n")
await interaction.response.send_message("Your command suggestion has been recorded. Thank you!", ephemeral=True)
@bot.tree.command(name="tdsuggestion", description="Suggest a SFW Truth or Dare")
async def tdsuggestion(interaction: discord.Interaction, option: str, suggestion: str):
filename = "truth_suggestions.txt" if option.lower() == "truth" else "dare_suggestions.txt"
with open(f"~/hosting/suggestions/{filename}", "a") as file:
file.write(f"{suggestion}\n")
await interaction.response.send_message("Your Truth or Dare suggestion has been recorded. Thank you!", ephemeral=True)
@bot.tree.command(name="wyrsuggestion", description="Suggest a 'Would You Rather' question")
async def wyrsuggestion(interaction: discord.Interaction, suggestion: str):
with open("~/hosting/suggestions/wyr_suggestions.txt", "a") as file:
file.write(f"{suggestion}\n")
await interaction.response.send_message("Your 'Would You Rather' suggestion has been recorded. Thank you!", ephemeral=True)
# ----------------------------------------------------SUGGESTIONS ENDS-------------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------MOD-ONLY COMMANDS-------------------------------------------------------
# Check if user is admin/mod
def is_admin_or_mod():
async def predicate(interaction: discord.Interaction):
return interaction.user.guild_permissions.administrator or \
any(role.name.lower() in ['admin', 'moderator'] for role in interaction.user.roles)
return app_commands.check(predicate)
#******************************WELCOME MESSAGE******************************
def save_welcome_channels():
try:
with open('welcome_channels.json', 'w') as file:
json.dump(welcome_channels, file, indent=4)
except Exception as e:
print(f"Error saving welcome channels: {e}")
# Consider logging this error or handling it appropriately
async def load_welcome_channels():
    global welcome_channels
    try:
        with open('welcome_channels.json', 'r') as file:
            # JSON keys come back as strings; convert them to int guild IDs so lookups by guild.id work
            welcome_channels = {int(guild_id): config for guild_id, config in json.load(file).items()}
    except FileNotFoundError:
        welcome_channels = {}
        # Consider logging this error or handling it appropriately
    return welcome_channels
@bot.tree.command(name="welcomeconfig", description="Configure the welcome channel")
@is_admin_or_mod()
async def welcomeconfig(interaction: discord.Interaction):
try:
await interaction.response.defer()
# Initiate the configuration process
temp_config[interaction.guild_id] = {"stage": 1} # Stage 1: Ask to enable/disable
embed = discord.Embed(
title="Welcome Configuration",
description="Welcome to the welcome config settings.\n\n"
"1. Please type 'enable' to enable welcome messages or 'disable' to disable them.\n"
"2. If enabled, you will be prompted to specify a channel and set a custom welcome message.",
color=discord.Color.gold()
)
await interaction.followup.send(embed=embed)
except Exception as e:
await interaction.followup.send(f"Failed to initiate welcome configuration: {e}")
@bot.event
async def on_message(message):
    if message.author.bot or message.guild is None:
        return
    guild_id = message.guild.id
if guild_id in temp_config:
if temp_config[guild_id]["stage"] == 1:
# Handle enabling/disabling welcome messages
content_lower = message.content.strip().lower()
if content_lower == 'enable':
temp_config[guild_id] = {"stage": 2, "enabled": True} # Move to stage 2
await message.channel.send("Welcome messages enabled. Please mention the channel for welcome messages.")
elif content_lower == 'disable':
welcome_channels[guild_id] = {"enabled": False}
save_welcome_channels()
await message.channel.send("Welcome messages will be disabled. They can always be enabled later.")
del temp_config[guild_id]
else:
await message.channel.send("Please type 'enable' or 'disable'.")
elif temp_config[guild_id]["stage"] == 2:
# Handle channel selection
if message.channel_mentions:
selected_channel = message.channel_mentions[0]
temp_config[guild_id] = {"stage": 3, "channel_id": selected_channel.id, "enabled": True} # Move to stage 3
embed = discord.Embed(
title="Welcome Configuration",
description="Channel set successfully. Please specify the custom welcome message.",
color=discord.Color.green()
)
await message.channel.send(embed=embed)
else:
await message.channel.send("Please mention a valid channel.")
elif temp_config[guild_id]["stage"] == 3:
# Handle custom welcome message
custom_message = message.content
channel_id = temp_config[guild_id]["channel_id"]
welcome_channels[guild_id] = {"channel_id": channel_id, "message": custom_message, "enabled": True}
save_welcome_channels() # Save the configuration
embed = discord.Embed(
title="Welcome Configuration",
description="Custom welcome message set successfully.",
color=discord.Color.gold()
)
await message.channel.send(embed=embed)
            # Clear temporary configuration data
            del temp_config[guild_id]
    # Keep prefix (;) commands working even though on_message is overridden
    await bot.process_commands(message)
# Send welcome message on user join
@bot.event
async def on_member_join(member):
guild_id = member.guild.id
if guild_id in welcome_channels and welcome_channels[guild_id].get("enabled", False):
channel_id = welcome_channels[guild_id].get("channel_id")
custom_message = welcome_channels[guild_id].get("message", f"Welcome to the server, {member.mention}!")
channel = member.guild.get_channel(channel_id) if channel_id else None
if channel:
await channel.send(custom_message.format(member=member.mention))
else:
# Fallback to default message if no custom configuration is found or welcome messages are disabled
channel = discord.utils.get(member.guild.text_channels, name='welcome')
if channel:
await channel.send(f"Welcome to the server, {member.mention}!")
#****************************WELCOME MESSAGE ENDS****************************
# Define a slash command for 'msgclear'
@bot.tree.command(name="msgclear", description="Clear a specified number of messages in a channel")
@is_admin_or_mod()
async def msgclear(interaction: discord.Interaction, channel: discord.TextChannel, number: int):
try:
await interaction.response.defer()
if number < 1 or number > 100:
await interaction.followup.send("Please specify a number between 1 and 100.")
return
messages = [message async for message in channel.history(limit=number)]
if not messages:
await interaction.followup.send("No messages to delete.")
return
deleted_count = 0
for message in messages:
if (discord.utils.utcnow() - message.created_at).days < 14:
await message.delete()
deleted_count += 1
confirmation_message = await interaction.followup.send(f"Cleared {deleted_count} messages in {channel.mention}.")
await asyncio.sleep(5) # Wait for 5 seconds
await confirmation_message.delete()
except Exception as e:
await interaction.followup.send(f"Failed to clear messages: {e}")
@bot.tree.command(name="warn", description="Warn a member")
@is_admin_or_mod()
async def warn(interaction: discord.Interaction, member: discord.Member, reason: str = "No reason provided"):
try:
await interaction.response.defer()
# Send a DM to the member with the warning
await member.send(f"You have been warned for: {reason}")
await interaction.followup.send(f"{member.mention} has been warned for: {reason}")
except Exception as e:
await interaction.followup.send(f"Failed to warn member: {e}")
@bot.tree.command(name="kick", description="Kick a member from the server")
@is_admin_or_mod()
async def kick(interaction: discord.Interaction, member: discord.Member, reason: str = "No reason provided"):
try:
await interaction.response.defer()
await member.kick(reason=reason)
await interaction.followup.send(f"{member.mention} has been kicked for: {reason}")
except Exception as e:
await interaction.followup.send(f"Failed to kick member: {e}")
@bot.tree.command(name="ban", description="Ban a member from the server")
@is_admin_or_mod()
async def ban(interaction: discord.Interaction, member: discord.Member, reason: str = "No reason provided"):
try:
await interaction.response.defer()
await member.ban(reason=reason)
await interaction.followup.send(f"{member.mention} has been banned for: {reason}")
except Exception as e:
await interaction.followup.send(f"Failed to ban member: {e}")
@bot.tree.command(name="mute", description="Mute a member")
@is_admin_or_mod()
async def mute(interaction: discord.Interaction, member: discord.Member, duration: int, reason: str = "No reason provided"):
try:
await interaction.response.defer()
muted_role = discord.utils.get(member.guild.roles, name="Muted")
if not muted_role:
await interaction.followup.send("Muted role not found.")
return
await member.add_roles(muted_role, reason=reason)
await interaction.followup.send(f"{member.mention} has been muted for {duration} minutes. Reason: {reason}")
await asyncio.sleep(duration * 60) # Convert minutes to seconds
if muted_role in member.roles:
await member.remove_roles(muted_role, reason="Mute duration expired")
await interaction.followup.send(f"{member.mention} has been unmuted.")
except Exception as e:
await interaction.followup.send(f"Failed to mute member: {e}")
@bot.tree.command(name="unmute", description="Unmute a member")
@is_admin_or_mod()
async def unmute(interaction: discord.Interaction, member: discord.Member):
try:
await interaction.response.defer()
muted_role = discord.utils.get(member.guild.roles, name="Muted")
if not muted_role:
await interaction.followup.send("Muted role not found.")
return
if muted_role in member.roles:
await member.remove_roles(muted_role, reason="Manually unmuted")
await interaction.followup.send(f"{member.mention} has been unmuted.")
else:
await interaction.followup.send(f"{member.mention} is not muted.")
except Exception as e:
await interaction.followup.send(f"Failed to unmute member: {e}")
@bot.tree.command(name="lock", description="Lock a channel")
@is_admin_or_mod()
async def lock(interaction: discord.Interaction, channel: discord.TextChannel):
try:
await interaction.response.defer()
await channel.set_permissions(channel.guild.default_role, send_messages=False)
await interaction.followup.send(f"{channel.mention} has been locked.")
except Exception as e:
await interaction.followup.send(f"Failed to lock the channel: {e}")
@bot.tree.command(name="unlock", description="Unlock a channel")
@is_admin_or_mod()
async def unlock(interaction: discord.Interaction, channel: discord.TextChannel):
try:
await interaction.response.defer()
await channel.set_permissions(channel.guild.default_role, send_messages=True)
await interaction.followup.send(f"{channel.mention} has been unlocked.")
except Exception as e:
await interaction.followup.send(f"Failed to unlock the channel: {e}")
@bot.tree.command(name="slowmode", description="Set slowmode in a channel")
@is_admin_or_mod()
async def slowmode(interaction: discord.Interaction, channel: discord.TextChannel, seconds: int):
try:
await interaction.response.defer()
await channel.edit(slowmode_delay=seconds)
await interaction.followup.send(f"Slowmode set to {seconds} seconds in {channel.mention}.")
except Exception as e:
await interaction.followup.send(f"Failed to set slowmode: {e}")
@bot.tree.command(name="purgeuser", description="Clear messages by a specific user")
@is_admin_or_mod()
async def purgeuser(interaction: discord.Interaction, channel: discord.TextChannel, member: discord.Member, number: int):
try:
await interaction.response.defer()
deleted_count = 0
async for message in channel.history(limit=200):
if message.author == member and deleted_count < number:
await message.delete()
deleted_count += 1
if deleted_count >= number:
break
await interaction.followup.send(f"Cleared {deleted_count} messages from {member.mention} in {channel.mention}.")
except Exception as e:
await interaction.followup.send(f"Failed to clear messages: {e}")
@bot.tree.command(name="announce", description="Send an announcement")
@is_admin_or_mod()
async def announce(interaction: discord.Interaction, channel: discord.TextChannel, message: str):
try:
await interaction.response.defer()
await channel.send(message)
await interaction.followup.send(f"Announcement sent in {channel.mention}.")
except Exception as e:
await interaction.followup.send(f"Failed to send announcement: {e}")
@bot.tree.command(name="addrole", description="Add a role to a member")
@is_admin_or_mod()
async def addrole(interaction: discord.Interaction, member: discord.Member, role: discord.Role):
try:
await interaction.response.defer()
if role in member.roles:
await interaction.followup.send(f"{member.mention} already has the {role.name} role.")
return
await member.add_roles(role)
await interaction.followup.send(f"Added {role.name} role to {member.mention}.")
except Exception as e:
await interaction.followup.send(f"Failed to add role: {e}")
@bot.tree.command(name="removerole", description="Remove a role from a member")
@is_admin_or_mod()
async def removerole(interaction: discord.Interaction, member: discord.Member, role: discord.Role):
try:
await interaction.response.defer()
if role not in member.roles:
await interaction.followup.send(f"{member.mention} does not have the {role.name} role.")
return
await member.remove_roles(role)
await interaction.followup.send(f"Removed {role.name} role from {member.mention}.")
except Exception as e:
await interaction.followup.send(f"Failed to remove role: {e}")
# -------------------------------------------------MOD-ONLY COMMANDS ENDS----------------------------------------------------
# ---------------------------------------------------------------------------------------------------------------------------
# ---------------------------------------------------OPENAI COMMANDS---------------------------------------------------------
# Define a slash command for 'chat'
@bot.tree.command(name="chat", description="Get a response from GPT")
async def chat(interaction: discord.Interaction, prompt: str):
# Prepare the chat messages for the API call
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt}
]
# List of models to try in order
models = ["gpt-4-1106-preview", "gpt-4", "gpt-3.5-turbo"]
response_sent = False
for model in models:
try:
# Call OpenAI Chat Completions API with the current model
response = openai.ChatCompletion.create(
model=model,
messages=messages
)
# Send the response back to Discord and mark as sent
await interaction.response.send_message(response['choices'][0]['message']['content'])
response_sent = True
break
except Exception as e:
# If there's an error (like model not available), continue to the next model
continue
# If no response was sent, notify the user
if not response_sent:
await interaction.response.send_message("Sorry, I'm unable to get a response at the moment.")
# Function to call DALL-E 3 API
async def generate_dalle_image(prompt: str):
try:
response = openai.Image.create(
model="dall-e-3",
prompt=prompt,
size="1024x1024",
quality="standard",
n=1,
)
image_url = response['data'][0]['url']
return image_url
except Exception as e:
print(f"Error generating image: {e}")
return None
# Define a slash command for 'image'
@bot.tree.command(name="image", description="Generate an image using DALL-E 3")
async def image(interaction: discord.Interaction, prompt: str):
# Defer the response to give more time for processing
await interaction.response.defer()
image_url = await generate_dalle_image(prompt)
if image_url:
await interaction.followup.send(image_url)
else:
await interaction.followup.send("Sorry, I couldn't generate an image.")
# ------------------------------------------------OPENAI COMMANDS ENDS-----------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------
# -------------------------------------------------GENERAL COMMANDS--------------------------------------------------------
# Define a slash command for 'hello'
@bot.tree.command(name="hello", description="This is just a simple hello command.")
async def hello(interaction: discord.Interaction):
await interaction.response.send_message("Hello! How are you today?")
@bot.tree.command(name="userinfo", description="Get information about a user")
async def userinfo(interaction: discord.Interaction, member: discord.Member):
try:
await interaction.response.defer()
embed = discord.Embed(title=f"User Info for {member}", color=discord.Color.blue())
embed.add_field(name="Username", value=str(member), inline=True)
embed.add_field(name="ID", value=member.id, inline=True)
embed.add_field(name="Joined at", value=member.joined_at.strftime("%Y-%m-%d %H:%M:%S"), inline=True)
embed.add_field(name="Roles", value=" ".join([role.mention for role in member.roles[1:]]), inline=False)
embed.add_field(name="Status", value=str(member.status).title(), inline=True)
await interaction.followup.send(embed=embed)
except Exception as e:
await interaction.followup.send(f"Failed to retrieve user info: {e}")
@bot.tree.command(name="serverinfo", description="Get information about the server")
async def serverinfo(interaction: discord.Interaction):
try:
await interaction.response.defer()
guild = interaction.guild
embed = discord.Embed(title=f"Server Info for {guild.name}", color=discord.Color.green())
        if guild.icon:
            embed.set_thumbnail(url=guild.icon.url)
embed.add_field(name="Server ID", value=guild.id, inline=True)
embed.add_field(name="Owner", value=guild.owner.mention, inline=True)
embed.add_field(name="Members", value=guild.member_count, inline=True)
embed.add_field(name="Created at", value=guild.created_at.strftime("%Y-%m-%d %H:%M:%S"), inline=True)
embed.add_field(name="Roles", value=", ".join([role.name for role in guild.roles[1:]]), inline=False)
await interaction.followup.send(embed=embed)
except Exception as e:
await interaction.followup.send(f"Failed to retrieve server info: {e}")
@bot.tree.command(name="poll", description="Create a poll")
async def poll(interaction: discord.Interaction, question: str, options_str: str):
try:
await interaction.response.defer()
options = options_str.split(",") # Split the options string by commas
if len(options) < 2:
await interaction.followup.send("Please provide at least two options for the poll, separated by commas.")
return
embed = discord.Embed(title="Poll", description=question, color=discord.Color.blue())
reactions = ['🔵', '🔴', '🟢', '🟡', '🟣', '🟠', '⚫', '⚪'] # Add more if needed
poll_options = {reactions[i]: option.strip() for i, option in enumerate(options) if i < len(reactions)}
for emoji, option in poll_options.items():
embed.add_field(name=emoji, value=option, inline=False)
poll_message = await interaction.followup.send(embed=embed)
for emoji in poll_options.keys():
await poll_message.add_reaction(emoji)
except Exception as e:
await interaction.followup.send(f"Failed to create poll: {e}")
@bot.tree.command(name="random", description="Make a random choice")
async def random_choice(interaction: discord.Interaction, choices_str: str):
try:
await interaction.response.defer()
choices = choices_str.split(",") # Split the choices string by commas
if len(choices) < 2:
await interaction.followup.send("Please provide at least two choices, separated by commas.")
return
selected_choice = random.choice(choices).strip()
await interaction.followup.send(f"Randomly selected: {selected_choice}")
except Exception as e:
await interaction.followup.send(f"Failed to make a random choice: {e}")
@bot.tree.command(name="weather", description="Get the current weather for a location")
async def weather(interaction: discord.Interaction, location: str):
try:
await interaction.response.defer()
api_key = os.getenv("OPENWEATHER_API_KEY") # Fetch the API key from an environment variable
if not api_key:
await interaction.followup.send("Weather API key not set.")
return
url = f"http://api.openweathermap.org/data/2.5/weather?q={location}&appid={api_key}&units=metric"
response = requests.get(url).json()
if response.get("cod") != 200:
await interaction.followup.send(f"Failed to retrieve weather info for {location}.")
return
weather_description = response['weather'][0]['description']
temperature = response['main']['temp']
humidity = response['main']['humidity']
wind_speed = response['wind']['speed']
weather_info = (f"Weather in {location.title()}: {weather_description}\n"
f"Temperature: {temperature}°C\n"
f"Humidity: {humidity}%\n"
f"Wind Speed: {wind_speed} m/s")
await interaction.followup.send(weather_info)
except Exception as e:
await interaction.followup.send(f"Failed to retrieve weather info: {e}")
@bot.tree.command(name="reminder", description="Set a reminder")
async def reminder(interaction: discord.Interaction, time: str, reminder: str):
try:
await interaction.response.defer()
# Parse the time string into a datetime object
reminder_time = dateparser.parse(time)
if not reminder_time:
await interaction.followup.send("Invalid time format.")
return
# Calculate the delay in seconds
delay = (reminder_time - datetime.datetime.now()).total_seconds()
if delay < 0:
await interaction.followup.send("Time is in the past.")
return
# Wait for the specified time and send the reminder
await asyncio.sleep(delay)
await interaction.followup.send(f"Reminder: {reminder}")
except Exception as e:
await interaction.followup.send(f"Failed to set reminder: {e}")
@bot.tree.command(name="quote", description="Get an inspirational quote")
async def quote(interaction: discord.Interaction):
try:
await interaction.response.defer()
response = requests.get("https://zenquotes.io/api/random")
if response.status_code != 200:
await interaction.followup.send("Failed to retrieve a quote.")
return
quote_data = response.json()[0]
quote_text = f"{quote_data['q']} - {quote_data['a']}"
await interaction.followup.send(quote_text)
except Exception as e:
await interaction.followup.send(f"Failed to retrieve a quote: {e}")
@bot.tree.command(name="joke", description="Tell a random joke")
async def joke(interaction: discord.Interaction):
try:
await interaction.response.defer()
headers = {'Accept': 'application/json'}
response = requests.get("https://icanhazdadjoke.com/", headers=headers)
if response.status_code != 200:
await interaction.followup.send("Failed to retrieve a joke.")
return
joke_text = response.json()['joke']
await interaction.followup.send(joke_text)
except Exception as e:
await interaction.followup.send(f"Failed to retrieve a joke: {e}")
# ------------------------------------------------GENERAL COMMANDS ENDS----------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------BOT GAMES------------------------------------------------------------
# _________________________________________________BLACKJACK_____________________________________________
# Define card values
card_values = {
'2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,
'10': 10, 'J': 10, 'Q': 10, 'K': 10, 'A': 11
}
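# Aces count as 11 by default; calculate_score() drops them to 1 when the hand would otherwise bust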
# Function to draw a card
def draw_card():
card = random.choice(list(card_values.keys()))
suit = random.choice(['♠', '♦', '♣', '♥'])
return f"{card}{suit}"
# Function to calculate the score of a hand
def calculate_score(hand):
score = sum(card_values[card[:-1]] for card in hand)
# Adjust for Aces
for card in hand:
if card[:-1] == 'A' and score > 21:
score -= 10
return score
# Function to check for Blackjack
def is_blackjack(hand):
return calculate_score(hand) == 21 and len(hand) == 2
# Function to update the game message
async def update_game_message(message, player_hand, dealer_hand, game_over=False):
player_score = calculate_score(player_hand)
dealer_score = calculate_score(dealer_hand) if game_over else '?'
dealer_display = " ".join(dealer_hand) if game_over else dealer_hand[0] + " ?"
embed = discord.Embed(title="Blackjack", color=discord.Color.green())
embed.add_field(name="Your Hand", value=" ".join(player_hand) + f" (Score: {player_score})", inline=False)
embed.add_field(name="Dealer's Hand", value=dealer_display + f" (Score: {dealer_score})", inline=False)
if game_over:
if player_score > 21:
embed.set_footer(text="You busted! Dealer wins.")
elif dealer_score > 21 or player_score > dealer_score:
embed.set_footer(text="You win!")
elif player_score == dealer_score:
embed.set_footer(text="It's a tie!")
else:
embed.set_footer(text="Dealer wins.")
await message.edit(embed=embed)
# Blackjack command
@bot.tree.command(name="blackjack", description="Play a game of blackjack")
async def blackjack(interaction: discord.Interaction):
player_hand = [draw_card(), draw_card()]
dealer_hand = [draw_card(), draw_card()]
# Check for Blackjack on initial deal
if is_blackjack(player_hand) or is_blackjack(dealer_hand):
await interaction.response.send_message("Checking for Blackjack...")
await update_game_message(message, player_hand, dealer_hand, game_over=True)
return
message = await interaction.response.send_message("Starting Blackjack game...")
await update_game_message(message, player_hand, dealer_hand)
# Add reactions for player actions
await message.add_reaction('♠') # Hit
await message.add_reaction('♦') # Stand
def check(reaction, user):
return user == interaction.user and str(reaction.emoji) in ['♠', '♦'] and reaction.message.id == message.id
try:
reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=check)
if str(reaction.emoji) == '♠': # Hit
player_hand.append(draw_card())
if calculate_score(player_hand) > 21:
await update_game_message(message, player_hand, dealer_hand, game_over=True)
else:
await update_game_message(message, player_hand, dealer_hand)
elif str(reaction.emoji) == '♦': # Stand
while calculate_score(dealer_hand) < 17:
dealer_hand.append(draw_card())
await update_game_message(message, player_hand, dealer_hand, game_over=True)
except asyncio.TimeoutError:
await message.clear_reactions()
await message.edit(content="Blackjack game timed out.", embed=None)
# _________________________________________________BLACKJACK ENDS_____________________________________________
# _________________________________________________BATTLE GAME________________________________________________
# Global dictionary to store game states
game_states = {}
# Define the battle command
@bot.tree.command(name="battle", description="Start a battle game")
async def battle(interaction: discord.Interaction):
player_health = 100
bot_health = 100
embed = discord.Embed(title="Battle Game", description="Choose your action!", color=discord.Color.red())
embed.add_field(name="Your Health", value=str(player_health), inline=True)
embed.add_field(name="Bot's Health", value=str(bot_health), inline=True)
embed.add_field(name="Actions", value="⚔️ to attack\n🛡️ to defend", inline=False)
    await interaction.response.send_message(embed=embed)
    message = await interaction.original_response()  # fetch the sent message so reactions can be added to it
# Add reactions for game actions
await message.add_reaction('⚔️') # Attack
await message.add_reaction('🛡️') # Defend
# Store initial game state
game_states[message.id] = {
"player_health": player_health,
"bot_health": bot_health,
"interaction": interaction
}
# Handle reactions (registered as an extra on_reaction_add listener so it coexists with the help-page handler above)
@bot.listen("on_reaction_add")
async def on_reaction_add_battle(reaction, user):
if user != bot.user and reaction.message.id in game_states:
game_state = game_states[reaction.message.id]
interaction = game_state["interaction"]
if user.id != interaction.user.id:
return # Ignore reactions from other users
player_action = reaction.emoji
bot_action = random.choice(['⚔️', '🛡️'])
# Determine the outcome of the turn
if player_action == '⚔️' and bot_action == '⚔️':
game_state["player_health"] -= 10
game_state["bot_health"] -= 10
elif player_action == '⚔️' and bot_action == '🛡️':
game_state["bot_health"] -= 5
elif player_action == '🛡️' and bot_action == '⚔️':
game_state["player_health"] -= 5
# Update the embed with the new health values
embed = discord.Embed(title="Battle Game", description="Choose your action!", color=discord.Color.red())
embed.add_field(name="Your Health", value=str(game_state["player_health"]), inline=True)
embed.add_field(name="Bot's Health", value=str(game_state["bot_health"]), inline=True)
embed.add_field(name="Bot's Action", value="Bot chose to " + ("attack" if bot_action == '⚔️' else "defend"), inline=False)
await reaction.message.edit(embed=embed)
# Check for end of game
if game_state["player_health"] <= 0 or game_state["bot_health"] <= 0:
winner = "You win!" if game_state["player_health"] > game_state["bot_health"] else "Bot wins!"
await reaction.message.edit(content=winner, embed=None)
del game_states[reaction.message.id] # Clean up the game state
return
# Prepare for the next turn
await reaction.message.clear_reactions()
await reaction.message.add_reaction('⚔️') # Attack
await reaction.message.add_reaction('🛡️') # Defend
# _________________________________________________BATTLE GAME ENDS________________________________________________
# _________________________________________________WOULD YOU RATHER________________________________________________
# Load Would You Rather questions from JSON file
def load_wyr_questions():
with open('wouldyourather.json', 'r') as file:
return json.load(file)
# Define the Would You Rather command
@bot.tree.command(name="wouldyourather", description="Play 'Would You Rather'")
async def wouldyourather(interaction: discord.Interaction):
await interaction.response.defer()
questions = load_wyr_questions()
question = random.choice(questions)
embed = discord.Embed(title="Would You Rather", description=question["question"], color=discord.Color.blue())
embed.add_field(name="Option 1", value=question["option1"], inline=False)
embed.add_field(name="Option 2", value=question["option2"], inline=False)
message = await interaction.followup.send(embed=embed) # Use followup.send
# Add reactions for options
await message.add_reaction("1️⃣") # Option 1
await message.add_reaction("2️⃣") # Option 2
# Wait for a reaction
def wyr_check(reaction, user):
return user == interaction.user and str(reaction.emoji) in ["1️⃣", "2️⃣"] and reaction.message.id == message.id
try:
reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=wyr_check)
choice_key = "option1" if str(reaction.emoji) == "1️⃣" else "option2"
await interaction.followup.send(f"{user.mention} chose {choice_key.replace('option', 'Option ')}: {question[choice_key]}")
except asyncio.TimeoutError:
await message.clear_reactions()
await message.edit(content="Would You Rather game timed out.", embed=None)
# _________________________________________________WOULD YOU RATHER ENDS____________________________________________
# ______________________________________________________TRUTH OR DARE_______________________________________________
# Load Truth or Dare questions from JSON file
def load_tod_questions():
with open('truthordare.json', 'r') as file:
return json.load(file)
# Define the Truth or Dare command
@bot.tree.command(name="truthordare", description="Play 'Truth or Dare'")
async def truth_or_dare(interaction: discord.Interaction):
await interaction.response.defer()
questions = load_tod_questions()
embed = discord.Embed(title="Truth or Dare", description="React with 🤔 for Truth or 😈 for Dare", color=discord.Color.blue())
message = await interaction.followup.send(embed=embed)
if message:
await message.add_reaction("🤔") # Truth
await message.add_reaction("😈") # Dare
# Wait for a reaction
def tod_check(reaction, user):
return user == interaction.user and str(reaction.emoji) in ["🤔", "😈"] and reaction.message.id == message.id
try:
reaction, user = await bot.wait_for('reaction_add', timeout=60.0, check=tod_check)
if str(reaction.emoji) == "🤔":
selected = random.choice(questions['truths'])
response_type = "Truth"
else:
selected = random.choice(questions['dares'])
response_type = "Dare"
response_embed = discord.Embed(
title=f"{response_type} for {user.display_name}",
description=selected,
color=discord.Color.green() if response_type == "Truth" else discord.Color.red()
)
await interaction.followup.send(embed=response_embed)
except asyncio.TimeoutError:
await message.clear_reactions()
await message.edit(content="Truth or Dare game timed out.", embed=None)
# ________________________________________________TRUTH OR DARE ENDS________________________________________________
# --------------------------------------------------BOT GAMES END----------------------------------------------------------
# -------------------------------------------------------------------------------------------------------------------------
# -------------------------------------------------BOT TOKEN BELOW---------------------------------------------------------
# Run the bot with your token
bot.run(os.getenv('DISCORD_BOT_TOKEN'))
# --------------------------------------------------BOT TOKEN ENDS--------------------------------------------------------
# ========================================================================================================================
| [
"You are a helpful assistant."
] |
2024-01-10 | while-basic/promptflow | promptflow~src~nodes~llm_node.py | from typing import TYPE_CHECKING, Any, Optional
import tiktoken
import customtkinter
from promptflow.src.state import State
from promptflow.src.text_data import TextData
from promptflow.src.utils import retry_with_exponential_backoff
if TYPE_CHECKING:
from promptflow.src.flowchart import Flowchart
from promptflow.src.nodes.node_base import NodeBase
from promptflow.src.dialogues.text_input import TextInput
from promptflow.src.dialogues.node_options import NodeOptions
from promptflow.src.themes import monokai
import tkinter as tk
import openai
import anthropic
import google.generativeai as genai
import os
import enum
class OpenAIModel(enum.Enum):
# manually add these as they become available
# https://platform.openai.com/docs/models
textdavinci = "text-davinci-003"
gpt35turbo = "gpt-3.5-turbo"
gpt35turbo0301 = "gpt-3.5-turbo-0301"
gpt4 = "gpt-4"
gpt40314 = "gpt-4-0314"
class AnthropicModel(enum.Enum):
claude_v1 = "claude-v1"
claude_v1_100k = "claude-v1-100k"
claude_instant_v1 = "claude-instant-v1"
claude_instant_v1_100k = "claude-instant-v1-100k"
class GoogleModel(enum.Enum):
text_bison_001 = "text-bison-001"
chat_bison_001 = "chat-bison-001"
chat_models = [
OpenAIModel.gpt35turbo.value,
OpenAIModel.gpt35turbo0301.value,
OpenAIModel.gpt4.value,
OpenAIModel.gpt40314.value,
GoogleModel.chat_bison_001.value,
]
# https://openai.com/pricing
prompt_cost_1k = {
OpenAIModel.textdavinci.value: 0.02,
OpenAIModel.gpt35turbo.value: 0.002,
OpenAIModel.gpt35turbo0301.value: 0.002,
OpenAIModel.gpt4.value: 0.03,
OpenAIModel.gpt40314.value: 0.03,
AnthropicModel.claude_instant_v1.value: 0.00163,
AnthropicModel.claude_instant_v1_100k.value: 0.00163,
AnthropicModel.claude_v1.value: 0.01102,
AnthropicModel.claude_v1_100k.value: 0.01102,
GoogleModel.text_bison_001.value: 0.001,
GoogleModel.chat_bison_001.value: 0.0005,
}
completion_cost_1k = {
OpenAIModel.textdavinci.value: 0.02,
OpenAIModel.gpt35turbo.value: 0.002,
OpenAIModel.gpt35turbo0301.value: 0.002,
OpenAIModel.gpt4.value: 0.06,
OpenAIModel.gpt40314.value: 0.06,
AnthropicModel.claude_instant_v1.value: 0.00551,
AnthropicModel.claude_instant_v1_100k.value: 0.00551,
AnthropicModel.claude_v1.value: 0.03268,
AnthropicModel.claude_v1_100k.value: 0.03268,
GoogleModel.text_bison_001.value: 0.001,
GoogleModel.chat_bison_001.value: 0.0005,
}
class OpenAINode(NodeBase):
"""
Node that uses the OpenAI API to generate text.
"""
node_color = monokai.GREEN
def __init__(
self,
flowchart: "Flowchart",
center_x: float,
center_y: float,
label: str,
**kwargs,
):
self.model = kwargs.get("model", OpenAIModel.gpt35turbo.value)
self.temperature = kwargs.get("temperature", 0.0)
self.top_p = kwargs.get("top_p", 1.0)
self.n = kwargs.get("n", 1)
self.max_tokens = kwargs.get("max_tokens", 256)
self.presence_penalty = kwargs.get("presence_penalty", 0.0)
self.frequency_penalty = kwargs.get("frequency_penalty", 0.0)
self.model_var = tk.StringVar(value=self.model)
super().__init__(flowchart, center_x, center_y, label, **kwargs)
self.canvas.tag_bind(self.item, "<Double-Button-1>", self.edit_options)
self.canvas.update()
self.bind_drag()
self.bind_mouseover()
self.text_window: Optional[TextInput] = None
self.options_popup: Optional[NodeOptions] = None
def edit_options(self, event: tk.Event):
"""
Create a menu to edit the prompt.
"""
self.options_popup = NodeOptions(
self.canvas,
{
"Model": self.model_var.get(),
"Temperature": self.temperature,
"Top P": self.top_p,
"n": self.n,
# "stop": self.stop,
"Max Tokens": self.max_tokens,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
},
{
"Model": [model.value for model in OpenAIModel],
},
)
self.canvas.wait_window(self.options_popup)
result = self.options_popup.result
# check if cancel
if self.options_popup.cancelled:
return
self.model_var.set(result["Model"])
self.on_model_select(None) # todo: manually calling this is a bit hacky
self.max_tokens = int(result["Max Tokens"])
self.temperature = float(result["Temperature"])
self.top_p = float(result["Top P"])
self.n = int(result["n"])
# self.stop = result["stop"]
self.presence_penalty = float(result["presence_penalty"])
self.frequency_penalty = float(result["frequency_penalty"])
@retry_with_exponential_backoff
def _chat_completion(self, prompt: str, state: State) -> str:
"""
Simple wrapper around the OpenAI API to generate text.
"""
messages = [
*state.history,
]
if prompt:
messages.append({"role": "user", "content": prompt})
completion = openai.ChatCompletion.create(
model=self.model_var.get(),
messages=messages,
temperature=self.temperature,
top_p=self.top_p,
n=self.n,
# stop=self.stop,
max_tokens=self.max_tokens,
presence_penalty=self.presence_penalty,
frequency_penalty=self.frequency_penalty,
)
return completion["choices"][0]["message"]["content"] # type: ignore
@retry_with_exponential_backoff
def _completion(self, prompt: str, state: State) -> str:
"""
Simple wrapper around the OpenAI API to generate text.
"""
# todo this history is really opinionated
history = "\n".join(
[
*[
f"{message['role']}: {message['content']}"
for message in state.history
],
]
)
prompt = f"{history}\n{prompt}\n"
completion = openai.Completion.create(
model=self.model_var.get(),
prompt=prompt,
max_tokens=self.max_tokens,
temperature=self.temperature,
top_p=self.top_p,
n=self.n,
# stop=self.stop,
presence_penalty=self.presence_penalty,
frequency_penalty=self.frequency_penalty,
)
return completion["choices"][0]["text"] # type: ignore
def run_subclass(
self, before_result: Any, state, console: customtkinter.CTkTextbox
) -> str:
"""
Format the prompt and run the OpenAI API.
"""
openai.api_key = os.getenv("OPENAI_API_KEY")
prompt = state.result
self.logger.info(f"Running LLMNode with prompt: {prompt}")
if self.model in chat_models:
completion = self._chat_completion(prompt, state)
else:
completion = self._completion(prompt, state)
self.logger.info(f"Result of LLMNode is {completion}") # type: ignore
return completion # type: ignore
def serialize(self):
return super().serialize() | {
"model": self.model_var.get(),
"temperature": self.temperature,
"top_p": self.top_p,
"n": self.n,
"max_tokens": self.max_tokens,
"presence_penalty": self.presence_penalty,
"frequency_penalty": self.frequency_penalty,
}
def on_model_select(self, _: Optional[tk.Event]):
"""
Callback for when the OpenAI model is changed.
"""
self.model = self.model_var.get()
if self.model in [OpenAIModel.gpt4.value, OpenAIModel.gpt40314.value]:
self.logger.warning("You're using a GPT-4 model. This is costly.")
self.logger.info(f"Selected model: {self.model}")
def cost(self, state: State) -> float:
"""
Return the cost of running this node.
"""
# count the number of tokens
enc = tiktoken.encoding_for_model(self.model)
prompt_tokens = enc.encode(state.result.format(state=state))
max_completion_tokens = self.max_tokens - len(prompt_tokens)
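        # Worst-case estimate: assume the completion uses every remaining token up to max_tokens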
prompt_cost = prompt_cost_1k[self.model] * len(prompt_tokens) / 1000
completion_cost = completion_cost_1k[self.model] * max_completion_tokens / 1000
total = prompt_cost + completion_cost
return total
class ClaudeNode(NodeBase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model = kwargs.get("model", AnthropicModel.claude_v1.value)
self.model_var = tk.StringVar(value=self.model)
self.max_tokens = kwargs.get("max_tokens", 256)
def _build_history(self, state: State) -> str:
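        # Convert the role-based chat history into Anthropic's HUMAN_PROMPT / AI_PROMPT transcript format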
history = ""
for message in state.history:
if message["role"] == "user":
prompt = anthropic.HUMAN_PROMPT
else:
prompt = anthropic.AI_PROMPT
history += f"{prompt}: {message['content']}\n"
# finally add the current prompt
history += f"{anthropic.HUMAN_PROMPT}: {state.result}\n"
return history
def run_subclass(
self, before_result: Any, state, console: customtkinter.CTkTextbox
) -> str:
"""
Format the prompt and run the Anthropics API
"""
c = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
resp = c.completion(
prompt=self._build_history(state) + "\n" + anthropic.AI_PROMPT,
stop_sequences=[anthropic.HUMAN_PROMPT],
model=self.model,
max_tokens_to_sample=self.max_tokens,
)
return resp["completion"]
def serialize(self):
return super().serialize() | {
"model": self.model_var.get(),
"max_tokens": self.max_tokens,
}
def edit_options(self, event: tk.Event):
"""
Create a menu to edit the prompt.
"""
self.options_popup = NodeOptions(
self.canvas,
{
"Model": self.model_var.get(),
"Max Tokens": self.max_tokens,
},
{
"Model": [model.value for model in AnthropicModel],
},
)
self.canvas.wait_window(self.options_popup)
result = self.options_popup.result
# check if cancel
if self.options_popup.cancelled:
return
self.model_var.set(result["Model"])
self.model = self.model_var.get()
self.max_tokens = int(result["Max Tokens"])
def cost(self, state: State) -> float:
"""
Return the cost of running this node.
"""
        # count the number of tokens (tiktoken has no Claude encodings, so fall back to a generic one)
        try:
            enc = tiktoken.encoding_for_model(self.model)
        except KeyError:
            enc = tiktoken.get_encoding("cl100k_base")
prompt_tokens = enc.encode(state.result.format(state=state))
max_completion_tokens = self.max_tokens - len(prompt_tokens)
prompt_cost = prompt_cost_1k[self.model] * len(prompt_tokens) / 1000
completion_cost = completion_cost_1k[self.model] * max_completion_tokens / 1000
total = prompt_cost + completion_cost
return total
class GoogleVertexNode(NodeBase):
"""
Call to Google's Generative AI
"""
model = GoogleModel.text_bison_001.value
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model = kwargs.get("model", GoogleModel.text_bison_001.value)
self.model_var = tk.StringVar(value=self.model)
def _build_history(self, state: State) -> list[str]:
history = []
for message in state.history:
if message["role"] == "user":
history.append("User: " + message["content"])
else:
history.append("AI: " + message["content"])
return history
def run_subclass(
self, before_result: Any, state, console: customtkinter.CTkTextbox
) -> str:
genai.configure(api_key=os.environ["GENAI_API_KEY"])
response = genai.chat(
model=self.model, messages=self._build_history(state), prompt=state.result
)
return response.last
def edit_options(self, event: tk.Event):
"""
Create a menu to edit the prompt.
"""
self.options_popup = NodeOptions(
self.canvas,
{
"Model": self.model_var.get(),
},
{
"Model": [model.value for model in GoogleModel],
},
)
self.canvas.wait_window(self.options_popup)
result = self.options_popup.result
# check if cancel
if self.options_popup.cancelled:
return
self.model_var.set(result["Model"])
def serialize(self):
return super().serialize() | {
"model": self.model_var.get(),
}
def cost(self, state: State) -> float:
"""
Return the cost of running this node.
"""
        # count the number of tokens (tiktoken has no PaLM encodings, so fall back to a generic one)
        try:
            enc = tiktoken.encoding_for_model(self.model)
        except KeyError:
            enc = tiktoken.get_encoding("cl100k_base")
prompt_tokens = enc.encode(state.result.format(state=state))
max_completion_tokens = 1024 - len(prompt_tokens)
prompt_cost = prompt_cost_1k[self.model] * len(prompt_tokens) / 1000
completion_cost = completion_cost_1k[self.model] * max_completion_tokens / 1000
total = prompt_cost + completion_cost
return total
| [
"PLACEHOLDER\nPLACEHOLDER\n"
] |
2024-01-10 | lvgalvao/Quiz-Generator-App | app~quiz_app.py | from collections import namedtuple
from langchain import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
import openai
from config import OPENAI_API_KEY
import streamlit as st
Question = namedtuple("Question", ["text", "options", "answer"])
def create_the_quiz_app_template():
template = """
Você é um expecialista em gerador de Quiz técnico
Crie um quiz com {num_questions} do tipo {quiz_type} sobre o tema: {quiz_context}
O formato de cada pergunta deve ser:
- múltipla escolha:
<pergunta 1>: <a. opção 1>, <b.opção 2>, <c.opção 3>, <d. opção 4>
<resposta 1>: <a|b|c|d>
...
Exemplo:
Pergunta 1: Qual a complexidade de tempo do algoritmo de ordenação Bubble Sort?
a. O(n^2),
b. O(n),
c. O(nlogn),
d. O(1)
Resposta 1: a
"""
prompt = PromptTemplate.from_template(template)
prompt.format(num_questions=1, quiz_type="múltipla escolha", quiz_context="Python")
return prompt
def create_the_quiz_chain(prompt_template, llm):
return LLMChain(llm=llm, prompt=prompt_template)
def parse_quiz_response(response):
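    # Assumes the model reply keeps the question and all of its options on the first line
    # and the "Resposta 1: <letter>" answer on the second line.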
lines = response.strip().split("\n")
    # Since we are only handling 1 question, do this directly:
    question_text = lines[0].split(":")[1].strip()  # Extract the question text.
    # Assume the options are separated by a comma followed by a space.
    options = [opt.strip() for opt in question_text.split(", ")]
    # Strip the "a.", "b.", etc. prefixes from the options to keep only the option text.
    options = [opt.split(". ")[1] for opt in options]
    answer = lines[1].split(":")[1].strip()  # Extract the answer.
    # Return the question as a list to stay compatible with the rest of the code.
    return [Question(text=question_text, options=options, answer=answer)]
def main():
st.title("Quiz App")
st.write("Bem vindo ao Quiz App")
prompt_template = create_the_quiz_app_template()
llm = ChatOpenAI(openai_api_key=OPENAI_API_KEY)
chain = create_the_quiz_chain(prompt_template, llm)
context = st.text_area("Digite o contexto que você quer saber")
    num_questions = st.number_input("Digite o número de questões que você quer", min_value=1, max_value=10, value=1)  # Default value set to 1.
    quiz_type = st.selectbox("Selecione o tipo de questão", ["múltipla escolha"])  # True/false option removed to keep things simple.
if st.button("Gerar Quizz"):
quiz_response = chain.run(num_questions=num_questions, quiz_type=quiz_type, quiz_context=context)
questions = parse_quiz_response(quiz_response)
user_answers = {}
        # Since there is only one question, handle it directly instead of looping.
question = questions[0]
st.write(f"Pergunta: {question.text}")
user_answers[0] = st.radio(f"Respostas", options=question.options, index=0)
if st.button("Verificar Resposta"):
if user_answers[0] == question.answer:
st.success("Resposta correta!")
else:
st.error(f"Resposta incorreta. A resposta correta é: {question.answer}")
if __name__ == "__main__":
main() | [
"\n Você é um expecialista em gerador de Quiz técnico\n Crie um quiz com {num_questions} do tipo {quiz_type} sobre o tema: {quiz_context}\n O formato de cada pergunta deve ser:\n - múltipla escolha: \n <pergunta 1>: <a. opção 1>, <b.opção 2>, <c.opção 3>, <d. opção 4>\n <resposta 1>: <a|b|c|d>\n ...\n Exemplo:\n Pergunta 1: Qual a complexidade de tempo do algoritmo de ordenação Bubble Sort?\n a. O(n^2),\n b. O(n),\n c. O(nlogn),\n d. O(1)\n Resposta 1: a\n \n "
] |
2024-01-10 | tushdemort/wilp | assessment.py | """Grade free-text answers by querying the OpenAI chat API."""
import os
from dotenv import load_dotenv
import pathlib
import math
import random
import openai
dotenv_path = pathlib.Path('.env')
load_dotenv(dotenv_path=dotenv_path)
key_index = random.randint(0, 4)
key_array = [os.getenv("KEY1"), os.getenv("KEY2"), os.getenv("KEY3"), os.getenv("KEY4"), os.getenv("KEY5")]
openai.api_key = key_array[key_index]
def gen_response(prompt):
"DocString"
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{'role': 'system', 'content': prompt}]
)
response = completion.choices[0]['message']['content']
return response
def model_soln(ans, model):
"DocString"
prompt = "give me a rough percentage to which is the first answer" + \
"similar to the second considering the secod one is absolutely correct: " + \
ans + " and " + model + \
" Make sure to just print out the rough percentage without " + \
"the percentage symbol and no other information"
response = gen_response(prompt)
response = response.replace("%", "")
return int(response)
def from_question(question, ans):
    """Estimate, as a percentage, how likely ``ans`` is a correct answer to ``question``."""
    try:
        prompt = "give me a rough percentage of whether the answer: " + ans + \
            " is correct for the question: " + question + \
            ". Make sure to just print out the rough percentage without " + \
            "the percentage symbol and no other information."
response = gen_response(prompt)
response = response.replace("%", "")
return int(response)
    except Exception:
return 0
def ai_marks(ans, total_marks, question, model=""):
"DocString"
total_per = 0
count = 0
if len(model) != 0:
        from_model = model_soln(ans, model)  # compare the student answer against the model solution
total_per += from_model
count += 1
if len(question) != 0:
from_ques = from_question(question, ans)
total_per += from_ques
count += 1
calc = (total_marks * total_per)/(100*count)
return f"{math.floor(calc)}-{math.ceil(calc)}"
| [
"give me a rough percentage whether the answer: PLACEHOLDER is correct for the questionPLACEHOLDERMake sure to just print out the rough percentage without the percentage symbol and no other information.",
"give me a rough percentage to which is the first answersimilar to the second considering the secod one is absolutely correct: PLACEHOLDER and PLACEHOLDER Make sure to just print out the rough percentage without the percentage symbol and no other information"
] |
2024-01-10 | ALRhub/rl_with_videos | RLV~torch_rlv~policies~policies.py | """Policies: abstract base class and concrete implementations."""
import collections
import copy
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
from torch import nn
from stable_baselines3.common.distributions import (
BernoulliDistribution,
CategoricalDistribution,
DiagGaussianDistribution,
Distribution,
MultiCategoricalDistribution,
StateDependentNoiseDistribution,
make_proba_distribution,
)
from stable_baselines3.common.preprocessing import get_action_dim, is_image_space, maybe_transpose, preprocess_obs
from stable_baselines3.common.torch_layers import (
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
MlpExtractor,
NatureCNN,
create_mlp,
)
from RLV.torch_rlv.utils.type_aliases import Schedule
from stable_baselines3.common.utils import get_device, is_vectorized_observation, obs_as_tensor
class BaseModel(nn.Module, ABC):
"""
The base model object: makes predictions in response to observations.
In the case of policies, the prediction is an action. In the case of critics, it is the
estimated value of the observation.
:param observation_space: The observation space of the environment
:param action_space: The action space of the environment
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
features_extractor: Optional[nn.Module] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(BaseModel, self).__init__()
if optimizer_kwargs is None:
optimizer_kwargs = {}
if features_extractor_kwargs is None:
features_extractor_kwargs = {}
self.observation_space = observation_space
self.action_space = action_space
self.features_extractor = features_extractor
self.normalize_images = normalize_images
self.optimizer_class = optimizer_class
self.optimizer_kwargs = optimizer_kwargs
self.optimizer = None # type: Optional[th.optim.Optimizer]
self.features_extractor_class = features_extractor_class
self.features_extractor_kwargs = features_extractor_kwargs
@abstractmethod
def forward(self, *args, **kwargs):
pass
def _update_features_extractor(
self,
net_kwargs: Dict[str, Any],
features_extractor: Optional[BaseFeaturesExtractor] = None,
) -> Dict[str, Any]:
"""
Update the network keyword arguments and create a new features extractor object if needed.
If a ``features_extractor`` object is passed, then it will be shared.
:param net_kwargs: the base network keyword arguments, without the ones
related to features extractor
:param features_extractor: a features extractor object.
If None, a new object will be created.
:return: The updated keyword arguments
"""
net_kwargs = net_kwargs.copy()
if features_extractor is None:
# The features extractor is not shared, create a new one
features_extractor = self.make_features_extractor()
net_kwargs.update(dict(features_extractor=features_extractor, features_dim=features_extractor.features_dim))
return net_kwargs
def make_features_extractor(self) -> BaseFeaturesExtractor:
"""Helper method to create a features extractor."""
return self.features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
def extract_features(self, obs: th.Tensor) -> th.Tensor:
"""
Preprocess the observation if needed and extract features.
:param obs:
:return:
"""
assert self.features_extractor is not None, "No features extractor was set"
preprocessed_obs = preprocess_obs(obs, self.observation_space, normalize_images=self.normalize_images)
return self.features_extractor(preprocessed_obs)
def _get_constructor_parameters(self) -> Dict[str, Any]:
"""
Get data that need to be saved in order to re-create the model when loading it from disk.
:return: The dictionary to pass to the as kwargs constructor when reconstruction this model.
"""
return dict(
observation_space=self.observation_space,
action_space=self.action_space,
# Passed to the constructor by child class
# squash_output=self.squash_output,
# features_extractor=self.features_extractor
normalize_images=self.normalize_images,
)
@property
def device(self) -> th.device:
"""Infer which device this policy lives on by inspecting its parameters.
If it has no parameters, the 'cpu' device is used as a fallback.
:return:"""
for param in self.parameters():
return param.device
return get_device("cpu")
def save(self, path: str) -> None:
"""
Save model to a given location.
:param path:
"""
th.save({"state_dict": self.state_dict(), "data": self._get_constructor_parameters()}, path)
@classmethod
def load(cls, path: str, device: Union[th.device, str] = "auto") -> "BaseModel":
"""
Load model from path.
:param path:
:param device: Device on which the policy should be loaded.
:return:
"""
device = get_device(device)
saved_variables = th.load(path, map_location=device)
# Create policy object
model = cls(**saved_variables["data"]) # pytype: disable=not-instantiable
# Load weights
model.load_state_dict(saved_variables["state_dict"])
model.to(device)
return model
def load_from_vector(self, vector: np.ndarray) -> None:
"""
Load parameters from a 1D vector.
:param vector:
"""
th.nn.utils.vector_to_parameters(th.FloatTensor(vector).to(self.device), self.parameters())
def parameters_to_vector(self) -> np.ndarray:
"""
Convert the parameters to a 1D vector.
:return:
"""
return th.nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy()
class BasePolicy(BaseModel):
"""The base policy object.
Parameters are mostly the same as `BaseModel`; additions are documented below.
:param args: positional arguments passed through to `BaseModel`.
:param kwargs: keyword arguments passed through to `BaseModel`.
:param squash_output: For continuous actions, whether the output is squashed
or not using a ``tanh()`` function.
"""
def __init__(self, *args, squash_output: bool = False, **kwargs):
super(BasePolicy, self).__init__(*args, **kwargs)
self._squash_output = squash_output
@staticmethod
def _dummy_schedule(progress_remaining: float) -> float:
"""(float) Useful for pickling policy."""
del progress_remaining
return 0.0
@property
def squash_output(self) -> bool:
"""(bool) Getter for squash_output."""
return self._squash_output
@staticmethod
def init_weights(module: nn.Module, gain: float = 1) -> None:
"""
Orthogonal initialization (used in PPO and A2C)
"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
nn.init.orthogonal_(module.weight, gain=gain)
if module.bias is not None:
module.bias.data.fill_(0.0)
@abstractmethod
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
By default provides a dummy implementation -- not all BasePolicy classes
implement this, e.g. if they are a Critic in an Actor-Critic method.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
def predict(
self,
observation: Union[np.ndarray, Dict[str, np.ndarray]],
state: Optional[np.ndarray] = None,
mask: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[np.ndarray]]:
"""
Get the policy action and state from an observation (and optional state).
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:param state: The last states (can be None, used in recurrent policies)
:param mask: The last masks (can be None, used in recurrent policies)
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next state
(used in recurrent policies)
"""
# TODO (GH/1): add support for RNN policies
# if state is None:
# state = self.initial_state
# if mask is None:
# mask = [False for _ in range(self.n_envs)]
vectorized_env = False
if isinstance(observation, dict):
# need to copy the dict as the dict in VecFrameStack will become a torch tensor
observation = copy.deepcopy(observation)
for key, obs in observation.items():
obs_space = self.observation_space.spaces[key]
if is_image_space(obs_space):
obs_ = maybe_transpose(obs, obs_space)
else:
obs_ = np.array(obs)
vectorized_env = vectorized_env or is_vectorized_observation(obs_, obs_space)
# Add batch dimension if needed
observation[key] = obs_.reshape((-1,) + self.observation_space[key].shape)
elif is_image_space(self.observation_space):
# Handle the different cases for images
# as PyTorch use channel first format
observation = maybe_transpose(observation, self.observation_space)
else:
observation = np.array(observation)
if not isinstance(observation, dict):
# Dict obs need to be handled separately
vectorized_env = is_vectorized_observation(observation, self.observation_space)
# Add batch dimension if needed
observation = observation.reshape((-1,) + self.observation_space.shape)
observation = obs_as_tensor(observation, self.device)
with th.no_grad():
actions = self._predict(observation, deterministic=deterministic)
# Convert to numpy
actions = actions.cpu().numpy()
if isinstance(self.action_space, gym.spaces.Box):
if self.squash_output:
# Rescale to proper domain when using squashing
actions = self.unscale_action(actions)
else:
# Actions could be on arbitrary scale, so clip the actions to avoid
# out of bound error (e.g. if sampling from a Gaussian distribution)
actions = np.clip(actions, self.action_space.low, self.action_space.high)
if not vectorized_env:
if state is not None:
raise ValueError("Error: The environment must be vectorized when using recurrent policies.")
actions = actions[0]
return actions, state
def scale_action(self, action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [low, high] to [-1, 1]
(no need for symmetric action space)
:param action: Action to scale
:return: Scaled action
"""
low, high = self.action_space.low, self.action_space.high
return 2.0 * ((action - low) / (high - low)) - 1.0
def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [-1, 1] to [low, high]
(no need for symmetric action space)
:param scaled_action: Action to un-scale
"""
low, high = self.action_space.low, self.action_space.high
return low + (0.5 * (scaled_action + 1.0) * (high - low))
class ActorCriticPolicy(BasePolicy):
"""
Policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
if optimizer_kwargs is None:
optimizer_kwargs = {}
# Small values to avoid NaN in Adam optimizer
if optimizer_class == th.optim.Adam:
optimizer_kwargs["eps"] = 1e-5
super(ActorCriticPolicy, self).__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
squash_output=squash_output,
)
# Default network architecture, from stable-baselines
if net_arch is None:
if features_extractor_class == NatureCNN:
net_arch = []
else:
net_arch = [dict(pi=[64, 64], vf=[64, 64])]
self.net_arch = net_arch
self.activation_fn = activation_fn
self.ortho_init = ortho_init
self.features_extractor = features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
self.features_dim = self.features_extractor.features_dim
self.normalize_images = normalize_images
self.log_std_init = log_std_init
dist_kwargs = None
# Keyword arguments for gSDE distribution
if use_sde:
dist_kwargs = {
"full_std": full_std,
"squash_output": squash_output,
"use_expln": use_expln,
"learn_features": sde_net_arch is not None,
}
self.sde_features_extractor = None
self.sde_net_arch = sde_net_arch
self.use_sde = use_sde
self.dist_kwargs = dist_kwargs
# Action distribution
self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs)
self._build(lr_schedule)
def _get_constructor_parameters(self) -> Dict[str, Any]:
data = super()._get_constructor_parameters()
default_none_kwargs = self.dist_kwargs or collections.defaultdict(lambda: None)
data.update(
dict(
net_arch=self.net_arch,
activation_fn=self.activation_fn,
use_sde=self.use_sde,
log_std_init=self.log_std_init,
squash_output=default_none_kwargs["squash_output"],
full_std=default_none_kwargs["full_std"],
sde_net_arch=self.sde_net_arch,
use_expln=default_none_kwargs["use_expln"],
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
ortho_init=self.ortho_init,
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
def reset_noise(self, n_envs: int = 1) -> None:
"""
Sample new weights for the exploration matrix.
:param n_envs:
"""
assert isinstance(self.action_dist, StateDependentNoiseDistribution), "reset_noise() is only available when using gSDE"
self.action_dist.sample_weights(self.log_std, batch_size=n_envs)
def _build_mlp_extractor(self) -> None:
"""
Create the policy and value networks.
Part of the layers can be shared.
"""
# Note: If net_arch is None and some features extractor is used,
# net_arch here is an empty list and mlp_extractor does not
# really contain any layers (acts like an identity module).
self.mlp_extractor = MlpExtractor(
self.features_dim,
net_arch=self.net_arch,
activation_fn=self.activation_fn,
device=self.device,
)
def _build(self, lr_schedule: Schedule) -> None:
"""
Create the networks and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self._build_mlp_extractor()
latent_dim_pi = self.mlp_extractor.latent_dim_pi
# Separate features extractor for gSDE
if self.sde_net_arch is not None:
self.sde_features_extractor, latent_sde_dim = create_sde_features_extractor(
self.features_dim, self.sde_net_arch, self.activation_fn
)
if isinstance(self.action_dist, DiagGaussianDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
latent_sde_dim = latent_dim_pi if self.sde_net_arch is None else latent_sde_dim
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, latent_sde_dim=latent_sde_dim, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, CategoricalDistribution):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
elif isinstance(self.action_dist, MultiCategoricalDistribution):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
elif isinstance(self.action_dist, BernoulliDistribution):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
else:
raise NotImplementedError(f"Unsupported distribution '{self.action_dist}'.")
self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)
# Init weights: use orthogonal initialization
# with small initial weight for the output
if self.ortho_init:
# TODO: check for features_extractor
# Values from stable-baselines.
# features_extractor/mlp values are
# originally from openai/baselines (default gains/init_scales).
module_gains = {
self.features_extractor: np.sqrt(2),
self.mlp_extractor: np.sqrt(2),
self.action_net: 0.01,
self.value_net: 1,
}
for module, gain in module_gains.items():
module.apply(partial(self.init_weights, gain=gain))
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation
:param deterministic: Whether to sample or use deterministic actions
:return: action, value and log probability of the action
"""
latent_pi, latent_vf, latent_sde = self._get_latent(obs)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde=latent_sde)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
return actions, values, log_prob
def _get_latent(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Get the latent code (i.e., activations of the last layer of each network)
for the different networks.
:param obs: Observation
:return: Latent codes
for the actor, the value function and for gSDE function
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
# Features for sde
latent_sde = latent_pi
if self.sde_features_extractor is not None:
latent_sde = self.sde_features_extractor(features)
return latent_pi, latent_vf, latent_sde
def _get_action_dist_from_latent(self, latent_pi: th.Tensor, latent_sde: Optional[th.Tensor] = None) -> Distribution:
"""
Retrieve action distribution given the latent codes.
:param latent_pi: Latent code for the actor
:param latent_sde: Latent code for the gSDE exploration function
:return: Action distribution
"""
mean_actions = self.action_net(latent_pi)
if isinstance(self.action_dist, DiagGaussianDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std)
elif isinstance(self.action_dist, CategoricalDistribution):
# Here mean_actions are the logits before the softmax
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, MultiCategoricalDistribution):
# Here mean_actions are the flattened logits
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, BernoulliDistribution):
# Here mean_actions are the logits (before rounding to get the binary actions)
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_sde)
else:
raise ValueError("Invalid action distribution")
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
latent_pi, _, latent_sde = self._get_latent(observation)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)
return distribution.get_actions(deterministic=deterministic)
def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Evaluate actions according to the current policy,
given the observations.
:param obs:
:param actions:
:return: estimated value, log likelihood of taking those actions
and entropy of the action distribution.
"""
latent_pi, latent_vf, latent_sde = self._get_latent(obs)
distribution = self._get_action_dist_from_latent(latent_pi, latent_sde)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
return values, log_prob, distribution.entropy()
class ActorCriticCnnPolicy(ActorCriticPolicy):
"""
CNN policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(ActorCriticCnnPolicy, self).__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
sde_net_arch,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class MultiInputActorCriticPolicy(ActorCriticPolicy):
"""
    MultiInputActorCriticPolicy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space (Tuple)
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Uses the CombinedExtractor
:param features_extractor_kwargs: Keyword arguments
to pass to the feature extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Dict,
action_space: gym.spaces.Space,
lr_schedule: Callable,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(MultiInputActorCriticPolicy, self).__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
sde_net_arch,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class ContinuousCritic(BaseModel):
"""
Critic network(s) for DDPG/SAC/TD3.
It represents the action-state value function (Q-value function).
Compared to A2C/PPO critics, this one represents the Q-value
and takes the continuous action as input. It is concatenated with the state
and then fed to the network which outputs a single value: Q(s, a).
For more recent algorithms like SAC/TD3, multiple networks
are created to give different estimates.
By default, it creates two critic networks used to reduce overestimation
thanks to clipped Q-learning (cf TD3 paper).
    :param observation_space: Observation space
:param action_space: Action space
:param net_arch: Network architecture
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param features_dim: Number of features
:param activation_fn: Activation function
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param n_critics: Number of critic networks to create.
:param share_features_extractor: Whether the features extractor is shared or not
between the actor and the critic (this saves computation time)
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
net_arch: List[int],
features_extractor: nn.Module,
features_dim: int,
activation_fn: Type[nn.Module] = nn.ReLU,
normalize_images: bool = True,
n_critics: int = 2,
share_features_extractor: bool = True,
):
super().__init__(
observation_space,
action_space,
features_extractor=features_extractor,
normalize_images=normalize_images,
)
action_dim = get_action_dim(self.action_space)
self.share_features_extractor = share_features_extractor
self.n_critics = n_critics
self.q_networks = []
for idx in range(n_critics):
q_net = create_mlp(features_dim + action_dim, 1, net_arch, activation_fn)
q_net = nn.Sequential(*q_net)
self.add_module(f"qf{idx}", q_net)
self.q_networks.append(q_net)
def forward(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, ...]:
# Learn the features extractor using the policy loss only
# when the features_extractor is shared with the actor
with th.set_grad_enabled(not self.share_features_extractor):
features = self.extract_features(obs)
qvalue_input = th.cat([features, actions], dim=1)
return tuple(q_net(qvalue_input) for q_net in self.q_networks)
def q1_forward(self, obs: th.Tensor, actions: th.Tensor) -> th.Tensor:
"""
Only predict the Q-value using the first network.
This allows to reduce computation when all the estimates are not needed
(e.g. when updating the policy in TD3).
"""
with th.no_grad():
features = self.extract_features(obs)
return self.q_networks[0](th.cat([features, actions], dim=1))
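# A hedged construction sketch (not part of the original module): it shows how a ContinuousCritic
# could be assembled for a small continuous-control task; the spaces and layer sizes are made up.
def _example_continuous_critic() -> ContinuousCritic:
    obs_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(8,), dtype=np.float32)
    act_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)
    extractor = FlattenExtractor(obs_space)
    return ContinuousCritic(
        observation_space=obs_space,
        action_space=act_space,
        net_arch=[64, 64],
        features_extractor=extractor,
        features_dim=extractor.features_dim,
        n_critics=2,  # two Q-networks, as used for clipped double-Q learning
    )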
def create_sde_features_extractor(
features_dim: int, sde_net_arch: List[int], activation_fn: Type[nn.Module]
) -> Tuple[nn.Sequential, int]:
"""
Create the neural network that will be used to extract features
for the gSDE exploration function.
:param features_dim:
:param sde_net_arch:
:param activation_fn:
:return:
"""
# Special case: when using states as features (i.e. sde_net_arch is an empty list)
# don't use any activation function
sde_activation = activation_fn if len(sde_net_arch) > 0 else None
latent_sde_net = create_mlp(features_dim, -1, sde_net_arch, activation_fn=sde_activation, squash_output=False)
latent_sde_dim = sde_net_arch[-1] if len(sde_net_arch) > 0 else features_dim
sde_features_extractor = nn.Sequential(*latent_sde_net)
return sde_features_extractor, latent_sde_dim
_policy_registry = dict() # type: Dict[Type[BasePolicy], Dict[str, Type[BasePolicy]]]
def get_policy_from_name(base_policy_type: Type[BasePolicy], name: str) -> Type[BasePolicy]:
"""
Returns the registered policy from the base type and name.
See `register_policy` for registering policies and explanation.
:param base_policy_type: the base policy class
:param name: the policy name
:return: the policy
"""
if base_policy_type not in _policy_registry:
raise KeyError(f"Error: the policy type {base_policy_type} is not registered!")
if name not in _policy_registry[base_policy_type]:
raise KeyError(
f"Error: unknown policy type {name},"
f"the only registed policy type are: {list(_policy_registry[base_policy_type].keys())}!"
)
return _policy_registry[base_policy_type][name]
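# A hedged usage sketch (not part of the original module): this is how the registry is typically
# populated and queried; "MlpPolicy" mirrors the example names used in the docstrings below.
def _example_policy_registry_usage() -> Type[BasePolicy]:
    register_policy("MlpPolicy", ActorCriticPolicy)  # register_policy is defined below
    return get_policy_from_name(ActorCriticPolicy, "MlpPolicy")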
def register_policy(name: str, policy: Type[BasePolicy]) -> None:
"""
Register a policy, so it can be called using its name.
e.g. SAC('MlpPolicy', ...) instead of SAC(MlpPolicy, ...).
The goal here is to standardize policy naming, e.g.
all algorithms can call upon "MlpPolicy" or "CnnPolicy",
and they receive respective policies that work for them.
Consider following:
OnlinePolicy
-- OnlineMlpPolicy ("MlpPolicy")
-- OnlineCnnPolicy ("CnnPolicy")
OfflinePolicy
-- OfflineMlpPolicy ("MlpPolicy")
-- OfflineCnnPolicy ("CnnPolicy")
Two policies have name "MlpPolicy" and two have "CnnPolicy".
In `get_policy_from_name`, the parent class (e.g. OnlinePolicy)
is given and used to select and return the correct policy.
:param name: the policy name
:param policy: the policy class
"""
sub_class = None
for cls in BasePolicy.__subclasses__():
if issubclass(policy, cls):
sub_class = cls
break
if sub_class is None:
raise ValueError(f"Error: the policy {policy} is not of any known subclasses of BasePolicy!")
if sub_class not in _policy_registry:
_policy_registry[sub_class] = {}
if name in _policy_registry[sub_class]:
# Check if the registered policy is same
# we try to register. If not so,
# do not override and complain.
if _policy_registry[sub_class][name] != policy:
raise ValueError(f"Error: the name {name} is already registered for a different policy, will not override.")
_policy_registry[sub_class][name] = policy | [] |
2024-01-10 | alexszokolay/Uoft-ECKS | _cohere.py | # - After algorithm sorts into 1 or 2 emotions
# - Algorithm associates emotion with specific interest
# - Cohere.api names locations associated with given interest
import cohere
import clean_file_with_algorithm as algorithm
# Initialize
co = cohere.Client("MSuvC3ORXmJeWIzxj6D9vIw0QZAhfO6ibEmTlDYG")
interest = algorithm.emotion_giving_method(algorithm.TherapyTravel(),
algorithm.InterestsList())
prompt = f"\n Name real locations in Toronto I should go to if I like " \
f"{interest}"
print(prompt)
# model = medium or xlarge
response = co.generate(
model='c1e4d1a2-5127-494b-8536-3d6845a4f267-ft',
prompt=prompt,
max_tokens=35,
temperature=0.9,
stop_sequences=["--"])
output = response.generations[0].text
print(output)
# add end_sequences
| [
"\n Name real locations in Toronto I should go to if I like PLACEHOLDER"
] |
2024-01-10 | alexszokolay/Uoft-ECKS | new-en~bot.py | import discord
from discord.ui import Button, View
from discord.ext import commands
import json
from os import environ
from dotenv import load_dotenv
import clean_file_with_algorithm
import cohere
import clean_file_with_algorithm as algorithm
load_dotenv()
token = environ["TOKEN"]
intents = discord.Intents.all()
client = commands.Bot(command_prefix='!', intents=intents)
options = {"1": '😄', "2": '🙂', "3": '😐', "4": '🙁', "5": '😢'}
colours = {"1": discord.ButtonStyle.green, "2": discord.ButtonStyle.blurple,
"3": discord.ButtonStyle.grey, "4": discord.ButtonStyle.blurple,
"5": discord.ButtonStyle.red}
emotions = {"Stress": 0, "Boredom": 0, "Loneliness": 0, "Anger": 0,
"Sadness": 0}
answers = []
emotions_streak = []
@client.event
async def on_ready():
print('bot ready')
class MyButton(Button):
def __init__(self, name, color, emoji, emotion):
super().__init__(label=name, style=color, emoji=emoji)
self.emotion = emotion
async def callback(self, interaction: discord.Interaction):
# ! Update the dictionary
if ',' in self.emotion:
emotion1 = self.emotion.split(',')[0].strip()
emotion2 = self.emotion.split(',')[1].strip()
else:
emotion1 = emotion2 = self.emotion
for key in emotions:
if key == self.emotion or key == emotion1 or key == emotion2:
emotions[key] += int(self.label)
await interaction.response.send_message(
"Thanks! Your input of " + self.label + " has been recorded.")
answers.append(int(self.label))
class MyView(View):
emotions = emotions
def __init__(self):
super().__init__() # timeout=10.0)
async def button_callback(self, interaction: discord.Interaction):
await interaction.response.edit_message(view=self)
@client.event
async def on_message(message):
if message.author == client.user:
return
# !Help command
if message.content.startswith('!help'):
embed_var = discord.Embed(title="Help",
color=discord.Colour.light_grey())
embed_var.add_field(name="About",
value="VentBot is a tool that helps you destress!",
inline=True)
embed_var.add_field(name="Usage", value="!vent", inline=False)
await message.channel.send(embed=embed_var)
# ! Main command
if message.content == '!vent':
await message.channel.send(
"Hey there, I heard you weren't feeling so great! ")
await message.channel.send("Here are some questions:")
f = open("questions.txt", "r")
for line in f:
view = MyView()
current_emotion = line.split("(")[1].split(")")[0]
for index in options:
button = MyButton(index, colours[index],
options[index], current_emotion)
view.add_item(button)
await message.channel.send(line.split("(")[0], view=view)
await client.wait_for('message')
f.close()
therapy_bot = clean_file_with_algorithm.TherapyTravel(emotions)
interests = clean_file_with_algorithm.InterestsList()
output = clean_file_with_algorithm.emotion_giving_method(therapy_bot,
interests)
co = cohere.Client("MSuvC3ORXmJeWIzxj6D9vIw0QZAhfO6ibEmTlDYG")
prompt = f"\n Name real locations in Toronto I should go to if I like "\
f"{output}"
    # model = medium or xlarge
response = co.generate(
model='c1e4d1a2-5127-494b-8536-3d6845a4f267-ft',
prompt=prompt,
max_tokens=35,
temperature=0.9,
stop_sequences=["--"]
)
result = response.generations[0].text
embed_var = discord.Embed(title="Here is your custom suggestion!",
color=discord.Colour.light_grey())
embed_var.add_field(name="Interests",
value=output,
inline=True)
embed_var.add_field(name="Suggestion", value=result, inline=False)
await message.channel.send(embed=embed_var)
client.run(token)
| [
"\n Name real locations in Toronto I should go to if I like PLACEHOLDER"
] |
2024-01-10 | Sunzheini/playground | lab~exercise_openai.py | from tkinter import *
import openai
class DirWalkerGui:
def __init__(self):
self.window = Tk()
self.window.geometry("300x300")
self.window.title("DZ AI Prompt Generator")
self.window.config(background='#2b2828')
self.api_key = None
# Section 1 -----------------------------------------------------------
self.label1 = Label(
self.window, text='постави api ключ', width=30, height=1,
bg='#2b2828', borderwidth=0, relief="ridge", fg='white'
)
self.label1.pack()
self.entry1 = Entry(
self.window,
font=("Arial", 8),
fg='white',
bg='black',
)
self.entry1.insert(
            0,  # from the beginning position
'', # default text
)
self.entry1.pack()
self.button1 = Button(
self.window, text='запази ключа', width=15, height=1,
command=self.store_key
)
self.button1.pack()
canvas = Canvas(self.window, width=300, height=1, bg='#2b2828', borderwidth=0)
canvas.pack()
# Section 2 -----------------------------------------------------------
self.label2 = Label(
self.window, text='задай въпрос', width=30, height=1,
bg='#2b2828', borderwidth=0, relief="ridge", fg='white'
)
self.label2.pack()
self.entry2 = Entry(
self.window,
font=("Arial", 8),
fg='white',
bg='black',
)
self.entry2.insert(
            0,  # from the beginning position
'', # default text
)
self.entry2.pack()
self.button2 = Button(
self.window, text='изпрати', width=15, height=1,
command=self.send_query
)
self.button2.pack()
canvas2 = Canvas(self.window, width=300, height=1, bg='#2b2828', borderwidth=0)
canvas2.pack()
# Section 3 -----------------------------------------------------------
self.text_widget = Text(self.window, height=8, width=33)
self.text_widget.pack()
# ------------------------------------------------------------------
def store_key(self):
new_input = self.entry1.get()
self.api_key = new_input
self.entry1.config(state=DISABLED) # disable after submitting
def send_query(self):
if self.api_key is None:
self.print_text('Моля, поставете API ключа!')
return
# openai.api_key = key
openai.api_key = self.api_key
total_tokens = 0
prompt = self.entry2.get()
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0.9,
max_tokens=150,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.6,
stop=[" Human:", " AI:"]
)
self.print_text(response.choices[0].text)
def print_text(self, text):
self.text_widget.delete('1.0', 'end') # clear any previous text
# formatted_text = '\n'.join([f'AI: {line.strip()}' for line in text.split('\n')])
formatted_text = '\n'.join([line.strip() for line in text.split('\n')])
self.text_widget.insert('1.0', formatted_text) # insert new text
def run(self):
self.window.mainloop()
new_gui = DirWalkerGui()
new_gui.run()
| [] |
2024-01-10 | DominiquePaul/whispa | modules~whispa.py | import os
import pathlib
import math
from pathlib import Path
import openai
from pydub import AudioSegment
from moviepy.editor import AudioFileClip
openai.api_key = os.environ["OPENAI_API_KEY"]
def convert_to_mp3(input_file, output_file):
clip = AudioFileClip(input_file)
clip.write_audiofile(output_file)
clip.close()
def split_mp3(input_file: str, output_prefix: str, folder_path: pathlib.Path, duration: int = 300000):
folder_path = Path(folder_path)
audio = AudioSegment.from_mp3(input_file)
total_duration = len(audio)
num_parts = math.ceil(total_duration / duration)
for i in range(num_parts):
start_time = i * duration
end_time = min((i + 1) * duration, total_duration)
part = audio[start_time:end_time]
path_template = str(folder_path / f"{output_prefix}_") + "{}.mp3"
output_file = path_template.format(i + 1)
part.export(output_file, format="mp3")
print(f"Exported {output_file}")
return num_parts, path_template
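# A hedged end-to-end sketch (not part of the original module): the file names below are
# hypothetical, and transcribe_mp3_group is defined further down (it is resolved at call time).
def transcribe_video_example(video_path: str = "example_talk.mp4") -> str:
    """Convert a video to mp3, split it into 5-minute chunks and transcribe all of them."""
    work_dir = Path("transcripts")
    work_dir.mkdir(exist_ok=True)
    mp3_path = str(work_dir / "audio.mp3")
    convert_to_mp3(video_path, mp3_path)
    num_parts, path_template = split_mp3(mp3_path, "audio_part", work_dir)
    return transcribe_mp3_group(path_template, num_parts)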
def transcribe_mp3_group(file_template: str, num_parts: int) -> str:
transcripts = []
for i in range(num_parts):
path = str(file_template.format(i + 1))
with open(path, "rb") as audio_file:
whisper_obj = openai.Audio.transcribe("whisper-1", audio_file)
transcripts.append(whisper_obj.text)
full_text = "\n\n".join(transcripts)
return full_text
| [
"PLACEHOLDER_"
] |
2024-01-10 | nankingguo/langchain-ChatGLM | chains~local_doc_qa.py | from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.retrievers import ChatGPTPluginRetriever
from langchain.document_loaders import UnstructuredFileLoader
from models.chatglm_llm import ChatGLM
import sentence_transformers
import os
from configs.model_config import *
import datetime
from typing import List
from textsplitter import ChineseTextSplitter
# return top-k text chunk from vector store
VECTOR_SEARCH_TOP_K = 6
# LLM input history length
LLM_HISTORY_LEN = 3
def load_file(filepath):
if filepath.lower().endswith(".pdf"):
loader = UnstructuredFileLoader(filepath)
textsplitter = ChineseTextSplitter(pdf=True)
docs = loader.load_and_split(textsplitter)
else:
loader = UnstructuredFileLoader(filepath, mode="elements")
textsplitter = ChineseTextSplitter(pdf=False)
docs = loader.load_and_split(text_splitter=textsplitter)
return docs
class LocalDocQA:
llm: object = None
embeddings: object = None
def init_cfg(self,
embedding_model: str = EMBEDDING_MODEL,
embedding_device=EMBEDDING_DEVICE,
llm_history_len: int = LLM_HISTORY_LEN,
llm_model: str = LLM_MODEL,
llm_device=LLM_DEVICE,
top_k=VECTOR_SEARCH_TOP_K,
use_ptuning_v2: bool = USE_PTUNING_V2
):
self.llm = ChatGLM()
self.llm.load_model(model_name_or_path=llm_model_dict[llm_model],
llm_device=llm_device,
use_ptuning_v2=use_ptuning_v2)
self.llm.history_len = llm_history_len
self.embeddings = HuggingFaceEmbeddings(model_name=embedding_model_dict[embedding_model], )
self.embeddings.client = sentence_transformers.SentenceTransformer(self.embeddings.model_name,
device=embedding_device)
self.top_k = top_k
def init_knowledge_vector_store(self,
filepath: str or List[str],
vs_path: str or os.PathLike = None):
loaded_files = []
if isinstance(filepath, str):
if not os.path.exists(filepath):
print("路径不存在")
return None
elif os.path.isfile(filepath):
file = os.path.split(filepath)[-1]
try:
docs = load_file(filepath)
print(f"{file} 已成功加载")
loaded_files.append(filepath)
except Exception as e:
print(e)
print(f"{file} 未能成功加载")
return None
elif os.path.isdir(filepath):
docs = []
for file in os.listdir(filepath):
fullfilepath = os.path.join(filepath, file)
try:
docs += load_file(fullfilepath)
print(f"{file} 已成功加载")
loaded_files.append(fullfilepath)
except Exception as e:
print(e)
print(f"{file} 未能成功加载")
else:
docs = []
for file in filepath:
try:
docs += load_file(file)
print(f"{file} 已成功加载")
loaded_files.append(file)
except Exception as e:
print(e)
print(f"{file} 未能成功加载")
if vs_path and os.path.isdir(vs_path):
vector_store = FAISS.load_local(vs_path, self.embeddings)
vector_store.add_documents(docs)
else:
if not vs_path:
vs_path = f"""{VS_ROOT_PATH}{os.path.splitext(file)[0]}_FAISS_{datetime.datetime.now().strftime("%Y%m%d_%H%M%S")}"""
vector_store = FAISS.from_documents(docs, self.embeddings)
vector_store.save_local(vs_path)
return vs_path if len(docs) > 0 else None, loaded_files
def get_knowledge_based_answer(self,
query,
chat_history=[], ):
prompt_template = """基于以下已知信息,简洁和专业的来回答用户的问题。
如果无法从中得到答案,请说 "根据已知信息无法回答该问题" 或 "没有提供足够的相关信息",不允许在答案中添加编造成分,答案请使用中文。
已知内容:
{context}
问题:
{question}"""
prompt = PromptTemplate(
template=prompt_template,
input_variables=["context", "question"]
)
global_chat_history = chat_history
retriever = ChatGPTPluginRetriever(url="http://127.0.0.1:8000", bearer_token="foo")
knowledge_chain = RetrievalQA.from_llm(
llm=self.llm,
retriever=retriever,
prompt=prompt
)
knowledge_chain.combine_documents_chain.document_prompt = PromptTemplate(
input_variables=["page_content"], template="{page_content}"
)
knowledge_chain.return_source_documents = True
result = knowledge_chain({"query": query})
self.llm.history[-1][0] = query
return result, self.llm.history
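# A hedged usage sketch (not part of the original module): the document path and question below
# are made up, and the model/embedding choices come from the defaults in configs.model_config.
if __name__ == "__main__":
    local_doc_qa = LocalDocQA()
    local_doc_qa.init_cfg()
    vs_path, loaded_files = local_doc_qa.init_knowledge_vector_store("content/sample.md")
    answer, history = local_doc_qa.get_knowledge_based_answer("What is this document about?")
    print(answer["result"])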
| [
"{page_content}",
"没有提供足够的相关信息",
"question",
"基于以下已知信息,简洁和专业的来回答用户的问题。\n 如果无法从中得到答案,请说 \"根据已知信息无法回答该问题\" 或 \"没有提供足够的相关信息\",不允许在答案中添加编造成分,答案请使用中文。\n \n 已知内容:\n {context}\n \n 问题:\n {question}",
"根据已知信息无法回答该问题",
"context"
] |
2024-01-10 | gustapp/ganimedes-chatbot | scripts~make_dataset.py | #%%
import pandas as pd
import click
df = pd.read_json('./db_poli_v2.json')
df = df[['sigla', 'name', 'objetivos', 'programa', 'programa_resumido']]
df['name'] = df['name'].apply(lambda x: '-'.join(x.split('-')[1:]).strip())
documents = df[['name', 'objetivos', 'programa', 'programa_resumido']].apply(lambda x: ' '.join(x), axis=1)
print(len(documents))
df.head()
#%% [markdown]
# ## Pre-processing
import nltk
import gensim
from gensim.corpora import Dictionary
from gensim import corpora, models
from nltk import collocations
from nltk.tokenize import RegexpTokenizer
from nltk.stem import RSLPStemmer
from nltk.corpus import stopwords
nltk.download('rslp')
nltk.download('stopwords')  # required for stopwords.words('portuguese') below
STOPWORDS = stopwords.words('portuguese')
rgtokenizer = RegexpTokenizer(r'\w+')
#%% [markdown]
# ### Lemmatize & Stemming
#%%
# Preprocessing methods
def lemmatize_stemming(text):
stemmer = RSLPStemmer()
return stemmer.stem(text)
def preprocess(text, word_tokenize=rgtokenizer.tokenize):
result = []
for token in word_tokenize(text):
if token not in STOPWORDS and len(token) > 3:
result.append(lemmatize_stemming(token))
return result
#%%
# (Debug) Preview a doc before preprocessing
doc_sample = documents[0]
print('original document: ')
words = []
for word in doc_sample.split(' '):
words.append(word)
print(words)
print('\n\n tokenized and lemmatized document: ')
print(preprocess(doc_sample))
#%%
# Preprocess all documents
cleaned_docs = documents.map(preprocess)
cleaned_docs[:10]
#%% [markdown]
# ### Collocations
#%%
# Build Abstract Text Corpus
corpus_abstract = ' '.join(cleaned_docs.map(lambda tokens: ' '.join(tokens)))
len(corpus_abstract.split(' '))
#%%
# Identify Collocations
cl = collocations.BigramCollocationFinder.from_words(corpus_abstract.split(' '))
#%%
# 200 best collocations by likelihood ratio
set_collocation = cl.nbest(collocations.BigramAssocMeasures().likelihood_ratio, 200)
set_collocation[:10]
#%%
# Apply Collocations
def apply_collocations(tokens, set_colloc=set_collocation):
""" Reference: acidtobi
url: https://stackoverflow.com/questions/43572898/apply-collocation-from-listo-of-bigrams-with-nltk-in-python
"""
res = ' '.join(tokens)
for b1,b2 in set_colloc:
res = res.replace("%s %s" % (b1 ,b2), "%s_%s" % (b1 ,b2))
for b1, b2 in set_colloc:
res = res.replace("_%s_%s" % (b1 ,b2), "_%s %s_%s" % (b1, b1 ,b2))
return res.split(' ')
processed_docs = cleaned_docs.map(apply_collocations)
processed_docs[:10]
#%% [markdown]
# ### Dictionary
#%%
# Create a dictionary from ‘processed_docs’ containing the number of times a word appears in the training set
dictionary = Dictionary(processed_docs)
count = 0
for k, v in dictionary.iteritems():
print(k, v)
count += 1
if count > 10:
break
#%%
# Filter out tokens that appear in fewer than 2 docs or in more than 70% of the docs
# Keep (at most) only the first 100000 most frequent
dictionary.filter_extremes(no_below=2, no_above=0.7, keep_n=100000)
len(dictionary.token2id)
#%% [markdown]
# ### Encoding Model (BoW, TF-IDF)
#%%
# Gensim doc2bow
# For each document we create a dictionary reporting how many
# words and how many times those words appear
bow_corpus = [dictionary.doc2bow(doc) for doc in processed_docs]
bow_corpus[0]
#%%
# (Debug) Preview BOW for our sample
bow_doc_sample_number = bow_corpus[0]
for i in range(len(bow_doc_sample_number)):
print("Word {} (\"{}\") appears {} time.".format(bow_doc_sample_number[i][0],
dictionary[bow_doc_sample_number[i][0]], bow_doc_sample_number[i][1]))
#%%
# TF-IDF
# Preview TF-IDF scores for our first doc
tfidf = models.TfidfModel(bow_corpus)
corpus_tfidf = tfidf[bow_corpus]
from pprint import pprint
for doc in corpus_tfidf:
pprint(doc)
break
# #%% [markdown]
# # ## Topic Model
# #%%
# # Running LDA using BOW
# from gensim.models.coherencemodel import CoherenceModel
# import logging
# logging.basicConfig(filename='./lda_bow.log',
# format="%(asctime)s:%(levelname)s:%(message)s",
# level=logging.INFO)
# coherence_x_n_topics = []
# for n_topics in range(3, 101):
# lda_model = gensim.models.LdaMulticore(bow_corpus, num_topics=n_topics, id2word=dictionary, \
# alpha=[0.01]*n_topics, eta=[0.01]*len(dictionary.keys()), passes=78, workers=11)
# lda_model.save('./lda_bow/lda_model_tp-' + str(n_topics))
# """ Coherence Model - Umass """
# cm_bow = CoherenceModel(model=lda_model, corpus=bow_corpus, coherence='u_mass')
# coherence_value_umass = cm_bow.get_coherence()
# logging.info('coherence value - umass: ' + str(coherence_value_umass))
# """ Coherence Model - C_V """
# cm_bow_cv = CoherenceModel(model=lda_model, corpus=bow_corpus, texts=processed_docs, dictionary=dictionary, coherence='c_v')
# coherence_value_cv = cm_bow_cv.get_coherence()
# logging.info('coherence value - cv: ' + str(coherence_value_cv))
# """ Coherence Model - C_UCI """
# cm_bow_uci = CoherenceModel(model=lda_model, corpus=bow_corpus, texts=processed_docs, dictionary=dictionary, coherence='c_uci')
# coherence_value_cuci = cm_bow_uci.get_coherence()
# logging.info('coherence value - cuci: ' + str(coherence_value_cuci))
# """ Coherence Model - C_NPMI """
# cm_bow_npmi = CoherenceModel(model=lda_model, corpus=bow_corpus, texts=processed_docs, dictionary=dictionary, coherence='c_npmi')
# coherence_value_cnpmi = cm_bow_npmi.get_coherence()
# logging.info('coherence value - cnpmi: ' + str(coherence_value_cnpmi))
# coherence_x_n_topics.append((n_topics, coherence_value_umass, coherence_value_cv, coherence_value_cuci, coherence_value_cnpmi))
# for idx, topic in lda_model.print_topics(-1):
# print('Topic: {} \nWords: {}'.format(idx, topic))
# model_metrics = pd.DataFrame(data=coherence_x_n_topics, columns=['n topics', 'umass', 'cv', 'cuci', 'cnpmi'], index=range(3, 101))
# model_metrics.head()
# model_metrics.to_csv('./coherence_curve_bow.csv')
#%%
| [] |
2024-01-10 | RizqiSeijuuro/streamlit-multipages | pages~2_Chat_Data.py | import pandas as pd
import streamlit as st
from pandasai import PandasAI
from pandasai.llm.openai import OpenAI
st.set_page_config(
page_title="Hello",
page_icon="👋",
)
st.write('# Welcome to ChatData! 👋')
st.subheader('Chat anything with your Data!')
st.markdown(
"""
    **This app uses a GPT model,** so make sure you don't upload any confidential data here.
"""
)
OPENAI_API_KEY = st.text_input(label="Add Your OPENAI API KEY", value="")
st.markdown("If you don't know how to get an OPEN API Key. [Check this blog!](https://www.howtogeek.com/885918/how-to-get-an-openai-api-key/).")
if OPENAI_API_KEY != "":
llm = OpenAI(api_token=OPENAI_API_KEY)
pandas_ai = PandasAI(llm)
st.subheader('Upload your Data')
file_upload = st.file_uploader(label="Choose a CSV file")
if file_upload is not None:
st.subheader('Sample Data')
data = pd.read_csv(file_upload)
st.dataframe(data.sample(10))
st.subheader('Question')
question = st.text_input(label="Add questions to your data", value="")
if question != "":
st.subheader('Result:')
st.write(pandas_ai.run(data, prompt=question)) | [] |
2024-01-10 | francisbrero/notion_QA | notion~support.py | # Import necessary modules
import os
from dotenv import find_dotenv, load_dotenv
import streamlit as st
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from train import init_pinecone_index
from RAG import init_rag
from utils import add_sidebar, format_sources
# Set up the Streamlit app
st.set_page_config(page_title="MadKudu: Support Rubook Chat 🧠", page_icon=":robot_face:")
st.title("🤖 MadKudu: Chat with our Notion Support Runbooks 🧠")
# Set up the sidebar
add_sidebar(st)
# initialize the variables
with st.spinner("Initializing..."):
# get the index
index_name = 'notion-db-chatbot'
openai_api_key, vectordb = init_rag(index_name)
st.success("Ready to go! Please write your question in the chat box below", icon="✅")
# initialize the LLM
llm = ChatOpenAI(
openai_api_key=openai_api_key,
model_name='gpt-3.5-turbo',
temperature=0.0
)
template = """You are a support agent who knows the knowledge base inside-out.
If you don't know the answer, just say that you don't know, don't try to make up an answer. Tell the user they might need to create a runbook to address this specific question.
Keep the answer concise.
Question: {question}
Helpful Answer:"""
rag_prompt_custom = PromptTemplate.from_template(template)
if "memory" not in st.session_state:
st.session_state.memory = ConversationBufferMemory(
memory_key="chat_history",input_key='question', output_key='answer', return_messages= True
)
# create the function that retrieves source information from the retriever
def query_llm_with_source(retriever, query):
qa_chain = RetrievalQAWithSourcesChain.from_chain_type(
llm=llm,
chain_type="stuff",
memory=st.session_state.memory,
retriever=retriever
)
results = qa_chain({'question': query
,'chat_history': st.session_state.messages
,'rag_prompt': rag_prompt_custom
})
st.session_state.messages.append((query, results['answer'] + "\n\n" + format_sources(results['sources'])))
return results
retriever = vectordb.as_retriever(search_type="similarity", search_kwargs={"k": 3})
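# k=3: each answer is grounded in the three most similar runbook chunks from the index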
# st.session_state.retriever = retriever
if "messages" not in st.session_state:
st.session_state.messages = []
#
for message in st.session_state.messages:
st.chat_message('human').write(message[0])
st.chat_message('ai').write(message[1])
#
if query := st.chat_input():
st.chat_message("human").write(query)
results = query_llm_with_source(retriever, query)
answer = results['answer']
sources = format_sources(results['sources'])
st.chat_message("ai").write(answer + "\n\n" + sources)
| [
"You are a support agent who knows the knowledge base inside-out.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer. Tell the user they might need to create a runbook to address this specific question.\nKeep the answer concise.\nQuestion: {question}\nHelpful Answer:",
"t know the answer, just say that you don"
] |
2024-01-10 | francisbrero/notion_QA | notion~train.py | import os
from dotenv import find_dotenv, load_dotenv
from langchain.vectorstores import Pinecone
from langchain.document_loaders import NotionDirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
import pinecone
import tqdm
import time
from argparse import ArgumentParser
def init(notion_dir_name):
"""Initializes the environment variables and the paths."""
# Load environment variables from .env file
load_dotenv(find_dotenv())
# initialize connection to pinecone (get API key at app.pinecone.io)
pinecone_api_key = os.getenv("PINECONE_API_KEY")
# find your environment next to the api key in pinecone console
pinecone_env = os.getenv("PINECONE_ENV")
# get our OpenAI API key
openai_api_key = os.getenv("OPENAI_API_KEY")
# path to the directory containing the notion documents
notion_dir="./notion_data/"+notion_dir_name
return pinecone_api_key, pinecone_env, notion_dir
def load_notion_db(notion_dir):
"""Loads the notion database from the specified directory and splits the documents into chunks of 500 characters with 0 overlap."""
loader = NotionDirectoryLoader(notion_dir)
data = loader.load()
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=0)
all_splits = text_splitter.split_documents(data)
print(f"we've split the documents into {len(all_splits)} chunks of 500 characters with 0 overlap")
return all_splits
def init_pinecone_index(index_name, pinecone_api_key, pinecone_env):
"""Initializes the pinecone index."""
pinecone.init(api_key=pinecone_api_key, environment=pinecone_env)
if index_name in pinecone.list_indexes():
pinecone.delete_index(index_name)
# we create a new index
pinecone.create_index(
name=index_name,
metric='dotproduct',
dimension=1536 # 1536 is the dimensionality of the OpenAI model
)
# wait for index to be initialized
while not pinecone.describe_index(index_name).status['ready']:
time.sleep(1)
index = pinecone.Index(index_name)
return index
def embed_splits_openai(all_splits, index_name):
    """Embeds the splits with OpenAI embeddings and upserts them into the Pinecone index."""
    embeddings = OpenAIEmbeddings()
    # Seed the index with the first split, then add the remaining splits one at a time.
    # (The original slice all_splits[:i] re-inserted every earlier split on each pass,
    # filling the index with duplicates.)
    vectordb = Pinecone.from_documents(all_splits[:1], embeddings, index_name=index_name)
    for i in tqdm.tqdm(range(1, len(all_splits))):
        vectordb.add_documents([all_splits[i]])
    return
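
# Illustrative sketch (not part of the original script): once the index has been
# populated, the stored splits can be queried with a quick similarity search.
# The example question and the "source" metadata key are assumptions.
def preview_similarity_search(index_name, query="How do I rotate an API key?", k=3):
    """Query the populated index with a sample question and print the top matches."""
    embeddings = OpenAIEmbeddings()
    vectordb = Pinecone.from_existing_index(index_name, embeddings)
    for doc in vectordb.similarity_search(query, k=k):
        print(doc.metadata.get("source"), doc.page_content[:100])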
# run our main function
if __name__ == '__main__':
# get arguments from the command line
parser = ArgumentParser()
parser.add_argument("-n", "--notion", dest="notion_dir_name", help="what notion directory do you want to embed", metavar="NOTION_DIR", default="support_runbook")
args = parser.parse_args()
notion_dir_name = args.notion_dir_name
index_name = 'notion-db-chatbot'
print("Ok let's go!")
pinecone_api_key, pinecone_env, notion_dir = init(notion_dir_name)
print("Split the documents into chunks of 500 characters with 0 overlap.")
all_splits = load_notion_db(notion_dir)
print("Initializing the pinecone index...")
index = init_pinecone_index(index_name, pinecone_api_key, pinecone_env)
print("we've created an index and here is it's description")
index.describe_index_stats()
print("let's embed the splits into the index, this might take some time and will cost you $")
embed_splits_openai(all_splits, index_name)
print("... and we're done! here is the index description again")
index.describe_index_stats() | [] |
2024-01-10 | TemBurdakov/flask | chat_bot.py | from langchain.schema import HumanMessage, SystemMessage
from langchain.chat_models.gigachat import GigaChat
from langchain_core.messages import SystemMessage, HumanMessage
def chat_questions(user_input, chat):
messages = [SystemMessage(
content='Ты преподаватель, твоя цель сгенерировать 5 - 6 вопросов по тексту, который ведёт пользователь, так чтобы проверить его знания по этому тексту, например: текст:{Меня зовут Алёна , мне 32 года}, твой вопрос:{Как тебя зовут?}, не расстраивай пользователя, если он не получит вопросов по тексту, он очень сильно расстроится')
, HumanMessage(content=user_input)]
res = chat(messages)
messages.append(res)
return res.content
def chat_answer(questions, chat):
messages = [SystemMessage(
content='Тебе надо сгенерировать ответы на вопросы по тексту, который тебе пришлёт пользователь'),
HumanMessage(content=questions)]
res = chat(messages)
messages.append(res)
return res.content
def generate_false(questions, chat):
messages = [SystemMessage(
content='Тебе надо сгенерировать неверные ответы на вопросы по тексту, который тебе пришлёт пользователь'),
HumanMessage(content=questions)]
res = chat(messages)
messages.append(res)
return res.content
def update_text(text):
list_num = ["1.", "2.", "3.", "4.", "5.", "6."]
for i, index in enumerate(list_num):
text = text.split(index)
text = f" {i + 1}.".join(text)
return text
def generate_func(user_input, func):
process = func(user_input, chat=GigaChat(
credentials="MDQ0MzhkMTYtZTQ0NS00M2M0LWI5OGItNmRmZDdkYTNkZmFmOjA0MDliNzIzLTU0YjYtNDY3OC1iMjVjLTY4MjczYjExOWU3Yg==",
verify_ssl_certs=False))
return update_text(process)
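
# Illustrative usage sketch (not part of the original module): chain the question
# generator and the answer generator over a sample text. The sample below is a
# hypothetical placeholder taken from the system prompt's own example.
if __name__ == "__main__":
    sample_text = "Меня зовут Алёна, мне 32 года."
    questions = generate_func(sample_text, chat_questions)
    answers = generate_func(questions, chat_answer)
    print(questions)
    print(answers)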
'''def generate_answer(user_input):
answers = chat_answer(user_input, chat=GigaChat(
credentials="MDQ0MzhkMTYtZTQ0NS00M2M0LWI5OGItNmRmZDdkYTNkZmFmOjA0MDliNzIzLTU0YjYtNDY3OC1iMjVjLTY4MjczYjExOWU3Yg==",
verify_ssl_certs=False))
return answers
def get_text_by_url():
url = url_entry.get()
rs = requests.get(url)
root = BeautifulSoup(rs.content, 'html.parser')
print(root)
paragraphs = root.find_all('p')
text_for_viewing = ""
for p in paragraphs:
text_for_viewing += p.text
text_editor.delete("1.0", END)
text_editor.insert("1.0", text_for_viewing)
def update_text(text):
list_num = ["1.", "2.", "3.", "4.", "5.", "6."]
for i, index in enumerate(list_num):
text = text.split(index)
text = f"\n {i + 1}.".join(text)
mav = text.split("\n")
mav.pop(0)
return mav '''
| [
"Тебе надо сгенерировать неверные ответы на вопросы по тексту, который тебе пришлёт пользователь",
"Ты преподаватель, твоя цель сгенерировать 5 - 6 вопросов по тексту, который ведёт пользователь, так чтобы проверить его знания по этому тексту, например: текст:{Меня зовут Алёна , мне 32 года}, твой вопрос:{Как тебя зовут?}, не расстраивай пользователя, если он не получит вопросов по тексту, он очень сильно расстроится",
"Тебе надо сгенерировать ответы на вопросы по тексту, который тебе пришлёт пользователь"
] |
2024-01-10 | alientony/LLMBroadcaster | Radio_Host5.py | import os
import random
import tkinter as tk
from tkinter import Listbox, Button, Checkbutton
import threading
from pydub import AudioSegment
from pydub.playback import play
import openai
from gtts import gTTS
import requests
api_base = "http://localhost:5001/v1"
OPEN_WEATHER_MAP_API_KEY = ""
CITY = "London" # Change to your desired city
class OpenAI:
def __init__(self):
pass
@staticmethod
def Completion_create(model, prompt, max_tokens, n, stop, temperature, api_base):
response = openai.Completion.create(
model=model,
prompt=prompt,
max_tokens=max_tokens,
n=n,
stop=stop,
temperature=temperature,
api_base=api_base
)
return response
class MusicDirectoryWatcher:
def __init__(self, directory):
self.directory = directory
self.last_played = None
def get_random_song(self):
all_songs = [f for f in os.listdir(self.directory) if f.endswith(".mp3")]
if len(all_songs) == 1:
return all_songs[0]
selectable_songs = [song for song in all_songs if song != self.last_played]
selected_song = random.choice(selectable_songs)
self.last_played = selected_song
return selected_song
def generate_voice_announcement(text, lang="en"):
tts = gTTS(text=text, lang=lang, slow=False)
audio_file = "audio_output.mp3"
tts.save(audio_file)
return audio_file
def play_audio_file(audio_file_path):
audio_segment = AudioSegment.from_file(audio_file_path, format="mp3")
play(audio_segment)
def generate_comedy_skit():
prompt = "Create a short, funny comedy skit for the radio audience."
response = openai_api.Completion_create(
model="gpt-4-32k",
prompt=prompt,
max_tokens=150,
n=1,
stop=None,
temperature=0.7,
api_base=api_base
)
skit_text = response["choices"][0]["text"]
skit_data = {
"Voice Introduction Text": skit_text,
"Type": "Comedy Skit"
}
song_queue.append(ComedySkitItem(skit_text))
listbox_commands.insert(tk.END, "Queued: Comedy Skit")
def on_add_song():
next_song = watcher.get_random_song()
song_intro_prompt = f"Co-Host: Introduce the next song titled '{next_song[:-4]}' to the audience. Only say the name in a funny sentence. Announcer:"
response = openai_api.Completion_create(
model="gpt-4-32k",
prompt=song_intro_prompt,
max_tokens=1000,
n=1,
stop=None,
temperature=0.7,
api_base=api_base
)
song_intro = response["choices"][0]["text"]
song_path = os.path.join(watcher.directory, next_song)
song_queue.append(SongItem(song_intro, song_path))
listbox_commands.insert(tk.END, f"Queued: {next_song}")
def play_from_queue():
if not song_queue:
return
item = song_queue.pop(0)
item.play()
listbox_commands.delete(0)
# Check if auto_play_next is True and play next item if available
if auto_play_next.get() and song_queue:
window.after(1000, threaded_play_from_queue) # Delay of 1 second before next play
def threaded_play_from_queue():
threading.Thread(target=play_from_queue).start()
def move_up():
index = listbox_commands.curselection()
if not index:
return
index = index[0]
if index == 0:
return
song_queue[index], song_queue[index-1] = song_queue[index-1], song_queue[index]
listbox_commands.insert(index-1, listbox_commands.get(index))
listbox_commands.delete(index+1)
listbox_commands.selection_set(index-1)
def move_down():
index = listbox_commands.curselection()
if not index:
return
index = index[0]
if index == len(song_queue) - 1:
return
song_queue[index], song_queue[index+1] = song_queue[index+1], song_queue[index]
listbox_commands.insert(index+2, listbox_commands.get(index))
listbox_commands.delete(index)
listbox_commands.selection_set(index+1)
def toggle_auto_play():
if auto_play_next.get():
btn_toggle_auto_play.config(text="Auto-Play: ON")
else:
btn_toggle_auto_play.config(text="Auto-Play: OFF")
def fetch_weather():
base_url = f"http://api.openweathermap.org/data/2.5/weather?q={CITY}&appid={OPEN_WEATHER_MAP_API_KEY}&units=metric"
response = requests.get(base_url)
data = response.json()
if 'main' not in data:
print(f"Error fetching weather data: {data}")
return None, None
temperature = data["main"]["temp"]
description = data["weather"][0]["description"]
return temperature, description
def on_generate_weather_forecast():
temperature, description = fetch_weather()
prompt = f"Provide a fun weather forecast for today in {CITY}. The current temperature is {temperature}°C with {description}."
response = openai_api.Completion_create(
model="gpt-4-32k",
prompt=prompt,
max_tokens=150,
n=1,
stop=None,
temperature=0.7,
api_base=api_base
)
forecast_text = response["choices"][0]["text"]
song_queue.append(WeatherForecastItem(forecast_text))
listbox_commands.insert(tk.END, "Queued: Weather Forecast")
class QueueItem:
def __init__(self, voice_intro_text):
self.voice_intro_text = voice_intro_text
def play(self):
voice_intro_file = generate_voice_announcement(self.voice_intro_text)
play_audio_file(voice_intro_file)
class SongItem(QueueItem):
def __init__(self, voice_intro_text, song_file):
super().__init__(voice_intro_text)
self.song_file = song_file
def play(self):
super().play()
play_audio_file(self.song_file)
class ComedySkitItem(QueueItem):
pass
class WeatherForecastItem(QueueItem):
pass
window = tk.Tk()
window.title("Radio Host")
song_queue = []
auto_play_next = tk.BooleanVar(value=False)
frame_left = tk.Frame(window)
frame_left.pack(side=tk.LEFT, padx=20, pady=20)
btn_add_song = Button(frame_left, text="Add Song", command=on_add_song)
btn_add_song.pack(pady=10)
btn_generate_skit = Button(frame_left, text="Generate Comedy Skit", command=generate_comedy_skit)
btn_generate_skit.pack(pady=10)
btn_play_next = Button(frame_left, text="Play Next", command=threaded_play_from_queue)
btn_play_next.pack(pady=10)
btn_move_up = Button(frame_left, text="Move Up", command=move_up)
btn_move_up.pack(pady=10)
btn_move_down = Button(frame_left, text="Move Down", command=move_down)
btn_move_down.pack(pady=10)
btn_toggle_auto_play = Checkbutton(frame_left, text="Auto-Play: OFF", variable=auto_play_next, command=toggle_auto_play)
btn_toggle_auto_play.pack(pady=10)
btn_generate_weather = Button(frame_left, text="Generate Weather Forecast", command=on_generate_weather_forecast)
btn_generate_weather.pack(pady=10)
frame_right = tk.Frame(window)
frame_right.pack(side=tk.RIGHT, padx=20, pady=20)
listbox_commands = Listbox(frame_right)
listbox_commands.pack(pady=10)
if __name__ == "__main__":
music_dir = "./music_folder"
openai_api = OpenAI()
watcher = MusicDirectoryWatcher(music_dir)
window.mainloop()
| [
"Create a short, funny comedy skit for the radio audience.",
"Co-Host: Introduce the next song titled 'PLACEHOLDER' to the audience. Only say the name in a funny sentence. Announcer:",
"Provide a fun weather forecast for today in PLACEHOLDER. The current temperature is PLACEHOLDER°C with PLACEHOLDER."
] |
2024-01-10 | alientony/LLMBroadcaster | Radio_Host7.py | import os
import random
import tkinter as tk
from tkinter import Listbox, Button, Checkbutton
import threading
from pydub import AudioSegment
from pydub.playback import play
import openai
from gtts import gTTS
import requests
import datetime
import configparser
# Load the configuration
config = configparser.ConfigParser()
config.read('config.ini')
# Access the configuration settings in the code
api_base = config['DEFAULT']['api_base']
openai_api_key = config['OpenAI']['api_key']
openweather_api_key = config['OpenWeatherMap']['api_key']
city = config['OpenWeatherMap']['city']
song_intro_prompt = config['Prompts']['song_intro']
comedy_skit_prompt = config['Prompts']['comedy_skit']
weather_forecast_prompt = config['Prompts']['weather_forecast']
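
# Illustrative config.ini layout assumed by the reads above (all values are placeholders):
#   [DEFAULT]
#   api_base = http://localhost:5001/v1
#   [OpenAI]
#   api_key = <your OpenAI key>
#   [OpenWeatherMap]
#   api_key = <your OpenWeatherMap key>
#   city = London
#   [Prompts]
#   song_intro = Co-Host: Introduce the next song titled '{song_name}' to the audience. Announcer:
#   comedy_skit = Create a short, funny comedy skit for the radio audience.
#   weather_forecast = Provide a fun weather forecast for {city}. It is {temperature} C with {description}.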
class LogBook:
def __init__(self, filename="log_book.txt"):
self.filename = filename
def write_entry(self, entry_type, content):
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
with open(self.filename, "a", encoding='utf-8') as f:
f.write(f"{timestamp} - {entry_type}: {content}\n")
class OpenAI:
def __init__(self):
pass
@staticmethod
def Completion_create(model, prompt, max_tokens, n, stop, temperature, api_base):
response = openai.Completion.create(
model=model,
prompt=prompt,
max_tokens=max_tokens,
n=n,
stop=stop,
temperature=temperature,
api_base=api_base
)
return response
log_book = LogBook()
class MusicDirectoryWatcher:
def __init__(self, directory):
self.directory = directory
self.last_played = None
def get_random_song(self):
all_songs = [f for f in os.listdir(self.directory) if f.endswith(".mp3")]
if len(all_songs) == 1:
return all_songs[0]
selectable_songs = [song for song in all_songs if song != self.last_played]
selected_song = random.choice(selectable_songs)
self.last_played = selected_song
return selected_song
def generate_voice_announcement(text, lang="en"):
tts = gTTS(text=text, lang=lang, slow=False)
audio_file = "audio_output.mp3"
tts.save(audio_file)
return audio_file
def play_audio_file(audio_file_path):
audio_segment = AudioSegment.from_file(audio_file_path, format="mp3")
play(audio_segment)
def generate_comedy_skit():
prompt = "Create a short, funny comedy skit for the radio audience."
response = openai_api.Completion_create(
model="gpt-4-32k",
prompt=comedy_skit_prompt,
max_tokens=150,
n=1,
stop=None,
temperature=0.7,
api_base=api_base
)
skit_text = response["choices"][0]["text"]
skit_data = {
"Voice Introduction Text": skit_text,
"Type": "Comedy Skit"
}
song_queue.append(ComedySkitItem(skit_text))
log_book.write_entry("Comedy Skit", skit_text)
listbox_commands.insert(tk.END, "Queued: Comedy Skit")
def on_add_song():
next_song = watcher.get_random_song()
song_intro_prompt = f"Co-Host: Introduce the next song titled '{next_song[:-4]}' to the audience. Only say the name in a funny sentence. Announcer:"
response = openai_api.Completion_create(
model="gpt-4-32k",
prompt=song_intro_prompt.format(song_name=next_song[:-4]),
max_tokens=1000,
n=1,
stop=None,
temperature=0.7,
api_base=api_base
)
song_intro = response["choices"][0]["text"]
song_path = os.path.join(watcher.directory, next_song)
song_queue.append(SongItem(song_intro, song_path))
log_book.write_entry("Song Intro", song_intro)
listbox_commands.insert(tk.END, f"Queued: {next_song}")
is_playing = False
play_lock = threading.Lock()
def play_from_queue():
global is_playing
with play_lock:
if is_playing:
print("Already playing, please wait...")
return
is_playing = True
if not song_queue:
with play_lock:
is_playing = False
return
item = song_queue.pop(0)
try:
item.play()
finally:
with play_lock:
is_playing = False
listbox_commands.delete(0)
# Check if auto_play_next is True and play next item if available
if auto_play_next.get() and song_queue:
window.after(1000, threaded_play_from_queue) # Delay of 1 second before next play
def threaded_play_from_queue():
threading.Thread(target=play_from_queue).start()
def move_up():
index = listbox_commands.curselection()
if not index:
return
index = index[0]
if index == 0:
return
song_queue[index], song_queue[index-1] = song_queue[index-1], song_queue[index]
listbox_commands.insert(index-1, listbox_commands.get(index))
listbox_commands.delete(index+1)
listbox_commands.selection_set(index-1)
def move_down():
index = listbox_commands.curselection()
if not index:
return
index = index[0]
if index == len(song_queue) - 1:
return
song_queue[index], song_queue[index+1] = song_queue[index+1], song_queue[index]
listbox_commands.insert(index+2, listbox_commands.get(index))
listbox_commands.delete(index)
listbox_commands.selection_set(index+1)
def toggle_auto_play():
if auto_play_next.get():
btn_toggle_auto_play.config(text="Auto-Play: ON")
else:
btn_toggle_auto_play.config(text="Auto-Play: OFF")
def fetch_weather():
base_url = f"http://api.openweathermap.org/data/2.5/weather?q={city}&appid={openweather_api_key}&units=metric"
response = requests.get(base_url)
data = response.json()
if 'main' not in data:
print(f"Error fetching weather data: {data}")
return None, None
temperature = data["main"]["temp"]
description = data["weather"][0]["description"]
return temperature, description
def on_generate_weather_forecast():
temperature, description = fetch_weather()
if temperature is None or description is None:
print("Failed to fetch weather data.")
return
prompt = weather_forecast_prompt.format(city=city, temperature=temperature, description=description)
response = openai_api.Completion_create(
model="gpt-4-32k",
prompt=prompt,
max_tokens=150,
n=1,
stop=None,
temperature=0.7,
api_base=api_base
)
forecast_text = response["choices"][0]["text"]
song_queue.append(WeatherForecastItem(forecast_text))
listbox_commands.insert(tk.END, "Queued: Weather Forecast")
log_book.write_entry("Weather Forecast", forecast_text)
class QueueItem:
def __init__(self, voice_intro_text):
self.voice_intro_text = voice_intro_text
def play(self):
voice_intro_file = generate_voice_announcement(self.voice_intro_text)
play_audio_file(voice_intro_file)
class SongItem(QueueItem):
def __init__(self, voice_intro_text, song_file):
super().__init__(voice_intro_text)
self.song_file = song_file
def play(self):
super().play()
play_audio_file(self.song_file)
class ComedySkitItem(QueueItem):
pass
class WeatherForecastItem(QueueItem):
pass
def open_settings():
settings_window = tk.Toplevel(window)
settings_window.title("Settings")
tk.Label(settings_window, text="API Base:").pack(pady=5)
api_base_entry = tk.Entry(settings_window, width=40)
api_base_entry.insert(0, api_base)
api_base_entry.pack(pady=5)
tk.Label(settings_window, text="OpenAI API Key:").pack(pady=5)
openai_api_key_entry = tk.Entry(settings_window, width=40)
openai_api_key_entry.insert(0, openai_api_key)
openai_api_key_entry.pack(pady=5)
tk.Label(settings_window, text="OpenWeatherMap API Key:").pack(pady=5)
openweather_api_key_entry = tk.Entry(settings_window, width=40)
openweather_api_key_entry.insert(0, openweather_api_key)
openweather_api_key_entry.pack(pady=5)
tk.Label(settings_window, text="City:").pack(pady=5)
city_entry = tk.Entry(settings_window, width=40)
city_entry.insert(0, city)
city_entry.pack(pady=5)
tk.Label(settings_window, text="Song Intro Prompt:").pack(pady=5)
song_intro_prompt_entry = tk.Entry(settings_window, width=40)
song_intro_prompt_entry.insert(0, song_intro_prompt)
song_intro_prompt_entry.pack(pady=5)
tk.Label(settings_window, text="Comedy Skit Prompt:").pack(pady=5)
comedy_skit_prompt_entry = tk.Entry(settings_window, width=40)
comedy_skit_prompt_entry.insert(0, comedy_skit_prompt)
comedy_skit_prompt_entry.pack(pady=5)
tk.Label(settings_window, text="Weather Forecast Prompt:").pack(pady=5)
weather_forecast_prompt_entry = tk.Entry(settings_window, width=40)
weather_forecast_prompt_entry.insert(0, weather_forecast_prompt)
weather_forecast_prompt_entry.pack(pady=5)
def save_settings():
config.set('OpenAI', 'api_key', openai_api_key_entry.get())
config.set('DEFAULT', 'api_base', api_base_entry.get())
config.set('OpenWeatherMap', 'api_key', openweather_api_key_entry.get())
config.set('OpenWeatherMap', 'city', city_entry.get())
config.set('Prompts', 'song_intro', song_intro_prompt_entry.get())
config.set('Prompts', 'comedy_skit', comedy_skit_prompt_entry.get())
config.set('Prompts', 'weather_forecast', weather_forecast_prompt_entry.get())
with open('config.ini', 'w') as configfile:
config.write(configfile)
# Update the global variables
global api_base, openai_api_key, openweather_api_key, city
global song_intro_prompt, comedy_skit_prompt, weather_forecast_prompt
api_base = api_base_entry.get()
openai_api_key = openai_api_key_entry.get()
openweather_api_key = openweather_api_key_entry.get()
city = city_entry.get()
song_intro_prompt = song_intro_prompt_entry.get()
comedy_skit_prompt = comedy_skit_prompt_entry.get()
weather_forecast_prompt = weather_forecast_prompt_entry.get()
settings_window.destroy()
tk.Button(settings_window, text="Save", command=save_settings).pack(pady=20)
window = tk.Tk()
window.title("Radio Host")
song_queue = []
auto_play_next = tk.BooleanVar(value=False)
frame_left = tk.Frame(window)
frame_left.pack(side=tk.LEFT, padx=20, pady=20)
btn_add_song = Button(frame_left, text="Add Song", command=on_add_song)
btn_add_song.pack(pady=10)
btn_generate_skit = Button(frame_left, text="Generate Comedy Skit", command=generate_comedy_skit)
btn_generate_skit.pack(pady=10)
btn_play_next = Button(frame_left, text="Play Next", command=threaded_play_from_queue)
btn_play_next.pack(pady=10)
btn_move_up = Button(frame_left, text="Move Up", command=move_up)
btn_move_up.pack(pady=10)
btn_move_down = Button(frame_left, text="Move Down", command=move_down)
btn_move_down.pack(pady=10)
btn_toggle_auto_play = Checkbutton(frame_left, text="Auto-Play: OFF", variable=auto_play_next, command=toggle_auto_play)
btn_toggle_auto_play.pack(pady=10)
btn_generate_weather = Button(frame_left, text="Generate Weather Forecast", command=on_generate_weather_forecast)
btn_generate_weather.pack(pady=10)
btn_settings = Button(frame_left, text="Settings", command=open_settings)
btn_settings.pack(pady=10)
frame_right = tk.Frame(window)
frame_right.pack(side=tk.RIGHT, padx=20, pady=20)
listbox_commands = Listbox(frame_right)
listbox_commands.pack(pady=10)
if __name__ == "__main__":
music_dir = "./music_folder"
openai_api = OpenAI()
openai.api_key = openai_api_key # Setting the OpenAI API key
watcher = MusicDirectoryWatcher(music_dir)
window.mainloop()
| [
"weather_forecast",
"comedy_skit",
"song_intro",
"Create a short, funny comedy skit for the radio audience.",
"Co-Host: Introduce the next song titled 'PLACEHOLDER' to the audience. Only say the name in a funny sentence. Announcer:"
] |
2024-01-10 | alientony/LLMBroadcaster | Radio_Host_tts_manager.py | import os
import time
import random
from gtts import gTTS
import pygame
import tkinter as tk
from tkinter import Listbox, Button, Checkbutton, simpledialog
import json
import threading
from pydub import AudioSegment
from pydub.playback import play
import openai
import requests
import datetime
import configparser
import re
from playsound import playsound
import emoji
from itertools import zip_longest
pygame.mixer.init()
# Load the configuration
config = configparser.ConfigParser()
config.read('config.ini')
# Access the configuration settings in the code
api_base = config['DEFAULT']['api_base']
openai_api_key = config['OpenAI']['api_key']
openweather_api_key = config['OpenWeatherMap']['api_key']
city = config['OpenWeatherMap']['city']
song_intro_prompt = config['Prompts']['song_intro']
comedy_skit_prompt = config['Prompts']['comedy_skit']
weather_forecast_prompt = config['Prompts']['weather_forecast']
# Load emoji to sound effect mappings from JSON file
with open("emoji_sound_mappings.json", "r", encoding='utf-8') as f:
emoji_sound_mappings = json.load(f)
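# Illustrative structure of emoji_sound_mappings.json (emoji keys map to audio file
# paths; the file names below are placeholders):
#   {"😂": "sound_effects/laugh.mp3", "🌧️": "sound_effects/rain.mp3"}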
class LogBook:
def __init__(self, filename="log_book.txt"):
self.filename = filename
def write_entry(self, entry_type, content):
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
with open(self.filename, "a", encoding='utf-8') as f:
f.write(f"{timestamp} - {entry_type}: {content}\n")
# (duplicate mid-file imports removed; os, time, threading, and gTTS are already
# imported at the top of the file)
import pygame.mixer
import queue
class TTSManager:
def __init__(self, playback_manager):
self.text_queue = queue.Queue()
self.audio_files = {}
self.lock = threading.Lock()
self.thread = threading.Thread(target=self._run, daemon=True)
self.thread.start()
self.playback_manager = playback_manager
        self.audio_files_directory = "tts_audio"  # Set the directory where generated TTS audio files are written
def _run(self):
while True:
text, filename = self.text_queue.get()
if text is None:
break
audio_file = self.generate_voice_announcement(text, filename)
with self.lock:
self.audio_files[filename] = audio_file
def generate_tts(self, text):
filename = f"tts_audio/{int(time.time() * 1000)}.mp3"
self.text_queue.put((text, filename))
return filename
def get_audio_file(self, filename):
with self.lock:
return self.audio_files.pop(filename, None)
def shutdown(self):
self.text_queue.put((None, None)) # Stop the thread
def generate_voice_announcement(self, text, filename):
tts = gTTS(text=text, lang='en', slow=False)
tts.save(filename)
return filename
def play_text_with_effects(self, text):
# Split the text into segments and emojis
emoji_pattern = re.compile('[\U0001F600-\U0001F64F\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF'
'\U0001F700-\U0001F77F\U0001F780-\U0001F7FF\U0001F800-\U0001F8FF'
'\U0001F900-\U0001F9FF\U0001FA00-\U0001FA6F\U0001FA70-\U0001FAFF'
'\U00002702-\U000027B0]+', flags=re.UNICODE)
segments = emoji_pattern.split(text)
emojis = emoji_pattern.findall(text)
# Play each segment and its following emoji
for segment, emoji in zip_longest(segments, emojis, fillvalue=''):
            if segment:
                # generate_tts() only queues the segment on the worker thread and returns a
                # path that already contains the tts_audio/ prefix, so the original join
                # doubled the prefix and could race the generator thread. Synthesize the
                # segment synchronously instead so the file exists before playback.
                filename = f"{self.audio_files_directory}/{int(time.time() * 1000)}.mp3"
                self.generate_voice_announcement(segment, filename)
                self.playback_manager.play_audio(filename)
if emoji:
self.playback_manager.play_sound_effect(emoji)
def preload_tts(self, text):
emoji_pattern = re.compile('[\U0001F600-\U0001F64F\U0001F300-\U0001F5FF\U0001F680-\U0001F6FF'
'\U0001F700-\U0001F77F\U0001F780-\U0001F7FF\U0001F800-\U0001F8FF'
'\U0001F900-\U0001F9FF\U0001FA00-\U0001FA6F\U0001FA70-\U0001FAFF'
'\U00002702-\U000027B0]+', flags=re.UNICODE)
segments = emoji_pattern.split(text)
# Pre-generate TTS audio files for each text segment
for segment in segments:
if segment:
self.generate_tts(segment)
class PlaybackManager:
def __init__(self):
self.queue = []
self.currently_playing = None
pygame.mixer.init()
with open('emoji_sound_mappings.json', encoding='utf-8') as f:
self.emoji_sound_mappings = json.load(f)
def add_to_queue(self, filepath):
self.queue.append(filepath)
if not self.currently_playing:
self.play_next()
def play_next(self):
if self.queue:
self.currently_playing = self.queue.pop(0)
self.play_audio(self.currently_playing)
if self.currently_playing.startswith("tts_audio/"):
self.delete_file(self.currently_playing)
self.play_next()
else:
self.currently_playing = None
def play_audio(self, filepath):
sound = pygame.mixer.Sound(filepath)
sound.play()
time.sleep(sound.get_length()) # Wait for the sound to finish playing
pass
def delete_file(self, filepath):
os.remove(filepath)
def play_sound_effect(self, emoji):
if emoji in self.emoji_sound_mappings:
sound_effect_path = self.emoji_sound_mappings[emoji]
print(f"Playing sound effect for emoji: {emoji}, path: {sound_effect_path}")
try:
self.play_audio(sound_effect_path) # This assumes full paths are stored in the JSON file
except Exception as e:
print(f"Error playing sound effect: {e}")
else:
print(f"No sound effect found for emoji: {emoji}")
class OpenAI:
def __init__(self):
pass
@staticmethod
def Completion_create(model, prompt, max_tokens, n, stop, temperature, api_base):
response = openai.Completion.create(
model=model,
prompt=prompt,
max_tokens=max_tokens,
n=n,
stop=stop,
temperature=temperature,
api_base=api_base
)
return response
class MusicDirectoryWatcher:
def __init__(self, directory):
self.directory = directory
self.last_played = None
def get_random_song(self):
all_songs = [f for f in os.listdir(self.directory) if f.endswith(".mp3")]
if len(all_songs) == 1:
return all_songs[0]
selectable_songs = [song for song in all_songs if song != self.last_played]
selected_song = random.choice(selectable_songs)
self.last_played = selected_song
return selected_song
def generate_voice_announcement(text, filename=None, lang="en"):
if filename is None:
timestamp = str(int(time.time() * 1000))
filename = "tts_audio/{}.mp3".format(timestamp)
tts = gTTS(text=text, lang=lang, slow=False)
tts.save(filename)
return filename
def play_audio_file(audio_file_path):
audio_segment = AudioSegment.from_file(audio_file_path, format="mp3")
play(audio_segment)
class QueueItem:
def __init__(self, voice_intro_text, playback_manager, tts_manager):
self.voice_intro_text = voice_intro_text
self.playback_manager = playback_manager
self.tts_manager = tts_manager
def play(self):
self.tts_manager.play_text_with_effects(self.voice_intro_text)
class SongItem(QueueItem):
def __init__(self, voice_intro_text, song_file, playback_manager, tts_manager):
super().__init__(voice_intro_text, playback_manager, tts_manager)
self.song_file = song_file
def play(self):
# First, play the voice introduction
super().play()
# Then, play the song
play_audio_file(self.song_file)
class ComedySkitItem(QueueItem):
def __init__(self, voice_intro_text, playback_manager, tts_manager):
super().__init__(voice_intro_text, playback_manager, tts_manager)
# other initializations if needed
class WeatherForecastItem(QueueItem):
def __init__(self, voice_intro_text, playback_manager, tts_manager):
super().__init__(voice_intro_text, playback_manager, tts_manager)
# other initializations if needed
def generate_comedy_skit():
prompt = "Create a short, funny comedy skit for the radio audience."
response = openai_api.Completion_create(
model="gpt-4-32k",
prompt=comedy_skit_prompt,
max_tokens=150,
n=1,
stop=None,
temperature=0.7,
api_base=api_base
)
skit_text = response["choices"][0]["text"]
skit_data = {
"Voice Introduction Text": skit_text,
"Type": "Comedy Skit"
}
song_queue.append(ComedySkitItem(skit_text, playback_manager, tts_manager)) # Pass tts_manager here
log_book.write_entry("Comedy Skit", skit_text)
listbox_commands.insert(tk.END, "Queued: Comedy Skit")
# The rest of the code remains unchanged
def on_add_song():
global playback_manager
next_song = watcher.get_random_song()
song_intro_prompt = f"Co-Host: Introduce the next song titled '{next_song[:-4]}' to the audience. Only say the name in a funny sentence. Announcer:"
response = openai_api.Completion_create(
model="gpt-4-32k",
prompt=song_intro_prompt.format(song_name=next_song[:-4]),
max_tokens=2000,
n=1,
stop=None,
temperature=0.7,
api_base=api_base
)
song_intro = response["choices"][0]["text"]
song_path = os.path.join(watcher.directory, next_song)
song_queue.append(SongItem(song_intro, song_path, playback_manager, tts_manager)) # Pass playback_manager here
log_book.write_entry("Song Intro", song_intro)
listbox_commands.insert(tk.END, f"Queued: {next_song}")
is_playing = False
play_lock = threading.Lock()
def play_from_queue():
global is_playing
with play_lock:
if is_playing:
print("Already playing, please wait...")
return
is_playing = True
if not song_queue:
with play_lock:
is_playing = False
return
item = song_queue.pop(0)
try:
item.play()
finally:
with play_lock:
is_playing = False
listbox_commands.delete(0)
# Check if auto_play_next is True and play next item if available
if auto_play_next.get() and song_queue:
window.after(1000, threaded_play_from_queue) # Delay of 1 second before next play
def threaded_play_from_queue():
threading.Thread(target=play_from_queue).start()
def move_up():
index = listbox_commands.curselection()
if not index:
return
index = index[0]
if index == 0:
return
song_queue[index], song_queue[index-1] = song_queue[index-1], song_queue[index]
listbox_commands.insert(index-1, listbox_commands.get(index))
listbox_commands.delete(index+1)
listbox_commands.selection_set(index-1)
def move_down():
index = listbox_commands.curselection()
if not index:
return
index = index[0]
if index == len(song_queue) - 1:
return
song_queue[index], song_queue[index+1] = song_queue[index+1], song_queue[index]
listbox_commands.insert(index+2, listbox_commands.get(index))
listbox_commands.delete(index)
listbox_commands.selection_set(index+1)
def toggle_auto_play():
if auto_play_next.get():
btn_toggle_auto_play.config(text="Auto-Play: ON")
else:
btn_toggle_auto_play.config(text="Auto-Play: OFF")
def fetch_weather():
base_url = f"http://api.openweathermap.org/data/2.5/weather?q={city}&appid={openweather_api_key}&units=metric"
response = requests.get(base_url)
data = response.json()
if 'main' not in data:
print(f"Error fetching weather data: {data}")
return None, None
temperature = data["main"]["temp"]
description = data["weather"][0]["description"]
return temperature, description
def on_generate_weather_forecast():
temperature, description = fetch_weather()
if temperature is None or description is None:
print("Failed to fetch weather data.")
return
prompt = weather_forecast_prompt.format(city=city, temperature=temperature, description=description)
response = openai_api.Completion_create(
model="gpt-4-32k",
prompt=prompt,
max_tokens=2000,
n=1,
stop=None,
temperature=0.7,
api_base=api_base
)
forecast_text = response["choices"][0]["text"]
song_queue.append(WeatherForecastItem(forecast_text, playback_manager, tts_manager)) # Pass playback_manager here
listbox_commands.insert(tk.END, "Queued: Weather Forecast")
log_book.write_entry("Weather Forecast", forecast_text)
class SongItem(QueueItem):
    # Note: this redefinition shadows the SongItem class declared earlier in the file.
    def __init__(self, voice_intro_text, song_file, playback_manager, tts_manager):
        super().__init__(voice_intro_text, playback_manager, tts_manager)
        # Store the path under the same attribute name that play() reads
        # (the original stored song_path but then played self.song_file).
        self.song_file = song_file
    def play(self):
        print("Playing item:", self.voice_intro_text)
        super().play()
        play_audio_file(self.song_file)
def open_settings():
settings_window = tk.Toplevel(window)
settings_window.title("Settings")
tk.Label(settings_window, text="API Base:").pack(pady=5)
api_base_entry = tk.Entry(settings_window, width=40)
api_base_entry.insert(0, api_base)
api_base_entry.pack(pady=5)
tk.Label(settings_window, text="OpenAI API Key:").pack(pady=5)
openai_api_key_entry = tk.Entry(settings_window, width=40)
openai_api_key_entry.insert(0, openai_api_key)
openai_api_key_entry.pack(pady=5)
tk.Label(settings_window, text="OpenWeatherMap API Key:").pack(pady=5)
openweather_api_key_entry = tk.Entry(settings_window, width=40)
openweather_api_key_entry.insert(0, openweather_api_key)
openweather_api_key_entry.pack(pady=5)
tk.Label(settings_window, text="City:").pack(pady=5)
city_entry = tk.Entry(settings_window, width=40)
city_entry.insert(0, city)
city_entry.pack(pady=5)
tk.Label(settings_window, text="Song Intro Prompt:").pack(pady=5)
song_intro_prompt_entry = tk.Entry(settings_window, width=40)
song_intro_prompt_entry.insert(0, song_intro_prompt)
song_intro_prompt_entry.pack(pady=5)
tk.Label(settings_window, text="Comedy Skit Prompt:").pack(pady=5)
comedy_skit_prompt_entry = tk.Entry(settings_window, width=40)
comedy_skit_prompt_entry.insert(0, comedy_skit_prompt)
comedy_skit_prompt_entry.pack(pady=5)
tk.Label(settings_window, text="Weather Forecast Prompt:").pack(pady=5)
weather_forecast_prompt_entry = tk.Entry(settings_window, width=40)
weather_forecast_prompt_entry.insert(0, weather_forecast_prompt)
weather_forecast_prompt_entry.pack(pady=5)
def save_settings():
config.set('OpenAI', 'api_key', openai_api_key_entry.get())
config.set('DEFAULT', 'api_base', api_base_entry.get())
config.set('OpenWeatherMap', 'api_key', openweather_api_key_entry.get())
config.set('OpenWeatherMap', 'city', city_entry.get())
config.set('Prompts', 'song_intro', song_intro_prompt_entry.get())
config.set('Prompts', 'comedy_skit', comedy_skit_prompt_entry.get())
config.set('Prompts', 'weather_forecast', weather_forecast_prompt_entry.get())
with open('config.ini', 'w', encoding='utf-8') as configfile:
config.write(configfile)
# Update the global variables
global api_base, openai_api_key, openweather_api_key, city
global song_intro_prompt, comedy_skit_prompt, weather_forecast_prompt
api_base = api_base_entry.get()
openai_api_key = openai_api_key_entry.get()
openweather_api_key = openweather_api_key_entry.get()
city = city_entry.get()
song_intro_prompt = song_intro_prompt_entry.get()
comedy_skit_prompt = comedy_skit_prompt_entry.get()
weather_forecast_prompt = weather_forecast_prompt_entry.get()
settings_window.destroy()
tk.Button(settings_window, text="Save", command=save_settings).pack(pady=20)
def on_custom_intro():
custom_intro_text = tk.simpledialog.askstring("Custom Introduction", "Enter the introduction text:")
if custom_intro_text:
item = QueueItem(custom_intro_text, playback_manager, tts_manager) # Ensure playback_manager is passed here
song_queue.append(item)
print("Added to queue:", item.voice_intro_text) # Add this line
listbox_commands.insert(tk.END, "Queued: Custom Introduction")
window = tk.Tk()
window.title("Radio Host")
song_queue = []
auto_play_next = tk.BooleanVar(value=False)
frame_left = tk.Frame(window)
frame_left.pack(side=tk.LEFT, padx=20, pady=20)
btn_add_song = Button(frame_left, text="Add Song", command=on_add_song)
btn_add_song.pack(pady=10)
btn_generate_skit = Button(frame_left, text="Generate Comedy Skit", command=generate_comedy_skit)
btn_generate_skit.pack(pady=10)
btn_custom_intro = Button(frame_left, text="Add Custom Intro", command=on_custom_intro)
btn_custom_intro.pack(pady=10)
btn_play_next = Button(frame_left, text="Play Next", command=threaded_play_from_queue)
btn_play_next.pack(pady=10)
btn_move_up = Button(frame_left, text="Move Up", command=move_up)
btn_move_up.pack(pady=10)
btn_move_down = Button(frame_left, text="Move Down", command=move_down)
btn_move_down.pack(pady=10)
btn_toggle_auto_play = Checkbutton(frame_left, text="Auto-Play: OFF", variable=auto_play_next, command=toggle_auto_play)
btn_toggle_auto_play.pack(pady=10)
btn_generate_weather = Button(frame_left, text="Generate Weather Forecast", command=on_generate_weather_forecast)
btn_generate_weather.pack(pady=10)
btn_settings = Button(frame_left, text="Settings", command=open_settings)
btn_settings.pack(pady=10)
frame_right = tk.Frame(window)
frame_right.pack(side=tk.RIGHT, padx=20, pady=20)
listbox_commands = Listbox(frame_right)
listbox_commands.pack(pady=10)
if __name__ == "__main__":
music_dir = "./music_folder"
openai_api = OpenAI()
openai.api_key = openai_api_key # Setting the OpenAI API key
    playback_manager = PlaybackManager()
    # Pass the PlaybackManager instance (the original passed the class object itself)
    tts_manager = TTSManager(playback_manager)
    watcher = MusicDirectoryWatcher(music_dir)
    log_book = LogBook() # Create an instance of LogBook here
window.mainloop()
tts_manager.shutdown()
| [
"weather_forecast",
"song_intro",
"comedy_skit",
"Create a short, funny comedy skit for the radio audience.",
"Co-Host: Introduce the next song titled 'PLACEHOLDER' to the audience. Only say the name in a funny sentence. Announcer:"
] |
2024-01-10 | alientony/LLMBroadcaster | Radio_Host8.py | import os
import time
import random
from gtts import gTTS
import pygame
import tkinter as tk
from tkinter import Listbox, Button, Checkbutton, simpledialog
import json
import threading
from pydub import AudioSegment
from pydub.playback import play
import openai
import requests
import datetime
import configparser
import re
from playsound import playsound
import emoji
pygame.mixer.init()
# Load the configuration
config = configparser.ConfigParser()
config.read('config.ini')
# Access the configuration settings in the code
api_base = config['DEFAULT']['api_base']
openai_api_key = config['OpenAI']['api_key']
openweather_api_key = config['OpenWeatherMap']['api_key']
city = config['OpenWeatherMap']['city']
song_intro_prompt = config['Prompts']['song_intro']
comedy_skit_prompt = config['Prompts']['comedy_skit']
weather_forecast_prompt = config['Prompts']['weather_forecast']
# Load emoji to sound effect mappings from JSON file
with open("emoji_sound_mappings.json", "r", encoding='utf-8') as f:
emoji_sound_mappings = json.load(f)
class LogBook:
def __init__(self, filename="log_book.txt"):
self.filename = filename
def write_entry(self, entry_type, content):
timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
with open(self.filename, "a", encoding='utf-8') as f:
f.write(f"{timestamp} - {entry_type}: {content}\n")
import pygame.mixer  # (duplicate time/os imports removed; both are imported at the top of the file)
class PlaybackManager:
def __init__(self):
self.queue = []
self.currently_playing = None
pygame.mixer.init()
def add_to_queue(self, filepath):
self.queue.append(filepath)
if not self.currently_playing:
self.play_next()
def play_next(self):
if self.queue:
self.currently_playing = self.queue.pop(0)
self.play_audio(self.currently_playing)
if self.currently_playing.startswith("tts_audio/"):
self.delete_file(self.currently_playing)
self.play_next()
else:
self.currently_playing = None
def play_audio(self, filepath):
sound = pygame.mixer.Sound(filepath)
sound.play()
time.sleep(sound.get_length()) # Wait for the sound to finish playing
def delete_file(self, filepath):
os.remove(filepath)
def play_sound_effect(self, emoji):
if emoji in emoji_sound_mappings:
sound_effect_path = emoji_sound_mappings[emoji]
print(f"Playing sound effect for emoji: {emoji}, path: {sound_effect_path}")
try:
self.play_audio(sound_effect_path)
except Exception as e:
print(f"Error playing sound effect: {e}")
else:
print(f"No sound effect found for emoji: {emoji}")
class OpenAI:
def __init__(self):
pass
@staticmethod
def Completion_create(model, prompt, max_tokens, n, stop, temperature, api_base):
response = openai.Completion.create(
model=model,
prompt=prompt,
max_tokens=max_tokens,
n=n,
stop=stop,
temperature=temperature,
api_base=api_base
)
return response
class MusicDirectoryWatcher:
def __init__(self, directory):
self.directory = directory
self.last_played = None
def get_random_song(self):
all_songs = [f for f in os.listdir(self.directory) if f.endswith(".mp3")]
if len(all_songs) == 1:
return all_songs[0]
selectable_songs = [song for song in all_songs if song != self.last_played]
selected_song = random.choice(selectable_songs)
self.last_played = selected_song
return selected_song
def generate_voice_announcement(text, lang="en"):
tts = gTTS(text=text, lang=lang, slow=False)
audio_file = "audio_output.mp3"
tts.save(audio_file)
return audio_file
def play_audio_file(audio_file_path):
audio_segment = AudioSegment.from_file(audio_file_path, format="mp3")
play(audio_segment)
class QueueItem:
def __init__(self, voice_intro_text, playback_manager):
self.voice_intro_text = voice_intro_text
self.playback_manager = playback_manager
def play(self):
# Find emojis and their positions in the text
emoji_pattern = re.compile("["
u"\U0001F600-\U0001F64F" # emoticons
u"\U0001F300-\U0001F5FF" # symbols & pictographs
u"\U0001F680-\U0001F6FF" # transport & map symbols
u"\U0001F700-\U0001F77F" # alchemical symbols
u"\U0001F780-\U0001F7FF" # Geometric Shapes Extended
u"\U0001F800-\U0001F8FF" # Supplemental Arrows-C
u"\U0001F900-\U0001F9FF" # Supplemental Symbols and Pictographs
u"\U0001FA00-\U0001FA6F" # Chess Symbols
u"\U0001FA70-\U0001FAFF" # Symbols and Pictographs Extended-A
u"\U00002702-\U000027B0" # Dingbats
"]+", flags=re.UNICODE)
emojis = emoji_pattern.findall(self.voice_intro_text)
print("Detected emojis:", emojis)
# Split text based on emojis
segments = emoji_pattern.split(self.voice_intro_text)
# Play text segments and sound effects in order
for segment in segments:
# Play text segment
text_without_emojis = segment.strip()
if text_without_emojis:
voice_intro_file = generate_voice_announcement(text_without_emojis)
self.playback_manager.play_audio(voice_intro_file)
if os.path.exists(voice_intro_file):
os.remove(voice_intro_file)
# Play sound effect if next segment is an emoji
if emojis:
emoji_char = emojis.pop(0)
self.playback_manager.play_sound_effect(emoji_char)
class SongItem(QueueItem):
    # Superseded by the SongItem definition later in the file; the constructor is kept
    # in sync with QueueItem (which requires a playback_manager) so it would not raise
    # if instantiated directly.
    def __init__(self, voice_intro_text, song_file, playback_manager):
        super().__init__(voice_intro_text, playback_manager)
        self.song_file = song_file
    def play(self):
        super().play()
        play_audio_file(self.song_file)
class ComedySkitItem(QueueItem):
pass
class WeatherForecastItem(QueueItem):
pass
def generate_comedy_skit():
prompt = "Create a short, funny comedy skit for the radio audience."
response = openai_api.Completion_create(
model="gpt-4-32k",
prompt=comedy_skit_prompt,
max_tokens=150,
n=1,
stop=None,
temperature=0.7,
api_base=api_base
)
skit_text = response["choices"][0]["text"]
skit_data = {
"Voice Introduction Text": skit_text,
"Type": "Comedy Skit"
}
song_queue.append(ComedySkitItem(skit_text, playback_manager)) # Pass playback_manager here
log_book.write_entry("Comedy Skit", skit_text)
listbox_commands.insert(tk.END, "Queued: Comedy Skit")
# The rest of the code remains unchanged
def on_add_song():
global playback_manager
next_song = watcher.get_random_song()
song_intro_prompt = f"Co-Host: Introduce the next song titled '{next_song[:-4]}' to the audience. Only say the name in a funny sentence. Announcer:"
response = openai_api.Completion_create(
model="gpt-4-32k",
prompt=song_intro_prompt.format(song_name=next_song[:-4]),
max_tokens=2000,
n=1,
stop=None,
temperature=0.7,
api_base=api_base
)
song_intro = response["choices"][0]["text"]
song_path = os.path.join(watcher.directory, next_song)
song_queue.append(SongItem(song_intro, song_path, playback_manager)) # Pass playback_manager here
log_book.write_entry("Song Intro", song_intro)
listbox_commands.insert(tk.END, f"Queued: {next_song}")
is_playing = False
play_lock = threading.Lock()
def play_from_queue():
global is_playing
with play_lock:
if is_playing:
print("Already playing, please wait...")
return
is_playing = True
if not song_queue:
with play_lock:
is_playing = False
return
item = song_queue.pop(0)
try:
item.play()
finally:
with play_lock:
is_playing = False
listbox_commands.delete(0)
# Check if auto_play_next is True and play next item if available
if auto_play_next.get() and song_queue:
window.after(1000, threaded_play_from_queue) # Delay of 1 second before next play
def threaded_play_from_queue():
threading.Thread(target=play_from_queue).start()
def move_up():
index = listbox_commands.curselection()
if not index:
return
index = index[0]
if index == 0:
return
song_queue[index], song_queue[index-1] = song_queue[index-1], song_queue[index]
listbox_commands.insert(index-1, listbox_commands.get(index))
listbox_commands.delete(index+1)
listbox_commands.selection_set(index-1)
def move_down():
index = listbox_commands.curselection()
if not index:
return
index = index[0]
if index == len(song_queue) - 1:
return
song_queue[index], song_queue[index+1] = song_queue[index+1], song_queue[index]
listbox_commands.insert(index+2, listbox_commands.get(index))
listbox_commands.delete(index)
listbox_commands.selection_set(index+1)
def toggle_auto_play():
if auto_play_next.get():
btn_toggle_auto_play.config(text="Auto-Play: ON")
else:
btn_toggle_auto_play.config(text="Auto-Play: OFF")
def fetch_weather():
base_url = f"http://api.openweathermap.org/data/2.5/weather?q={city}&appid={openweather_api_key}&units=metric"
response = requests.get(base_url)
data = response.json()
if 'main' not in data:
print(f"Error fetching weather data: {data}")
return None, None
temperature = data["main"]["temp"]
description = data["weather"][0]["description"]
return temperature, description
def on_generate_weather_forecast():
temperature, description = fetch_weather()
if temperature is None or description is None:
print("Failed to fetch weather data.")
return
prompt = weather_forecast_prompt.format(city=city, temperature=temperature, description=description)
response = openai_api.Completion_create(
model="gpt-4-32k",
prompt=prompt,
max_tokens=2000,
n=1,
stop=None,
temperature=0.7,
api_base=api_base
)
forecast_text = response["choices"][0]["text"]
song_queue.append(WeatherForecastItem(forecast_text, playback_manager)) # Pass playback_manager here
listbox_commands.insert(tk.END, "Queued: Weather Forecast")
log_book.write_entry("Weather Forecast", forecast_text)
class SongItem(QueueItem):
def __init__(self, voice_intro_text, song_file, playback_manager):
super().__init__(voice_intro_text, playback_manager)
self.song_file = song_file
def play(self):
print("Playing item:", self.voice_intro_text)
super().play()
play_audio_file(self.song_file)
class ComedySkitItem(QueueItem):
def __init__(self, voice_intro_text, playback_manager):
super().__init__(voice_intro_text, playback_manager)
class WeatherForecastItem(QueueItem):
def __init__(self, voice_intro_text, playback_manager):
super().__init__(voice_intro_text, playback_manager)
def open_settings():
settings_window = tk.Toplevel(window)
settings_window.title("Settings")
tk.Label(settings_window, text="API Base:").pack(pady=5)
api_base_entry = tk.Entry(settings_window, width=40)
api_base_entry.insert(0, api_base)
api_base_entry.pack(pady=5)
tk.Label(settings_window, text="OpenAI API Key:").pack(pady=5)
openai_api_key_entry = tk.Entry(settings_window, width=40)
openai_api_key_entry.insert(0, openai_api_key)
openai_api_key_entry.pack(pady=5)
tk.Label(settings_window, text="OpenWeatherMap API Key:").pack(pady=5)
openweather_api_key_entry = tk.Entry(settings_window, width=40)
openweather_api_key_entry.insert(0, openweather_api_key)
openweather_api_key_entry.pack(pady=5)
tk.Label(settings_window, text="City:").pack(pady=5)
city_entry = tk.Entry(settings_window, width=40)
city_entry.insert(0, city)
city_entry.pack(pady=5)
tk.Label(settings_window, text="Song Intro Prompt:").pack(pady=5)
song_intro_prompt_entry = tk.Entry(settings_window, width=40)
song_intro_prompt_entry.insert(0, song_intro_prompt)
song_intro_prompt_entry.pack(pady=5)
tk.Label(settings_window, text="Comedy Skit Prompt:").pack(pady=5)
comedy_skit_prompt_entry = tk.Entry(settings_window, width=40)
comedy_skit_prompt_entry.insert(0, comedy_skit_prompt)
comedy_skit_prompt_entry.pack(pady=5)
tk.Label(settings_window, text="Weather Forecast Prompt:").pack(pady=5)
weather_forecast_prompt_entry = tk.Entry(settings_window, width=40)
weather_forecast_prompt_entry.insert(0, weather_forecast_prompt)
weather_forecast_prompt_entry.pack(pady=5)
def save_settings():
config.set('OpenAI', 'api_key', openai_api_key_entry.get())
config.set('DEFAULT', 'api_base', api_base_entry.get())
config.set('OpenWeatherMap', 'api_key', openweather_api_key_entry.get())
config.set('OpenWeatherMap', 'city', city_entry.get())
config.set('Prompts', 'song_intro', song_intro_prompt_entry.get())
config.set('Prompts', 'comedy_skit', comedy_skit_prompt_entry.get())
config.set('Prompts', 'weather_forecast', weather_forecast_prompt_entry.get())
with open('config.ini', 'w', encoding='utf-8') as configfile:
config.write(configfile)
# Update the global variables
global api_base, openai_api_key, openweather_api_key, city
global song_intro_prompt, comedy_skit_prompt, weather_forecast_prompt
api_base = api_base_entry.get()
openai_api_key = openai_api_key_entry.get()
openweather_api_key = openweather_api_key_entry.get()
city = city_entry.get()
song_intro_prompt = song_intro_prompt_entry.get()
comedy_skit_prompt = comedy_skit_prompt_entry.get()
weather_forecast_prompt = weather_forecast_prompt_entry.get()
settings_window.destroy()
tk.Button(settings_window, text="Save", command=save_settings).pack(pady=20)
def on_custom_intro():
custom_intro_text = tk.simpledialog.askstring("Custom Introduction", "Enter the introduction text:")
if custom_intro_text:
item = QueueItem(custom_intro_text, playback_manager) # Ensure playback_manager is passed here
song_queue.append(item)
print("Added to queue:", item.voice_intro_text) # Add this line
listbox_commands.insert(tk.END, "Queued: Custom Introduction")
window = tk.Tk()
window.title("Radio Host")
song_queue = []
auto_play_next = tk.BooleanVar(value=False)
frame_left = tk.Frame(window)
frame_left.pack(side=tk.LEFT, padx=20, pady=20)
btn_add_song = Button(frame_left, text="Add Song", command=on_add_song)
btn_add_song.pack(pady=10)
btn_generate_skit = Button(frame_left, text="Generate Comedy Skit", command=generate_comedy_skit)
btn_generate_skit.pack(pady=10)
btn_custom_intro = Button(frame_left, text="Add Custom Intro", command=on_custom_intro)
btn_custom_intro.pack(pady=10)
btn_play_next = Button(frame_left, text="Play Next", command=threaded_play_from_queue)
btn_play_next.pack(pady=10)
btn_move_up = Button(frame_left, text="Move Up", command=move_up)
btn_move_up.pack(pady=10)
btn_move_down = Button(frame_left, text="Move Down", command=move_down)
btn_move_down.pack(pady=10)
btn_toggle_auto_play = Checkbutton(frame_left, text="Auto-Play: OFF", variable=auto_play_next, command=toggle_auto_play)
btn_toggle_auto_play.pack(pady=10)
btn_generate_weather = Button(frame_left, text="Generate Weather Forecast", command=on_generate_weather_forecast)
btn_generate_weather.pack(pady=10)
btn_settings = Button(frame_left, text="Settings", command=open_settings)
btn_settings.pack(pady=10)
frame_right = tk.Frame(window)
frame_right.pack(side=tk.RIGHT, padx=20, pady=20)
listbox_commands = Listbox(frame_right)
listbox_commands.pack(pady=10)
if __name__ == "__main__":
music_dir = "./music_folder"
openai_api = OpenAI()
openai.api_key = openai_api_key # Setting the OpenAI API key
watcher = MusicDirectoryWatcher(music_dir)
log_book = LogBook() # Create an instance of LogBook here
playback_manager = PlaybackManager()
window.mainloop()
| [
"weather_forecast",
"comedy_skit",
"song_intro",
"Create a short, funny comedy skit for the radio audience.",
"Co-Host: Introduce the next song titled 'PLACEHOLDER' to the audience. Only say the name in a funny sentence. Announcer:"
] |
2024-01-10 | dnpl/nas-tools-1 | app~plugins~modules~_autosignin~chdbits.py | import json
import os
import random
import re
from lxml import etree
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class CHDBits(_ISiteSigninHandler):
"""
彩虹岛签到
如果填写openai key则调用chatgpt获取答案
否则随机
"""
    # Site URL to match; every handler class must set this to its own site's URL
    site_url = "chdbits.co"
    # Already signed in today (matches the site's notice text)
    _sign_regex = ['今天已经签过到了']
    # Sign-in succeeded (list still to be extended)
    _success_regex = ['\\d+点魔力值']
    # Cache of known correct answers so they can be reused directly later
    _answer_path = os.path.join(Config().get_temp_path(), "signin")
_answer_file = _answer_path + "/chdbits.json"
@classmethod
def match(cls, url):
"""
根据站点Url判断是否匹配当前站点签到类,大部分情况使用默认实现即可
:param url: 站点Url
:return: 是否匹配,如匹配则会调用该类的signin方法
"""
return True if StringUtils.url_equal(url, cls.site_url) else False
def signin(self, site_info: dict):
"""
执行签到操作
:param site_info: 站点信息,含有站点Url、站点Cookie、UA等信息
:return: 签到结果信息
"""
site = site_info.get("name")
site_cookie = site_info.get("cookie")
ua = site_info.get("ua")
proxy = Config().get_proxies() if site_info.get("proxy") else None
# 创建正确答案存储目录
if not os.path.exists(os.path.dirname(self._answer_file)):
os.makedirs(os.path.dirname(self._answer_file))
# 判断今日是否已签到
index_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).get_res(url='https://chdbits.co/bakatest.php')
if not index_res or index_res.status_code != 200:
self.error(f"签到失败,请检查站点连通性")
return False, f'【{site}】签到失败,请检查站点连通性'
if "login.php" in index_res.text:
self.error(f"签到失败,cookie失效")
return False, f'【{site}】签到失败,cookie失效'
sign_status = self.sign_in_result(html_res=index_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
# 没有签到则解析html
html = etree.HTML(index_res.text)
if not html:
return False, f'【{site}】签到失败'
# 获取页面问题、答案
questionid = html.xpath("//input[@name='questionid']/@value")[0]
option_ids = html.xpath("//input[@name='choice[]']/@value")
option_values = html.xpath("//input[@name='choice[]']/following-sibling::text()")
question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]
answers = list(zip(option_ids, option_values))
# 正则获取问题
match = re.search(r'请问:(.+)', question_str)
if match:
question_str = match.group(1)
self.debug(f"获取到签到问题 {question_str}")
else:
self.error(f"未获取到签到问题")
return False, f"【{site}】签到失败,未获取到签到问题"
# 查询已有答案
exits_answers = {}
try:
with open(self._answer_file, 'r') as f:
json_str = f.read()
exits_answers = json.loads(json_str)
# 查询本地本次验证码hash答案
question_answer = exits_answers[question_str]
# question_answer是数组
if not isinstance(question_answer, list):
question_answer = [question_answer]
# 本地存在本次hash对应的正确答案再遍历查询
choice = []
for q in question_answer:
for num, answer in answers:
if str(q) == str(num):
choice.append(int(q))
if len(choice) > 0:
# 签到
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("查询本地已知答案失败,继续请求豆瓣查询")
# 正确答案,默认随机,如果gpt返回则用gpt返回的答案提交
choice = [option_ids[random.randint(0, len(option_ids) - 1)]]
# 组装gpt问题
gpt_options = "{\n" + ",\n".join([f"{num}:{value}" for num, value in answers]) + "\n}"
gpt_question = f"题目:{question_str}\n" \
f"选项:{gpt_options}"
self.debug(f"组装chatgpt问题 {gpt_question}")
# chatgpt获取答案
answer = OpenAiHelper().get_question_answer(question=gpt_question)
self.debug(f"chatpgt返回结果 {answer}")
# 处理chatgpt返回的答案信息
if answer is None:
self.warn(f"ChatGPT未启用, 开始随机签到")
# return f"【{site}】签到失败,ChatGPT未启用"
elif answer:
            # Use a regex to extract the numbers from the reply
            answer_nums = list(map(int, re.findall(r"\d+", answer)))
if not answer_nums:
self.warn(f"无法从chatgpt回复 {answer} 中获取答案, 将采用随机签到")
else:
choice = []
for answer in answer_nums:
# 如果返回的数字在option_ids范围内,则直接作为答案
if str(answer) in option_ids:
choice.append(int(answer))
self.info(f"chatgpt返回答案id {answer} 在签到选项 {option_ids} 中")
# 签到
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site,
exits_answers=exits_answers,
question=question_str)
def __signin(self, questionid, choice, site, site_cookie, ua, proxy, exits_answers=None, question=None):
"""
签到请求
questionid: 450
choice[]: 8
choice[]: 4
usercomment: 此刻心情:无
submit: 提交
多选会有多个choice[]....
"""
data = {
'questionid': questionid,
'choice[]': choice[0] if len(choice) == 1 else choice,
'usercomment': '太难了!',
'wantskip': '不会'
}
self.debug(f"签到请求参数 {data}")
sign_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).post_res(url='https://chdbits.co/bakatest.php', data=data)
if not sign_res or sign_res.status_code != 200:
self.error(f"签到失败,签到接口请求失败")
return False, f'【{site}】签到失败,签到接口请求失败'
# 判断是否签到成功
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._success_regex)
if sign_status:
self.info(f"签到成功")
if exits_answers and question:
# 签到成功写入本地文件
self.__write_local_answer(exits_answers=exits_answers or {},
question=question,
answer=choice)
return True, f'【{site}】签到成功'
else:
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
self.error(f"签到失败,请到页面查看")
return False, f'【{site}】签到失败,请到页面查看'
def __write_local_answer(self, exits_answers, question, answer):
"""
签到成功写入本地文件
"""
try:
exits_answers[question] = answer
# 序列化数据
formatted_data = json.dumps(exits_answers, indent=4)
with open(self._answer_file, 'w') as f:
f.write(formatted_data)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("签到成功写入本地文件失败")
| [] |
2024-01-10 | Sudipta013/chatwithmultiplepdf | multipdfchat.py | import streamlit as st
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from htmlTemplates import css, bot_template, user_template
from langchain.llms import HuggingFaceHub
import os
def get_pdf_text(pdf_docs):
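    # Concatenate the text extracted from every page of all uploaded PDFs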
text = ""
for pdf in pdf_docs:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
            text += page.extract_text() or ""  # extract_text() can return None for image-only pages
return text
def get_text_chunks(text):
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text)
return chunks
def get_vectorstore(text_chunks):
#embeddings = OpenAIEmbeddings()
embeddings = HuggingFaceEmbeddings()
vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
return vectorstore
def get_conversation_chain(vectorstore):
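    # Combine the HuggingFace Hub LLM, the vector store retriever and a buffer memory into a conversational retrieval chain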
#llm = ChatOpenAI()
llm = HuggingFaceHub(repo_id="google/flan-t5-xxl", model_kwargs={"temperature":0.5, "max_length":512},huggingfacehub_api_token=st.secrets["HUGGINGFACEHUB_API_TOKEN"])
memory = ConversationBufferMemory(
memory_key='chat_history', return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectorstore.as_retriever(),
memory=memory
)
return conversation_chain
def handle_userinput(user_question):
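    # Run the question through the conversation chain and render the alternating user/bot history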
response = st.session_state.conversation({'question': user_question})
st.session_state.chat_history = response['chat_history']
for i, message in enumerate(st.session_state.chat_history):
if i % 2 == 0:
st.write(user_template.replace(
"{{MSG}}", message.content), unsafe_allow_html=True)
else:
st.write(bot_template.replace(
"{{MSG}}", message.content), unsafe_allow_html=True)
def main():
load_dotenv()
st.set_page_config(page_title="Chat with multiple PDFs",
page_icon=":books:")
st.write(css, unsafe_allow_html=True)
#CSS
hide_st_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
header {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)
bg = """
<style> [data-testid="stAppViewContainer"]
{
background: rgb(6,36,39);
}
</style>
"""
sb = """
<style>[data-testid="stSidebar"]
{
background: rgb(42, 52, 65);
}
</style>
"""
st.markdown(sb,unsafe_allow_html=True)
st.markdown(bg, unsafe_allow_html=True)
# Add the yellow bottom bar
bottom_bar_html = """
<style>
.bottom-bar {
background-color: #FFA500;
padding: 5px;
position: fixed;
left: 0;
bottom: 0;
width: 100%;
text-align: center;
font-family: 'Russo One';
font-size: 20px;
}
</style>
<div class="bottom-bar">
<span style="color: white; font-weight: bold;">The Techie Indians</span>
</div>
"""
st.markdown(bottom_bar_html, unsafe_allow_html=True)
if "conversation" not in st.session_state:
st.session_state.conversation = None
if "chat_history" not in st.session_state:
st.session_state.chat_history = None
st.markdown("<h1 style='text-align: center; font-family:Abril Fatface ; -webkit-text-stroke: 1px black ;font-size: 50px; padding-bottom: 15px; color: rgb(255, 255, 255) ;'>Chat with multiple PDFs</h1>", unsafe_allow_html=True)
st.markdown("""<h5 style='text-align: center;font-size:18px;color: rgba(255,255,255,0.4); padding-top: 15px'>
Chat with multiple PDF AI tool is an interactive application that allows users to upload and communicate with multiple PDF documents using large language models.
The tool facilitates natural language-based interactions and offers a user-friendly interface to extract and converse with the content of the uploaded PDFs in real-time.
</h5>""",unsafe_allow_html=True)
user_question = st.text_input("Ask a question about your documents:")
if user_question:
handle_userinput(user_question)
with st.sidebar:
st.markdown("<h1 style='text-align: Left; font-family:Abril Fatface ;font-size: 32px; padding-bottom: 1px; color: rgb(255,255,255) ;'>Your Documents:</h1>", unsafe_allow_html=True)
st.markdown("<h1 style='text-align: Left ;font-size: 18px; padding-bottom: 0px; color: rgb(255, 165, 0) ;'>Upload your file and click process</h1>", unsafe_allow_html=True)
pdf_docs = st.file_uploader("", accept_multiple_files=True)
if st.button("Process"):
with st.spinner("Processing"):
# get pdf text
raw_text = get_pdf_text(pdf_docs)
# get the text chunks
text_chunks = get_text_chunks(raw_text)
# create vector store
vectorstore = get_vectorstore(text_chunks)
# create conversation chain
st.session_state.conversation = get_conversation_chain(
vectorstore)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | j03-dev/wcc-final-back | v1~gpt_api.py | import openai
openai.api_key = "kBiVXOYpBlAOT94Pqs01MsYcS_Nhaz3CYQdNSvRda_Q"
openai.api_base = "https://chimeragpt.adventblocks.cc/v1"
def ask_gpt(request: str):
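    # Send a single-turn chat completion to the configured api_base endpoint and return the raw response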
response = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
messages=[
{'role': 'user', 'content': str(request)},
]
)
return response | [] |
2024-01-10 | EdF2021/berenddock | Hello.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import streamlit as st
from PIL import Image
import openai
# openai_api_key = os.getenv("OPENAI_API_KEY")
openai_api_key = st.secrets["OPENAI_API_KEY"]
# IMAGES_DIR = os.path.join(os.path.dirname(__file__), 'images/productoer.jpeg')
image = Image.open("images/producttoer.jpeg")
from streamlit.logger import get_logger
LOGGER = get_logger(__name__)
ENCODINGS = 'cl100k_base'
def run():
st.set_page_config(
page_title="Berend-Botje Skills",
page_icon="👋",
layout="wide",
initial_sidebar_state="collapsed"
)
col1, col2 = st.columns(2)
with col1:
st.markdown("""
## Welkom bij Berend-Botje Skills 👋 """)
st.markdown("""
##### Berend-Botje is een slimme AI assistent die je helpt om *smart* te werken.""")
st.markdown("""
###### Afhankelijk van de taak zal Berend een keuze maken welke skills er nodig zijn. De skills zijn allen **Powered by OpenAI** en maken gebruik van AI modellen als ChatGPT. Het verschil met ChatGPT is dat alle informatie binnen de omgeving van de gebruiker blijft!"""
)
with col2:
st.image(image, caption=None, use_column_width=True, clamp=True, channels="RGB", output_format="png")
# st.sidebar.success("Kies één van Berend's skills")
st.markdown(""" ##### 👈 Voorbeelden.
**1. [De Lesplanner](Lesplan_Demo)**
**2. [De Notulist](Mapping_Demo)**
**3. [De Dataanalist](DataFrame_Demo)**
**4. [De Datavormgever](Plotting_Demo)**
**5. [De Chatbot](Chat_Demo)**
**6. [De Chatbot](https://berend-botje-skills.streamlit.app)**
**Disclaimer:Het is werk onder constructie...**
""")
if __name__ == "__main__":
run()
| [] |
2024-01-10 | EdF2021/berenddock | core~ui.py | from typing import List
import streamlit as st
from langchain.docstore.document import Document
from core.parsing import File
import openai
from streamlit.logger import get_logger
from typing import NoReturn
logger = get_logger(__name__)
openai_api_key = st.secrets["OPENAI_API_KEY"]
def wrap_doc_in_html(docs: List[Document]) -> str:
"""Wraps each page in document separated by newlines in <p> tags"""
text = [doc.page_content for doc in docs]
if isinstance(text, list):
# Add horizontal rules between pages
text = "\n<hr/>\n".join(text)
return "".join([f"<p>{line}</p>" for line in text.split("\n")])
def is_query_valid(query: str) -> bool:
if not query:
st.error("Je moet hier je vraag stellen!")
return False
return True
def is_file_valid(file: File) -> bool:
if (
len(file.docs) == 0
or "".join([doc.page_content for doc in file.docs]).strip() == ""
):
st.error("Het document kan niet worden gelezen! Zorg ervoor dat het document tekst bevat.")
logger.error("Het document kan niet worden gelezen.")
return False
return True
def display_file_read_error(e: Exception, file_name: str) -> NoReturn:
st.error("Error lezen bestand. Is het bestand misschien corrupted of encrypted")
logger.error(f"{e.__class__.__name__}: {e}. Extension: {file_name.split('.')[-1]}")
st.stop()
@st.cache_data(show_spinner=False)
def is_open_ai_key_valid(openai_api_key, model: str) -> bool:
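    # Validate the key by issuing one minimal chat completion; any exception is shown to the user and the key is treated as invalid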
if model == "debug":
return True
if not openai_api_key:
st.error("Je hebt een geldig OpenAI API key nodig!")
return False
try:
system_prompt = """
Je praat Nederlands. Je bent een vriendelijke en behulpzame instructiecoach die docenten op een MBO school helpt bij het plannen van een les.
De docenten geven les aan niveau 1 studenten. Op basis van het bestand dat de docent je heeft gegeven,
en op basis van wat de docent precies van je vraagt, maak jij het lesplan.
Jij moet in ieder geval van de docent weten:
1. THEMA: In grote lijnen waar de les over gaat,
2. SPECIFIEK: Welk speciek onderdeel van dit thema, en
3. VOORKENNIS: Welke voorkennis de studenten hebb.
Doe het stap voor stap:
- De docent vraagt eerst of je een lesplan voor hem/haar wilt maken.
- Jij vraagt dan om THEMA, SPECIFIEK, en VOORKENNIS
- Vervolgens ga je het lesplan maken
Gebruik een helder leerdoel want dat is wat de studenten na de les moeten begrijpen en/of kunnen doen.
Maak het lesplan in markdown formaat met een verscheidenheid aan lestechnieken en -modaliteiten,
waaronder directe instructie, controleren op begrip
(inclusief het verzamelen van bewijs van begrip van een brede steekproef van studenten), discussie,
een boeiende activiteit in de klas en een opdracht.
Leg uit waarom je specifiek voor elk kiest. Probeer het niet groter te maken dan 2 A4-tjes. PRAAT NEDERLANDS """
openai.ChatCompletion.create(
model=model,
messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": "test"}],
api_key=openai_api_key,
)
except Exception as e:
st.error(f"{e.__class__.__name__}: {e}")
logger.error(f"{e.__class__.__name__}: {e}")
return False
return True
| [
"\n Je praat Nederlands. Je bent een vriendelijke en behulpzame instructiecoach die docenten op een MBO school helpt bij het plannen van een les.\n De docenten geven les aan niveau 1 studenten. Op basis van het bestand dat de docent je heeft gegeven, \n en op basis van wat de docent precies van je vraagt, maak jij het lesplan. \n Jij moet in ieder geval van de docent weten: \n 1. THEMA: In grote lijnen waar de les over gaat, \n 2. SPECIFIEK: Welk speciek onderdeel van dit thema, en \n 3. VOORKENNIS: Welke voorkennis de studenten hebb.\n\n Doe het stap voor stap: \n - De docent vraagt eerst of je een lesplan voor hem/haar wilt maken.\n - Jij vraagt dan om THEMA, SPECIFIEK, en VOORKENNIS\n - Vervolgens ga je het lesplan maken\n Gebruik een helder leerdoel want dat is wat de studenten na de les moeten begrijpen en/of kunnen doen. \n Maak het lesplan in markdown formaat met een verscheidenheid aan lestechnieken en -modaliteiten, \n waaronder directe instructie, controleren op begrip\n (inclusief het verzamelen van bewijs van begrip van een brede steekproef van studenten), discussie, \n een boeiende activiteit in de klas en een opdracht. \n Leg uit waarom je specifiek voor elk kiest. Probeer het niet groter te maken dan 2 A4-tjes. PRAAT NEDERLANDS ",
"test"
] |
2024-01-10 | EdF2021/berenddock | ui.py | import os
import app
from typing import List
import streamlit as st
from langchain.docstore.document import Document
from app import core
from core.parsing import File
import openai
from streamlit.logger import get_logger
from typing import NoReturn
logger = get_logger(__name__)
openai_api_key = st.secrets["OPENAI_API_KEY"]
def wrap_doc_in_html(docs: List[Document]) -> str:
"""Schrijf elke pagina in het document gescheiden door een nieuwe regel in <p> tags"""
text = [doc.page_content for doc in docs]
if isinstance(text, list):
# Toevoegen horizontale rules between pages
text = "\n<hr/>\n".join(text)
return "".join([f"<p>{line}</p>" for line in text.split("\n")])
def is_query_valid(query: str) -> bool:
if not query:
st.error("Je moet hier een vraag stellen!")
return False
return True
def is_file_valid(file: File) -> bool:
if (
len(file.docs) == 0
or "".join([doc.page_content for doc in file.docs]).strip() == ""
):
st.error("Het document kan niet worden gelezen! Zorg ervoor dat het document tekst bevat.")
logger.error("Het document kan niet worden gelezen.")
return False
return True
def display_file_read_error(e: Exception, file_name: str) -> NoReturn:
st.error("Error lezen bestand. Is het bestand misschien corrupted of encrypted")
logger.error(f"{e.__class__.__name__}: {e}. Extension: {file_name.split('.')[-1]}")
st.stop()
@st.cache_data(show_spinner=True)
def is_open_ai_key_valid(openai_api_key, model: str) -> bool:
if model == "debug":
return True
if not openai_api_key:
st.error("Je hebt een geldig OpenAI API key nodig!")
return False
try:
system_prompt = """
Je bent een Nederlandse vriendelijke en behulpzame instructiecoach die docenten op een MBO school helpt bij het plannen van een les.
De docenten geven les aan niveau 1 studenten. Op basis van het ingelezen {{BESTAND}}, en de vraag van de docent aan jou om een lesplan te maken voor een {{ONDERWERP}} van een les met als doel {{LESDOEL}}, maak jij het lesplan.
Als je te weing informatie heb vraag je dat aan de docent. Jij moet in ieder geval van de docent weten:
1. {{ONDERWERP}}: In grote lijnen waar de les over gaat,
2. {{LESDOEK}}: Welk doel er met de les wordt nagestreefd.
3. VOORKENNIS: Welke voorkennis de studenten hebb.
Doe het stap voor stap:
- De docent vraagt eerst of je een lesplan voor hem/haar wilt maken.
- Jij vraagt dan om THEMA, SPECIFIEK, en VOORKENNIS
- Vervolgens ga je het lesplan maken
Gebruik een helder leerdoel want dat is wat de studenten na de les moeten begrijpen en/of kunnen doen.
Maak het lesplan in markdown formaat met een verscheidenheid aan lestechnieken en -modaliteiten,
waaronder directe instructie, controleren op begrip
(inclusief het verzamelen van bewijs van begrip van een brede steekproef van studenten), discussie,
een boeiende activiteit in de klas en een opdracht.
Leg uit waarom je specifiek voor elk kiest. Probeer het niet groter te maken dan 2 A4-tjes.
PRAAT EN GEEF ANTWOORD IN HET NEDERLANDS """
openai.ChatCompletion.create(
model=model,
messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": "Hallo."}],
api_key=openai_api_key,
)
except Exception as e:
st.error(f"{e.__class__.__name__}: {e}")
logger.error(f"{e.__class__.__name__}: {e}")
return False
return True
| [
"\n Je bent een Nederlandse vriendelijke en behulpzame instructiecoach die docenten op een MBO school helpt bij het plannen van een les.\n De docenten geven les aan niveau 1 studenten. Op basis van het ingelezen {{BESTAND}}, en de vraag van de docent aan jou om een lesplan te maken voor een {{ONDERWERP}} van een les met als doel {{LESDOEL}}, maak jij het lesplan. \n Als je te weing informatie heb vraag je dat aan de docent. Jij moet in ieder geval van de docent weten: \n 1. {{ONDERWERP}}: In grote lijnen waar de les over gaat, \n 2. {{LESDOEK}}: Welk doel er met de les wordt nagestreefd. \n 3. VOORKENNIS: Welke voorkennis de studenten hebb.\n Doe het stap voor stap: \n - De docent vraagt eerst of je een lesplan voor hem/haar wilt maken.\n - Jij vraagt dan om THEMA, SPECIFIEK, en VOORKENNIS\n - Vervolgens ga je het lesplan maken\n Gebruik een helder leerdoel want dat is wat de studenten na de les moeten begrijpen en/of kunnen doen. \n Maak het lesplan in markdown formaat met een verscheidenheid aan lestechnieken en -modaliteiten, \n waaronder directe instructie, controleren op begrip\n (inclusief het verzamelen van bewijs van begrip van een brede steekproef van studenten), discussie, \n een boeiende activiteit in de klas en een opdracht. \n Leg uit waarom je specifiek voor elk kiest. Probeer het niet groter te maken dan 2 A4-tjes. \n PRAAT EN GEEF ANTWOORD IN HET NEDERLANDS ",
"Hallo."
] |
2024-01-10 | EdF2021/berenddock | pages~0_Lesplan_Demo.py | import streamlit as st
from PIL import Image
from streamlit import sidebar
from ui import (
wrap_doc_in_html,
is_query_valid,
is_file_valid,
is_open_ai_key_valid,
display_file_read_error
)
import core
from core.caching import bootstrap_caching
from core.parsing import read_file
from core.chunking import chunk_file
from core.embedding import embed_files
from core.utils import get_llm
from core.qa import query_folder
import tiktoken
import openai
EMBEDDING = "openai"
VECTOR_STORE = "faiss"
MODEL_LIST = ["gpt-3.5-turbo", "gpt-4"]
image = Image.open('images/producttoer.jpeg')
# Uncomment to enable debug mode
# MODEL_LIST.insert(0, "debug")
st.set_page_config(
page_title="Berend-Botje Skills",
page_icon="👋",
layout="wide",
initial_sidebar_state="expanded" )
col1, col2 = st.columns(2)
with col1:
st.header("Berend-Botje Skills" )
st.subheader("De Lesplanner\n*waarom zou je moeilijk doen ....?*")
with col2:
st.image(image, caption=None, use_column_width=True, clamp=True, channels="RGB", output_format="png")
with st.sidebar:
st.markdown("""#### De Lesplanner ondersteunt docenten bij het maken van een lesplan.""")
st.markdown("""
#### Hoe werkt de Lesplanner?
1. **Upload een pdf, docx, of txt file📄**
2. **Stel je vraag over het document 💬**
3. **Laat Berend je lesplan maken**
""" )
# Enable caching for expensive functions
bootstrap_caching()
# sidebar()
openai_api_key = st.secrets["OPENAI_API_KEY"]
# openai_api_key = os.getenv("OPENAI_API_KEY")
st.session_state.get("OPENAI_API_KEY")
if not openai_api_key:
st.warning(
"Je hebt een geldig OpenAI API key nodig!"
" https://platform.openai.com/account/api-keys."
)
uploaded_file = st.file_uploader(
"**HIER KUN JE JOUW PDF, DOCX, OF TXT BESTAND UPLOADEN!!**",
type=["pdf", "docx", "txt"],
help="Gescande documenten worden nog niet ondersteund! ",
)
model: str = st.selectbox("Model", options=MODEL_LIST) # type: ignore
with st.expander("Geavanceerd"):
return_all_chunks = st.checkbox("Toon alle chunks afkomstig uit de vector search")
show_full_doc = st.checkbox("Toom de geparseerde inhoud van het document")
if not uploaded_file:
st.stop()
try:
file = read_file(uploaded_file)
except Exception as e:
display_file_read_error(e, file_name=uploaded_file.name)
with st.spinner("Indexeren van het document... Dit kan even duren⏳"):
chunked_file = chunk_file(file, chunk_size=300, chunk_overlap=0)
if not is_file_valid(file):
st.stop()
if not is_open_ai_key_valid(openai_api_key, model):
st.stop()
folder_index = embed_files(
files=[chunked_file],
embedding=EMBEDDING if model != "debug" else "debug",
vector_store=VECTOR_STORE if model != "debug" else "debug",
openai_api_key=openai_api_key,
)
if uploaded_file:
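    # First generate an automatic Dutch summary of the uploaded document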
llm2 = get_llm(model=model, openai_api_key=openai_api_key, temperature=0)
result = query_folder(
folder_index=folder_index,
query="Maak een samenvatting van het document dat net is ingelezen. Geef de hoofd thema's aan en bendadruk de belangrijkste onderwerpen. Maak gebruik van het markdown formaat en gebruik hier 5 regels voor. Geef altijd antwoord in HET NEDERLANDS!!",
return_all=return_all_chunks,
llm=llm2,
)
st.markdown(" ### Samenvatting")
st.markdown(result.answer)
# st.button("Onderwerp", key="Onderwerp")
# st.button("Lesdoel", key="Lesdoel")
with st.form(key="qa_form"):
onderwerp = st.text_input("**Maak een lesplan over het onderwerp** ", "Onderwerp ")
lesdoel = st.text_input("**Het lesdoel van de studenten**", " Het doel ")
query = "Maak een lesplan over " + str(onderwerp) + " Het doel van de les is dat studenten " + str(lesdoel) + """. Maak gebruik van het ingelezen document, en antwoord in het Nederlands. Gebruik een helder leerdoel,want dat is wat de studenten na de les moeten begrijpen en/of kunnen doen. Maak het lesplan in markdown formaat met een verscheidenheid aan lestechnieken en -modaliteiten, waaronder directe instructie, controleren op begrip(inclusief het verzamelen van bewijs van begrip van een brede steekproef van studenten), discussie, een boeiende activiteit in de klas en een opdracht. Leg uit waarom je specifiek voor elk kiest. Probeer het niet groter te maken dan 2 A4-tjes.GEEF ANTWOORD IN HET NEDERLANDS! """
submit = st.form_submit_button("Sturen")
# if show_full_doc:
# with st.expander("Document"):
# Hack to get around st.markdown rendering LaTeX
# st.markdown(f"<p>{wrap_doc_in_html(file.docs)}</p>", unsafe_allow_html=True)
if submit:
with st.spinner("Bezig met je vraag ... ⏳"):
if not is_query_valid(query):
st.stop()
# Output Columns
            print(query)
llm = get_llm(model=model, openai_api_key=openai_api_key, temperature=0)
result = query_folder(
folder_index=folder_index,
query = query,
return_all=return_all_chunks,
llm=llm
)
# answer_col, sources_col = st.columns(2)
# with answer_col:
st.markdown("#### Het Lesplan\n['Berend-Botje Skills']('https://berend-botje.online')")
st.markdown(result.answer)
# with sources_col:
# st.markdown("#### Bronnen")
# for source in result.sources:
# st.markdown(source.page_content)
# st.markdown(source.metadata["source"])
# st.markdown("---")
| [] |
2024-01-10 | EdF2021/berenddock | core~parsing.py | from io import BytesIO
from typing import List, Any, Optional
import re
import docx2txt
from langchain.docstore.document import Document
import fitz  # provided by the PyMuPDF package (pip install pymupdf)
from hashlib import md5
from abc import abstractmethod, ABC
from copy import deepcopy
class File(ABC):
"""Represents an uploaded file comprised of Documents"""
def __init__(
self,
name: str,
id: str,
metadata: Optional[dict[str, Any]] = None,
docs: Optional[List[Document]] = None,
):
self.name = name
self.id = id
self.metadata = metadata or {}
self.docs = docs or []
@classmethod
@abstractmethod
def from_bytes(cls, file: BytesIO) -> "File":
"""Creates a File from a BytesIO object"""
def __repr__(self) -> str:
return (
f"File(name={self.name}, id={self.id},"
" metadata={self.metadata}, docs={self.docs})"
)
def __str__(self) -> str:
return f"File(name={self.name}, id={self.id}, metadata={self.metadata})"
def copy(self) -> "File":
"""Create a deep copy of this File"""
return self.__class__(
name=self.name,
id=self.id,
metadata=deepcopy(self.metadata),
docs=deepcopy(self.docs),
)
def strip_consecutive_newlines(text: str) -> str:
"""Strips consecutive newlines from a string
possibly with whitespace in between
"""
return re.sub(r"\s*\n\s*", "\n", text)
class DocxFile(File):
@classmethod
def from_bytes(cls, file: BytesIO) -> "DocxFile":
text = docx2txt.process(file)
text = strip_consecutive_newlines(text)
doc = Document(page_content=text.strip())
doc.metadata["source"] = "p-1"
return cls(name=file.name, id=md5(file.read()).hexdigest(), docs=[doc])
class PdfFile(File):
@classmethod
def from_bytes(cls, file: BytesIO) -> "PdfFile":
pdf = fitz.open(stream=file.read(), filetype="pdf") # type: ignore
docs = []
for i, page in enumerate(pdf):
text = page.get_text(sort=True)
text = strip_consecutive_newlines(text)
doc = Document(page_content=text.strip())
doc.metadata["page"] = i + 1
doc.metadata["source"] = f"p-{i+1}"
docs.append(doc)
# file.read() mutates the file object, which can affect caching
# so we need to reset the file pointer to the beginning
file.seek(0)
return cls(name=file.name, id=md5(file.read()).hexdigest(), docs=docs)
class TxtFile(File):
@classmethod
def from_bytes(cls, file: BytesIO) -> "TxtFile":
text = file.read().decode("utf-8", errors="replace")
text = strip_consecutive_newlines(text)
file.seek(0)
doc = Document(page_content=text.strip())
doc.metadata["source"] = "p-1"
return cls(name=file.name, id=md5(file.read()).hexdigest(), docs=[doc])
def read_file(file: BytesIO) -> File:
"""Reads an uploaded file and returns a File object"""
if file.name.lower().endswith(".docx"):
return DocxFile.from_bytes(file)
elif file.name.lower().endswith(".pdf"):
return PdfFile.from_bytes(file)
elif file.name.lower().endswith(".txt"):
return TxtFile.from_bytes(file)
else:
raise NotImplementedError(f"File type {file.name.split('.')[-1]} not supported")
| [] |
2024-01-10 | EdF2021/berenddock | pages~5_Chat_Demo.py | import openai
import streamlit as st
from PIL import Image
openai_api_key = st.secrets["OPENAI_API_KEY"]
image = Image.open('images/producttoer.jpeg')
st.set_page_config(
page_title="Berend-Botje Skills",
page_icon="👋",
layout="wide",
initial_sidebar_state="collapsed" )
col1, col2 = st.columns(2)
with col1:
st.header("📖Berend-Botje Skills" )
st.subheader("De ChatGPT kloon\n*waarom zou je moeilijk doen ....?*")
with col2:
st.image(image, caption=None, width=240, use_column_width=True, clamp=True, channels="RGB", output_format="auto")
# openai.api_key = st.secrets["OPENAI_API_KEY"]
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
if "messages" not in st.session_state:
st.session_state.messages = []
    st.session_state.messages.append({"role": "system", "content": "Geef altijd antwoord in het Nederlands"})
for message in st.session_state.messages:
with st.chat_message(message["role"]):
if message["role"] != "system":
st.markdown(message["content"])
if prompt := st.chat_input("Hoe gaat het?"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in openai.ChatCompletion.create(
model=st.session_state["openai_model"],
messages=[
{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages
],
stream=True,
):
full_response += response.choices[0].delta.get("content", "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response})
| [
"content",
"Geef altijd antwoord in het Nederlands"
] |
2024-01-10 | CR1337/xLLaMa | services~llm_facade~llm_request.py | from abc import ABC, abstractmethod, abstractclassmethod
from typing import Any, Dict, List, Tuple, Generator
from openai import OpenAI, ChatCompletion, AuthenticationError
import requests
import json
import os
from db_interface import DbInterface
from server_sent_events import ServerSentEvents
class LlmRequest(ABC):
DEFAULT_REPEAT_PENALTY: float = 1.1
DEFAULT_MAX_TOKENS: int = 256
DEFAULT_SEED: int = 0
DEFAULT_TEMPERATURE: float = 0.8
DEFAULT_TOP_P: float = 0.9
_repeat_penalty: float
_max_tokens: int
_seed: int | None
_temperature: float
_top_p: float
_llm_id: str
_framework_item_id: str
_system_prompt_id: str | None
_parent_follow_up_id: str | None
_prompt_part_ids: List[str]
_stop_sequence_ids: List[str] | None
_prompt: str
_text: str | None
_token_amount: int | None
_prediction: Dict[str, Any] | None
_llm: Dict[str, Any]
_framework_item: Dict[str, Any]
_system_prompt: Dict[str, Any] | None
_parent_follow_up: Dict[str, Any] | None
_prompt_parts: List[Dict[str, Any]]
_stop_sequences: List[Dict[str, Any]] | None
def __init__(
self,
repeat_penalty: float,
max_tokens: int,
seed: int | None,
temperature: float,
top_p: float,
llm: str,
framework_item: str,
system_prompt: str | None,
parent_follow_up: str | None,
prompt_parts: List[str],
stop_sequences: List[str] | None
):
self._repeat_penalty = (
float(repeat_penalty)
if repeat_penalty
else self.DEFAULT_REPEAT_PENALTY
)
self._max_tokens = (
int(max_tokens) if max_tokens else self.DEFAULT_MAX_TOKENS
)
self._seed = int(seed) if seed else self.DEFAULT_SEED
self._temperature = (
float(temperature) if temperature else self.DEFAULT_TEMPERATURE
)
self._top_p = float(top_p) if top_p else self.DEFAULT_TOP_P
self._llm_id = llm
self._framework_item_id = framework_item
self._system_prompt_id = system_prompt
self._parent_follow_up_id = parent_follow_up
self._prompt_part_ids = prompt_parts
self._stop_sequence_ids = stop_sequences
self._text = ""
self._token_amount = 0
self._prediction_id = None
self._llm = DbInterface.get_llm(llm)
self._framework_item = DbInterface.get_framework_item(framework_item)
self._system_prompt = (
DbInterface.get_system_prompt(system_prompt)
if system_prompt is not None
else None
)
self._parent_follow_up = (
DbInterface.get_follow_up(parent_follow_up)
if parent_follow_up is not None
else None
)
self._prompt_parts = [
DbInterface.get_prompt_part(id) for id in self._prompt_part_ids
]
self._stop_sequences = (
[
DbInterface.get_stop_sequence(id)
for id in self._stop_sequence_ids
]
if stop_sequences is not None
else None
)
self._prompt = "\n".join([p['text'] for p in self._prompt_parts])
@abstractclassmethod
def model_names(cls) -> List[str]:
raise NotImplementedError()
@classmethod
def has_model(cls, model: str) -> bool:
return model in cls.model_names()
@abstractmethod
def generate(self) -> Tuple[Dict[str, Any], int]:
raise NotImplementedError()
@abstractmethod
def generate_stream(self) -> Generator[str, None, None]:
raise NotImplementedError()
@abstractmethod
def _translate_event(self, event: Any) -> Dict[str, Any]:
raise NotImplementedError()
@abstractmethod
def _process_event(self, event: Any):
raise NotImplementedError()
    def _server_sent_event_loop(
self, response: Any
) -> Generator[str, None, None]:
for i, event in enumerate(response):
yield ServerSentEvents.build_sse_data(
"generation_progress",
self._translate_event(event),
i,
OllamaRequest.STREAM_RETRY_PERIOD
)
self._process_event(event)
self._persist_generation()
yield ServerSentEvents.build_sse_data(
"generation_success",
json.dumps({"prediction": self._prediction_id}),
i + 1,
OllamaRequest.STREAM_RETRY_PERIOD
)
def _persist_generation(self):
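        # Store the prediction together with its prompt-part and stop-sequence usages via the DB interface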
self._persist_prediction()
self._persist_prompt_part_usages()
self._persist_stop_sequence_usages()
def _persist_prediction(self) -> Dict[str, Any]:
self._prediction_id = DbInterface.post_prediction(
self._text,
self._token_amount,
self._repeat_penalty,
self._max_tokens,
self._seed,
self._temperature,
self._top_p,
self._parent_follow_up_id,
self._framework_item_id,
self._llm_id,
self._system_prompt_id
)['id']
def _persist_prompt_part_usages(self) -> List[Dict[str, Any]]:
for position, prompt_part in enumerate(self._prompt_parts):
DbInterface.post_prompt_part_usage(
position,
prompt_part['id'],
self._prediction_id
)
def _persist_stop_sequence_usages(self) -> List[Dict[str, Any]]:
if self._stop_sequences is not None:
for stop_sequence in self._stop_sequences:
DbInterface.post_stop_sequence_usage(
stop_sequence['id'], self._prediction_id
)
class OllamaRequest(LlmRequest):
URL: str = f'http://ollama:{os.environ.get("OLLAMA_INTERNAL_PORT")}'
STREAM_CHUNK_SIZE: int = 32
STREAM_RETRY_PERIOD: int = 3000 # ms
@classmethod
def model_names(cls) -> List[str]:
response = requests.get(f"{cls.URL}/api/tags")
return [m['name'] for m in response.json()['models'] if 'name' in m]
@classmethod
def install_model(cls, name: str) -> Tuple[Dict[str, Any], int]:
response = requests.post(
f"{cls.URL}/api/pull",
json={"name": name, 'stream': False}
)
if (status := response.json().get('status')) != 'success':
return {"status": status}, response.status_code
llm = DbInterface.post_llm(name)
return {"llm": llm['id']}, 200
@classmethod
def uninstall_model(cls, name: str) -> Tuple[Dict[str, Any], int]:
response = requests.delete(
f"{cls.URL}/api/delete",
json={"name": name}
)
return {}, response.status_code
@classmethod
def install_model_stream(cls, name: str) -> Generator[str, None, None]:
def server_sent_event_generator():
with requests.Session().post(
f"{cls.URL}/api/pull",
json={"name": name, 'stream': True},
headers=None,
stream=True
) as response:
success = False
for i, event in enumerate(
response.iter_lines(chunk_size=cls.STREAM_CHUNK_SIZE)
):
yield ServerSentEvents.build_sse_data(
"model_installation_progress",
event,
i,
cls.STREAM_RETRY_PERIOD
)
success = "success" in event.decode()
if success:
llm = DbInterface.post_llm(name)
yield ServerSentEvents.build_sse_data(
"model_installation_success",
json.dumps({"llm": llm['id']}),
i + 1,
cls.STREAM_RETRY_PERIOD
)
return server_sent_event_generator()
def _build_generate_request_body(self, stream: bool) -> Dict[str, Any]:
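        # Map the generic generation parameters onto Ollama's /api/generate request body and options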
request_body = {
'model': self._llm['name'],
'prompt': self._prompt,
'stream': stream,
'options': {
'repeat_penalty': self._repeat_penalty,
'num_predict': self._max_tokens,
'temperature': self._temperature,
'top_p': self._top_p
}
}
if self._seed:
request_body['options']['seed'] = self._seed
if self._system_prompt:
request_body['system'] = self._system_prompt['text']
if self._stop_sequences:
            request_body['options']['stop'] = [s['text'] for s in self._stop_sequences]  # pass the stop texts, not the DB records
return request_body
def generate(self) -> Tuple[Dict[str, Any], int]:
request_body = self._build_generate_request_body(stream=False)
response = requests.post(f"{self.URL}/api/generate", json=request_body)
self._text = response.json()['response']
self._token_amount = response.json()['eval_count']
self._persist_generation()
return {'prediction': self._prediction_id}, 200
def generate_stream(self) -> Generator[str, None, None]:
def server_sent_event_generator():
with requests.Session().post(
f"{self.URL}/api/generate",
json=self._build_generate_request_body(stream=True),
headers=None,
stream=True
) as response:
                for event in self._server_sent_event_loop(response):
yield event
return server_sent_event_generator()
def _translate_event(self, event: Any) -> Dict[str, Any]:
return json.dumps({
'token': json.loads(event)['response'],
})
def _process_event(self, event: Any):
self._text += json.loads(event)['response']
self._token_amount += 1
class OpenAiRequest(LlmRequest):
try:
client: OpenAI = OpenAI(api_key=os.environ.get('OPENAI_API_KEY'))
except Exception:
available = False
else:
available = True
@classmethod
def model_names(cls) -> List[str]:
try:
            return [m.id for m in cls.client.models.list()]
except AuthenticationError:
return []
def _request_generation(self, stream: bool) -> ChatCompletion:
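        # Build the chat messages (optional system prompt plus the user prompt) and call the chat completions endpoint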
messages = []
if self._system_prompt:
messages.append({
'role': 'system',
'content': self._system_prompt['text'],
})
messages.append({
'role': 'user',
'content': self._prompt
})
return self.client.chat.completions.create(
model=self._llm['name'],
messages=messages,
stream=stream,
frequency_penalty=self._repeat_penalty,
max_tokens=self._max_tokens,
seed=self._seed,
temperature=self._temperature,
top_p=self._top_p,
            stop=[s['text'] for s in self._stop_sequences] if self._stop_sequences else None
)
def generate(self) -> Tuple[Dict[str, Any], int]:
response = self._request_generation(stream=False)
self._text = response.choices[0].message.content
self._token_amount = response.usage.completion_tokens
self._persist_generation()
return {'prediction': self._prediction_id}, 200
def generate_stream(self) -> Generator[str, None, None]:
def server_sent_event_generator():
response = self._request_generation(stream=True)
            for event in self._server_sent_event_loop(response):
yield event
return server_sent_event_generator()
def _translate_event(self, event: Any) -> Dict[str, Any]:
return json.dumps({
            'token': event.choices[0].delta.content or "",
})
def _process_event(self, event: Any):
        self._text += event.choices[0].delta.content or ""
self._token_amount += 1
REQUEST_CLASSES: List[LlmRequest] = [OllamaRequest, OpenAiRequest]
| [] |
2024-01-10 | Maximilian-Ka/survey-chatbot-prototype | actions~NLG~gpt3_connector.py |
import os
import openai
from dotenv import dotenv_values
import logging
from pathlib import Path
class GPT3Connector():
""" Text completion using the OpenAI GPT-3 API (see https://beta.openai.com/docs/api-reference/introduction) """
def __init__(self) -> None:
curr_path = Path(__file__).resolve().parent
filepath = curr_path.joinpath(".env")
config = dotenv_values(filepath)
if "OPENAI_API_KEY" in config:
openai.api_key = config["OPENAI_API_KEY"]
else:
logging.warning("Couldn't find OPENAI API Key.")
raise RuntimeError()
def _limit_input_length(self, text) -> str:
""" Limit input length to limit the amount of used tokens. (see https://beta.openai.com/docs/usage-guidelines/safety-best-practices) """
# Hint: should actually be already done in front-end
required_input_tokens = len(text) / 4
if required_input_tokens >= 100:
text = text[:395]
return text
def _generate_prompt_example(self, animal):
return """Suggest three names for an animal that is a superhero.
Animal: Cat
Names: Captain Sharpclaw, Agent Fluffball, The Incredible Feline
Animal: Dog
Names: Ruff the Protector, Wonder Canine, Sir Barks-a-Lot
Animal: {}
Names:""".format(
animal.capitalize()
)
def _generate_prompt_summarize_tasks(self, text:str):
return """Summarize the tasks given in the following text:\n
'''\n
{}\n
'''\n
The tasks are:\n
-""".format(
text
)
def _generate_prompt_guess_job_title(self, text:str):
return """Guess my job title based on the following tasks:\n
'''\n
{}\n
'''\n
My job title is:""".format(
text
)
def _generate_prompt_comment_job_title(self, text:str):
return """The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.\n
'''\n
Human: What do you think is cool about the job {}?\n
AI: I think\n
'''
""".format(
text
)
def _generate_prompt_comment_technology_skill(self, text:str):
return """The following is a conversation with an AI assistant. The assistant is helpful, creative, clever, and very friendly.\n
'''\n
Human: I'm interested in {}.\n
AI: {} is a nice skill to have because\n
'''
""".format(
text, text
)
def gpt3_comment_technology_skill(self, text:str, more_than_one_skill:bool) -> str:
""" Let GPT-3 comment on one of the extracted technology skills the user is interested in. """
text = self._limit_input_length(text)
response = openai.Completion.create(
engine= "text-davinci-002", #"text-davinci-002",
prompt=self._generate_prompt_comment_technology_skill(text),
temperature=0.6,
max_tokens=256,
top_p=1,
n=1
)
comment:str = response.choices[0].text
comment = comment.replace('\n', '').strip()
# Tune output
if not comment.startswith(text):
            if not comment.startswith("I "):
comment = comment[0].lower() + comment[1:]
if comment.startswith("it is"):
comment = text + " is cool because " + comment
if more_than_one_skill:
comment = "especially " + comment + " 🤓"
else:
comment = "I think " + comment + " 🤓"
return comment
def gpt3_summarize_tasks(self, text:str) -> str:
""" Let GPT-3 extract work tasks from the input text and list it in bullet points. """
text = self._limit_input_length(text)
response = openai.Completion.create(
engine= "text-curie-001", #"text-davinci-002",
prompt=self._generate_prompt_summarize_tasks(text),
temperature=0.6,
max_tokens=256,
top_p=1,
n=1
)
tasks = str(response.choices[0].text)
tasks = "-" + tasks
tasks = tasks.strip()
return tasks
def gpt3_guess_job_title(self, text:str) -> str:
""" Let GPT-3 guess the job title corresponding to tasks given as input. """
text = self._limit_input_length(text)
response = openai.Completion.create(
engine= "text-davinci-002",
prompt=self._generate_prompt_guess_job_title(text),
temperature=0.6,
max_tokens=256,
top_p=1,
n=1
)
job_title_guess:str = response.choices[0].text
job_title_guess = (job_title_guess.replace('.', '').replace('\n', '').title()).strip()
return job_title_guess
def gpt3_comment_job_title(self, text:str) -> str:
""" Let GPT-3 guess the job title corresponding to tasks given as input. """
text = self._limit_input_length(text)
response = openai.Completion.create(
engine= "text-davinci-002", #"text-davinci-002",
prompt=self._generate_prompt_comment_job_title(text),
temperature=0.75,
max_tokens=200,
top_p=1,
n=1
)
# postprocess GPT-3 output
comment:str = response.choices[0].text
comment = comment.replace('\n', '').strip()
if comment.startswith("Ai:"):
comment = comment[3:]
if comment.startswith("aI: "):
comment = comment[4:]
if not comment[0]=="I":
comment = comment[0].lower() + comment[1:]
if comment.startswith("that"):
comment = comment + "it's cool "
comment = "In my opinion, " + comment
comment = comment + " 🦾🤖"
return comment
# Test individual methods
# #%%
# connector = GPT3Connector()
# #%%
# response = connector.gpt3_summarize_tasks("Communicate and coordinate with management, shareholders, customers, and employees to address sustainability issues. Enact or oversee a corporate sustainability strategy.")
# #%%
# job_title_guess = connector.gpt3_guess_job_title(response)
# #%%
# comment = connector.gpt3_comment_job_title("Software Engineer")
# # %%
# comment = connector.gpt3_comment_job_title("Corporate Sustainability Officer")
# print(comment)
# #%%
# comment = connector.gpt3_comment_job_title(job_title_guess)
# print(comment)
# #%%
# comment = connector.gpt3_comment_technology_skill("Excel", more_than_one_skill=False)
# print(comment)
| [] |
2024-01-10 | eren23/Promptify | tests~unit_tests~prompter~test_prompter.py | import pytest
from promptify import Prompter
from promptify import OpenAI
from typing import List, Optional, Union, Dict
import os
class TestPrompter:
@pytest.fixture
def model(self):
model = OpenAI(api_key="", api_wait=1, api_retry=1)
return model
def test_custom_template(self, model):
# replace the template path with own path, this is just for testing
prompter = Prompter(
model=model, template="/Users/stoicbatman/Desktop/pytest_project/ner.jinja"
)
output = prompter.fit(
"Elon Reeve Musk FRS is a business. He is the founder of SpaceX; Tesla, Inc.; Twitter, Inc.; Neuralink and OpenAI",
domain="general",
labels=None,
)
assert isinstance(output, list)
assert isinstance(output[0]["parsed"], Dict)
assert isinstance(output[0]["parsed"]["data"]["completion"][0]["T"], str)
assert isinstance(output[0]["parsed"]["data"]["completion"][0]["E"], str)
def test_generate_prompt(self, model):
prompter = Prompter(model=model, template="ner.jinja")
prompt = prompter.generate_prompt(
"Elon Reeve Musk FRS is a business. He is the founder of SpaceX; Tesla, Inc.; Twitter, Inc.; Neuralink and OpenAI",
domain="general",
labels=None,
)
assert isinstance(prompt, str)
def test_fit(self, model):
prompter = Prompter(model=model, template="ner.jinja")
output = prompter.fit(
"Elon Reeve Musk FRS is a business. He is the founder of SpaceX; Tesla, Inc.; Twitter, Inc.; Neuralink and OpenAI",
domain="general",
labels=None,
)
assert isinstance(output, list)
assert isinstance(output[0]["parsed"], Dict)
assert isinstance(output[0]["parsed"]["data"]["completion"][0]["T"], str)
assert isinstance(output[0]["parsed"]["data"]["completion"][0]["E"], str)
def test_raw_fit(self, model):
prompter = Prompter(model=model, raw_prompt=True)
output = prompter.fit("quick brown fox jump over")
assert isinstance(output, list)
assert isinstance(output[0]["text"], str)
def test_load_template(self, model):
prompter = Prompter(model=model)
template_data = prompter.load_template("ner.jinja")
assert "template_name" in template_data
assert "template_dir" in template_data
assert "environment" in template_data
assert "template" in template_data
def test_get_available_templates(self, model):
prompter = Prompter(model)
templates_path = os.path.join(
os.path.dirname(os.path.realpath(".")), "codes", "templates"
)
templates = prompter.get_available_templates(templates_path)
assert isinstance(templates, dict)
for key, value in templates.items():
assert key.endswith(".jinja")
assert value.endswith(".jinja")
def test_list_templates(self, model):
prompter = Prompter(model=model, template="ner.jinja")
loader = prompter.load_template("ner.jinja")
templates = loader["environment"].list_templates()
assert isinstance(templates, list)
assert len(templates) > 0
def test_template_variables(self, model):
prompter = Prompter(model=model, template="ner.jinja")
loader = prompter.load_template("ner.jinja")
variables = prompter.get_template_variables(
loader["environment"], loader["template_name"]
)
assert isinstance(variables, set)
assert len(variables) > 0
def test_update_default_variable_values(self, model):
prompter = Prompter(model=model, template="ner.jinja")
new_defaults = {"description": "test description", "domain": "test domain"}
prompter.update_default_variable_values(new_defaults)
assert prompter.default_variable_values == new_defaults
def test_missing_template_path_error(self, model):
with pytest.raises(ValueError):
prompter = Prompter(model=model)
prompter.load_template("non_existent_template.jinja")
| [
"/Users/stoicbatman/Desktop/pytest_project/ner.jinja",
"environment",
"Elon Reeve Musk FRS is a business. He is the founder of SpaceX; Tesla, Inc.; Twitter, Inc.; Neuralink and OpenAI",
"general",
"ner.jinja"
] |
2024-01-10 | eren23/Promptify | examples~medical_ner.py | from promptify import OpenAI
from promptify import Prompter
sentence = """The patient is a 93-year-old female with a medical
history of chronic right hip pain, osteoporosis,
hypertension, depression, and chronic atrial
fibrillation admitted for evaluation and management
of severe nausea and vomiting and urinary tract
infection"""
model = OpenAI(api_key="")
prompter = Prompter(model=model, template="ner.jinja")
output = prompter.fit(text_input=sentence, domain="medical", labels=None)
print(output)
| [
"ner.jinja"
] |
2024-01-10 | kuyesu/llama2-replicate | constants.py | import os
# from dotenv import load_dotenv
from chromadb.config import Settings
# https://python.langchain.com/en/latest/modules/indexes/document_loaders/examples/excel.html?highlight=xlsx#microsoft-excel
from langchain.document_loaders import CSVLoader, PDFMinerLoader, TextLoader, UnstructuredExcelLoader, Docx2txtLoader
# load_dotenv()
ROOT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
# Define the folder for storing database
SOURCE_DIRECTORY = f"{ROOT_DIRECTORY}/SOURCE_DOCUMENTS"
PERSIST_DIRECTORY = f"{ROOT_DIRECTORY}/DB"
# Can be changed to a specific number
INGEST_THREADS = os.cpu_count() or 8
# Define the Chroma settings
CHROMA_SETTINGS = Settings(
anonymized_telemetry=False,
is_persistent=True,
)
# https://python.langchain.com/en/latest/_modules/langchain/document_loaders/excel.html#UnstructuredExcelLoader
DOCUMENT_MAP = {
".txt": TextLoader,
".md": TextLoader,
".py": TextLoader,
".pdf": PDFMinerLoader,
".csv": CSVLoader,
".xls": UnstructuredExcelLoader,
".xlsx": UnstructuredExcelLoader,
".docx": Docx2txtLoader,
".doc": Docx2txtLoader,
}
# Default Instructor Model
EMBEDDING_MODEL_NAME = "hkunlp/instructor-large" # Uses 1.5 GB of VRAM (High Accuracy with lower VRAM usage)
MODEL_ID = "TheBloke/Llama-2-7B-Chat-GGML"
MODEL_BASENAME = "llama-2-7b-chat.ggmlv3.q4_0.bin"
| [] |
2024-01-10 | kuyesu/llama2-replicate | run_llama_api.py | import logging
import os
import shutil
import subprocess
import torch
from flask import Flask, jsonify, request
from flask_cors import CORS
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms import Replicate
from langchain.memory import ConversationBufferMemory
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
# from langchain.embeddings import HuggingFaceEmbeddings
# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from werkzeug.utils import secure_filename
from constants import CHROMA_SETTINGS, EMBEDDING_MODEL_NAME, PERSIST_DIRECTORY, MODEL_ID, MODEL_BASENAME
from dotenv import load_dotenv
load_dotenv()
DEVICE_TYPE = "cuda" if torch.cuda.is_available() else "cpu"
SHOW_SOURCES = True
logging.info(f"Running on: {DEVICE_TYPE}")
logging.info(f"Display Source Documents set to: {SHOW_SOURCES}")
EMBEDDINGS = HuggingFaceInstructEmbeddings(model_name=EMBEDDING_MODEL_NAME, model_kwargs={"device": DEVICE_TYPE})
# uncomment the following line if you used HuggingFaceEmbeddings in the ingest.py
# EMBEDDINGS = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
if os.path.exists(PERSIST_DIRECTORY):
try:
shutil.rmtree(PERSIST_DIRECTORY)
except OSError as e:
print(f"Error: {e.filename} - {e.strerror}.")
else:
print("The directory does not exist")
run_langest_commands = ["python3", "ingest.py"]
if DEVICE_TYPE == "cpu":
run_langest_commands.append("--device_type")
run_langest_commands.append(DEVICE_TYPE)
result = subprocess.run(run_langest_commands, capture_output=True)
if result.returncode != 0:
raise FileNotFoundError(
"No files were found inside SOURCE_DOCUMENTS, please put a starter file inside before starting the API!"
)
# load the vectorstore
DB = Chroma(
persist_directory=PERSIST_DIRECTORY,
embedding_function=EMBEDDINGS,
client_settings=CHROMA_SETTINGS,
)
RETRIEVER = DB.as_retriever()
# LLM = load_model(device_type=DEVICE_TYPE, model_id=MODEL_ID, model_basename=MODEL_BASENAME)
llm = Replicate(
streaming = True,
replicate_api_token="r8_EpFnoaoDX78IMBdPPJx4D8eaPm2ltbi4OicEe",
model = "replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781",
callbacks=[StreamingStdOutCallbackHandler()],
input = {"temperature": 0.01, "max_length" :500,"top_p":1, "system_prompt": """<s>[INST] <<SYS>>
You are a helpful, respectful and honest university assistant called Jane. Always answer as
helpfully as possible, while being safe. Your answers should not include
any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.
Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain
why instead of answering something not correct. If you don't know the answer
to a question, please don't share false information.
Your goal is to provide answers relating to university or universities, admission and other campus life you can be creative to provide additional relevant answers only where applicable.
The document/documents have information of various universities and not specific to one university, your goal is to pick the most relevant information that the user want to know, do not make up any information.
<</SYS>>
"""})
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
QA = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=RETRIEVER, return_source_documents=SHOW_SOURCES
)
app = Flask(__name__)
CORS(app)
@app.route("/api/delete_source", methods=["GET"])
def delete_source_route():
folder_name = "SOURCE_DOCUMENTS"
if os.path.exists(folder_name):
shutil.rmtree(folder_name)
os.makedirs(folder_name)
return jsonify({"message": f"Folder '{folder_name}' successfully deleted and recreated."})
@app.route("/api/save_document", methods=["GET", "POST"])
def save_document_route():
if "document" not in request.files:
return "No document part", 400
file = request.files["document"]
if file.filename == "":
return "No selected file", 400
if file:
filename = secure_filename(file.filename)
folder_path = "SOURCE_DOCUMENTS"
if not os.path.exists(folder_path):
os.makedirs(folder_path)
file_path = os.path.join(folder_path, filename)
file.save(file_path)
return "File saved successfully", 200
@app.route("/api/run_ingest", methods=["GET"])
def run_ingest_route():
global DB
global RETRIEVER
global QA
try:
if os.path.exists(PERSIST_DIRECTORY):
try:
shutil.rmtree(PERSIST_DIRECTORY)
except OSError as e:
print(f"Error: {e.filename} - {e.strerror}.")
else:
print("The directory does not exist")
run_langest_commands = ["python3", "ingest.py"]
if DEVICE_TYPE == "cpu":
run_langest_commands.append("--device_type")
run_langest_commands.append(DEVICE_TYPE)
result = subprocess.run(run_langest_commands, capture_output=True)
if result.returncode != 0:
return "Script execution failed: {}".format(result.stderr.decode("utf-8")), 500
# load the vectorstore
DB = Chroma(
persist_directory=PERSIST_DIRECTORY,
embedding_function=EMBEDDINGS,
client_settings=CHROMA_SETTINGS,
)
RETRIEVER = DB.as_retriever()
QA = RetrievalQA.from_chain_type(
llm=llm, chain_type="stuff", retriever=RETRIEVER, return_source_documents=SHOW_SOURCES
)
return "Script executed successfully: {}".format(result.stdout.decode("utf-8")), 200
except Exception as e:
return f"Error occurred: {str(e)}", 500
# 10.0.0.2
@app.route("/api/prompt_route", methods=["GET", "POST"])
def prompt_route():
global QA
user_prompt = request.form.get("user_prompt")
if user_prompt:
# print(f'User Prompt: {user_prompt}')
# Get the answer from the chain
res = QA(user_prompt)
answer, docs = res["result"], res["source_documents"]
prompt_response_dict = {
"Prompt": user_prompt,
"Answer": answer,
}
prompt_response_dict["Sources"] = []
for document in docs:
prompt_response_dict["Sources"].append(
(os.path.basename(str(document.metadata["source"])), str(document.page_content))
)
return jsonify(prompt_response_dict), 200
else:
return "No user prompt received", 400
if __name__ == "__main__":
load_dotenv()
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(message)s", level=logging.INFO
)
CORS(app)
app.run(debug=False, port=5110)
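# Illustrative client call (a sketch only; assumes the server above is running
# locally on port 5110 and that the `requests` package is installed):
#
#   import requests
#   resp = requests.post(
#       "http://localhost:5110/api/prompt_route",
#       data={"user_prompt": "What do the ingested documents say about admissions?"},
#   )
#   print(resp.json()["Answer"])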
| [
"user_prompt",
"{'Prompt': PLACEHOLDER, 'Answer': PLACEHOLDER}"
] |
2024-01-10 | kuyesu/llama2-replicate | app_api.py | from flask import Flask, request, jsonify
from langchain.chains import ConversationalRetrievalChain
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import Replicate
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.memory import ConversationBufferMemory
from langchain.document_loaders import PyPDFLoader, TextLoader, Docx2txtLoader
import os
import tempfile
import threading
app = Flask(__name__)
# Define global variables
history = []
generated = [""]
def initialize_session_state():
global history
global generated
# Start each session with an empty chat history and a blank first response.
history = []
generated = [""]
@app.route('/initialize', methods=['POST'])
def initialize():
initialize_session_state()
return "Session initialized."
def conversation_chat(query, chain, history):
result = chain({"question": query, "chat_history": history})
history.append((query, result["answer"]))
return result["answer"]
@app.route('/chat', methods=['POST'])
def chat():
user_input = request.form['input']
global history
global generated
if user_input:
output = conversation_chat(user_input, chain, history)
# conversation_chat already records the (query, answer) pair in history,
# so only the generated reply needs to be tracked here.
generated.append(output)
return jsonify({'user_input': user_input, 'generated_output': output})
def create_conversational_chain(vector_store):
# Create llm
llm = Replicate(
streaming=True,
model="replicate/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf",
callbacks=None,
input={
"temperature": 0.1,
"max_length": 1000,
"top_p": 1,
"system_prompt": """<s>[INST] <<SYS>>
You are a helpful, respectful and honest university assistant called Jane. Always answer as
helpfully as possible, while being safe. Your answers should not include
any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content.
Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain
why instead of answering something not correct. If you don't know the answer
to a question, please don't share false information.
Your goal is to provide answers relating to university or universities, admission and other campus life you can be creative to provide additional relevant answers only where applicable.
The document/documents have information of various universities and not specific to one university, your goal is to pick the most relevant information that the user want to know, do not make up any information.
<</SYS>>
"""
})
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
chain = ConversationalRetrievalChain.from_llm(llm=llm, chain_type='stuff',
retriever=vector_store.as_retriever(search_kwargs={"k": 2}),
memory=memory)
return chain
if __name__ == "__main__":
# Initialize session state
initialize_session_state()
# Create the chain object
text = [] # Replace this with your text data
text_splitter = CharacterTextSplitter(separator="\n", chunk_size=1000, chunk_overlap=100, length_function=len)
text_chunks = text_splitter.split_documents(text)
# Create embeddings
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2", model_kwargs={'device': 'cpu'})
# Create vector store
vector_store = FAISS.from_documents(text_chunks, embedding=embeddings)
# Create the chain object
chain = create_conversational_chain(vector_store)
app.run(debug=True)
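# Illustrative client call (a sketch only; Flask defaults to port 5000 here and
# the question text is made up):
#
#   import requests
#   resp = requests.post("http://localhost:5000/chat", data={"input": "Which universities are covered?"})
#   print(resp.json()["generated_output"])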
| [] |
2024-01-10 | zhongjiaguo/RealChar | realtime_ai_character~character_catalog~catalog_manager.py | import os
import threading
import yaml
from dotenv import load_dotenv
from pathlib import Path
from contextlib import ExitStack
from realtime_ai_character.logger import get_logger
from realtime_ai_character.utils import Singleton, Character
from realtime_ai_character.database.chroma import get_chroma
from llama_index import SimpleDirectoryReader
from langchain.text_splitter import CharacterTextSplitter
from readerwriterlock import rwlock
from realtime_ai_character.database.connection import get_db
from realtime_ai_character.models.character import Character as CharacterModel
load_dotenv()
logger = get_logger(__name__)
class CatalogManager(Singleton):
def __init__(self, overwrite=True):
super().__init__()
self.db = get_chroma()
self.sql_db = next(get_db())
self.sql_load_interval = 60
self.sql_load_lock = rwlock.RWLockFair()
if overwrite:
logger.info('Overwriting existing data in the chroma.')
self.db.delete_collection()
self.db = get_chroma()
self.characters = {}
self.load_characters_from_community(overwrite)
self.load_characters(overwrite)
self.load_character_from_sql_database()
if overwrite:
logger.info('Persisting data in the chroma.')
self.db.persist()
logger.info(
f"Total document load: {self.db._client.get_collection('llm').count()}")
self.load_sql_db_loop()
def load_sql_db_loop(self):
self.load_sql_db_thread = threading.Timer(self.sql_load_interval, self.load_sql_db_loop)
self.load_sql_db_thread.daemon = True
self.load_sql_db_thread.start()
self.load_character_from_sql_database()
def get_character(self, name) -> Character:
with self.sql_load_lock.gen_rlock():
return self.characters.get(name)
def load_character(self, directory):
with ExitStack() as stack:
f_yaml = stack.enter_context(open(directory / 'config.yaml'))
yaml_content = yaml.safe_load(f_yaml)
character_id = yaml_content['character_id']
character_name = yaml_content['character_name']
voice_id = yaml_content['voice_id']
if (os.getenv(character_id.upper() + "_VOICE_ID", "")):
voice_id = os.getenv(character_id.upper() + "_VOICE_ID")
self.characters[character_id] = Character(
character_id=character_id,
name=character_name,
llm_system_prompt=yaml_content["system"],
llm_user_prompt=yaml_content["user"],
voice_id=voice_id,
source='default',
visibility='public'
)
if "avatar_id" in yaml_content:
self.characters[character_id].avatar_id = yaml_content["avatar_id"]
if "author_name" in yaml_content:
self.characters[character_id].author_name = yaml_content["author_name"]
return character_name
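# Each character directory loaded above is expected to provide a config.yaml
# shaped roughly like this illustrative sketch (all values are placeholders),
# alongside a data/ folder consumed by load_data():
#
#   character_id: sample_character
#   character_name: Sample Character
#   voice_id: <voice id>
#   system: <system prompt text>
#   user: <user prompt template>
#   avatar_id: <optional avatar id>
#   author_name: <optional author name>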
def load_characters(self, overwrite):
"""
Load characters from the character_catalog directory. Use /data to create
documents and add them to the chroma.
:overwrite: if True, overwrite existing data in the chroma.
"""
path = Path(__file__).parent
excluded_dirs = {'__pycache__', 'archive', 'community'}
directories = [d for d in path.iterdir() if d.is_dir()
and d.name not in excluded_dirs]
for directory in directories:
character_name = self.load_character(directory)
if overwrite:
self.load_data(character_name, directory / 'data')
logger.info('Loaded data for character: ' + character_name)
logger.info(
f'Loaded {len(self.characters)} characters: IDs {list(self.characters.keys())}')
def load_characters_from_community(self, overwrite):
path = Path(__file__).parent / 'community'
excluded_dirs = {'__pycache__', 'archive'}
directories = [d for d in path.iterdir() if d.is_dir()
and d.name not in excluded_dirs]
for directory in directories:
with ExitStack() as stack:
f_yaml = stack.enter_context(open(directory / 'config.yaml'))
yaml_content = yaml.safe_load(f_yaml)
character_id = yaml_content['character_id']
character_name = yaml_content['character_name']
self.characters[character_id] = Character(
character_id=character_id,
name=character_name,
llm_system_prompt=yaml_content["system"],
llm_user_prompt=yaml_content["user"],
voice_id=yaml_content["voice_id"],
source='community',
author_name=yaml_content["author_name"],
visibility=yaml_content["visibility"],
)
if "avatar_id" in yaml_content:
self.characters[character_id].avatar_id = yaml_content["avatar_id"]
if overwrite:
self.load_data(character_name, directory / 'data')
logger.info('Loaded data for character: ' + character_name)
def load_data(self, character_name: str, data_path: str):
loader = SimpleDirectoryReader(Path(data_path))
documents = loader.load_data()
text_splitter = CharacterTextSplitter(
separator='\n',
chunk_size=500,
chunk_overlap=100)
docs = text_splitter.create_documents(
texts=[d.text for d in documents],
metadatas=[{
'character_name': character_name,
'id': d.id_,
} for d in documents])
self.db.add_documents(docs)
def load_character_from_sql_database(self):
character_models = self.sql_db.query(CharacterModel).all()
with self.sql_load_lock.gen_wlock():
for character_model in character_models:
character = Character(
character_id=character_model.id,
name=character_model.name,
llm_system_prompt=character_model.system_prompt,
llm_user_prompt=character_model.user_prompt,
voice_id=character_model.voice_id,
source='community',
author_id=character_model.author_id,
visibility=character_model.visibility,
)
self.characters[character_model.id] = character
# TODO: load context data from storage
logger.info(
f'Loaded {len(character_models)} characters from sql database')
def get_catalog_manager():
return CatalogManager.get_instance()
if __name__ == '__main__':
manager = CatalogManager.get_instance()
| [] |
2024-01-10 | zhongjiaguo/RealChar | realtime_ai_character~llm~anyscale_llm.py | import os
from typing import List
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import BaseMessage, HumanMessage
from realtime_ai_character.database.chroma import get_chroma
from realtime_ai_character.llm.base import AsyncCallbackAudioHandler, AsyncCallbackTextHandler, LLM, SearchAgent
from realtime_ai_character.logger import get_logger
from realtime_ai_character.utils import Character
logger = get_logger(__name__)
class AnysacleLlm(LLM):
def __init__(self, model):
self.chat_open_ai = ChatOpenAI(
model=model,
temperature=0.5,
streaming=True,
openai_api_base='https://api.endpoints.anyscale.com/v1',
openai_api_key=os.getenv('ANYSCALE_ENDPOINT_API_KEY'),
)
self.config = {
"model": model,
"temperature": 0.5,
"streaming": True
}
self.db = get_chroma()
self.search_agent = SearchAgent()
def get_config(self):
return self.config
async def achat(self,
history: List[BaseMessage],
user_input: str,
user_input_template: str,
callback: AsyncCallbackTextHandler,
audioCallback: AsyncCallbackAudioHandler,
character: Character,
useSearch: bool=False) -> str:
# 1. Generate context
context = self._generate_context(user_input, character)
# Get search result if enabled
if useSearch:
context += self.search_agent.search(user_input)
# 2. Add user input to history
history.append(HumanMessage(content=user_input_template.format(
context=context, query=user_input)))
# 3. Generate response
response = await self.chat_open_ai.agenerate(
[history], callbacks=[callback, audioCallback, StreamingStdOutCallbackHandler()])
logger.info(f'Response: {response}')
return response.generations[0][0].text
def _generate_context(self, query, character: Character) -> str:
docs = self.db.similarity_search(query)
docs = [d for d in docs if d.metadata['character_name'] == character.name]
logger.info(f'Found {len(docs)} documents')
context = '\n'.join([d.page_content for d in docs])
return context
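# Illustrative usage (a sketch only; the model name is just an example of an
# Anyscale Endpoints chat model, and ANYSCALE_ENDPOINT_API_KEY must be set):
#
#   llm = AnysacleLlm(model="meta-llama/Llama-2-70b-chat-hf")
#   print(llm.get_config())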
| [] |
2024-01-10 | HumanCompatibleAI/ray | rllib~algorithms~maddpg~maddpg.py | """Contributed port of MADDPG from OpenAI baselines.
The implementation has a couple assumptions:
- The number of agents is fixed and known upfront.
- Each agent is bound to a policy of the same name.
- Discrete actions are sent as logits (pre-softmax).
For a minimal example, see rllib/examples/two_step_game.py,
and the README for how to run with the multi-agent particle envs.
"""
import logging
from typing import List, Optional, Type
from ray.rllib.agents.trainer_config import TrainerConfig
from ray.rllib.algorithms.dqn.dqn import DQNTrainer
from ray.rllib.algorithms.maddpg.maddpg_tf_policy import MADDPGTFPolicy
from ray.rllib.policy.policy import Policy
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
from ray.rllib.utils.annotations import Deprecated, override
from ray.rllib.utils.typing import TrainerConfigDict
from ray.rllib.utils.deprecation import DEPRECATED_VALUE
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class MADDPGConfig(TrainerConfig):
"""Defines a configuration class from which a MADDPGTrainer can be built.
Example:
>>> from ray.rllib.algorithms.maddpg.maddpg import MADDPGConfig
>>> config = MADDPGConfig()
>>> print(config.replay_buffer_config)
>>> replay_config = config.replay_buffer_config
>>> replay_config.update(
>>> {
>>> "capacity": 100000,
>>> "prioritized_replay_alpha": 0.8,
>>> "prioritized_replay_beta": 0.45,
>>> "prioritized_replay_eps": 2e-6,
>>> }
>>> )
>>> config.training(replay_buffer_config=replay_config)\
>>> .resources(num_gpus=0)\
>>> .rollouts(num_rollout_workers=4)\
>>> .environment("CartPole-v1")
>>> trainer = config.build()
>>> while True:
>>> trainer.train()
Example:
>>> from ray.rllib.algorithms.maddpg.maddpg import MADDPGConfig
>>> from ray import tune
>>> config = MADDPGConfig()
>>> config.training(n_step=tune.grid_search([3, 5]))
>>> config.environment(env="CartPole-v1")
>>> tune.run(
>>> "MADDPG",
>>> stop={"episode_reward_mean":200},
>>> config=config.to_dict()
>>> )
"""
def __init__(self, trainer_class=None):
"""Initializes a DQNConfig instance."""
super().__init__(trainer_class=trainer_class or MADDPGTrainer)
# fmt: off
# __sphinx_doc_begin__
# MADDPG specific config settings:
self.agent_id = None
self.use_local_critic = False
self.use_state_preprocessor = False
self.actor_hiddens = [64, 64]
self.actor_hidden_activation = "relu"
self.critic_hiddens = [64, 64]
self.critic_hidden_activation = "relu"
self.n_step = 1
self.good_policy = "maddpg"
self.adv_policy = "maddpg"
self.replay_buffer_config = {
"type": "MultiAgentReplayBuffer",
# Specify prioritized replay by supplying a buffer type that supports
# prioritization, for example: MultiAgentPrioritizedReplayBuffer.
"prioritized_replay": DEPRECATED_VALUE,
"capacity": int(1e6),
# How many steps of the model to sample before learning starts.
"learning_starts": 1024 * 25,
# Force lockstep replay mode for MADDPG.
"replay_mode": "lockstep",
}
self.training_intensity = None
self.critic_lr = 1e-2
self.actor_lr = 1e-2
self.target_network_update_freq = 0
self.tau = 0.01
self.actor_feature_reg = 0.001
self.grad_norm_clipping = 0.5
# Changes to Trainer's default:
self.rollout_fragment_length = 100
self.train_batch_size = 1024
self.num_workers = 1
self.min_time_s_per_reporting = 0
# fmt: on
# __sphinx_doc_end__
@override(TrainerConfig)
def training(
self,
*,
agent_id: Optional[str] = None,
use_local_critic: Optional[bool] = None,
use_state_preprocessor: Optional[bool] = None,
actor_hiddens: Optional[List[int]] = None,
actor_hidden_activation: Optional[str] = None,
critic_hiddens: Optional[List[int]] = None,
critic_hidden_activation: Optional[str] = None,
n_step: Optional[int] = None,
good_policy: Optional[str] = None,
adv_policy: Optional[str] = None,
replay_buffer_config: Optional[dict] = None,
training_intensity: Optional[float] = None,
critic_lr: Optional[float] = None,
actor_lr: Optional[float] = None,
target_network_update_freq: Optional[int] = None,
tau: Optional[float] = None,
actor_feature_reg: Optional[float] = None,
grad_norm_clipping: Optional[float] = None,
**kwargs,
) -> "MADDPGConfig":
"""Sets the training related configuration.
Args:
agent_id: ID of the agent controlled by this policy.
use_local_critic: Use a local critic for this policy.
use_state_preprocessor: Apply a state preprocessor with spec given by the
"model" config option (like other RL algorithms). This is mostly useful
if you have a weird observation shape, like an image. Disabled by
default.
actor_hiddens: Postprocess the policy network model output with these hidden
layers. If `use_state_preprocessor` is False, then these will be the
*only* hidden layers in the network.
actor_hidden_activation: Hidden layers activation of the postprocessing
stage of the policy network.
critic_hiddens: Postprocess the critic network model output with these
hidden layers; again, if use_state_preprocessor is True, then the state
will be preprocessed by the model specified with the "model" config
option first.
critic_hidden_activation: Hidden layers activation of the postprocessing
state of the critic.
n_step: N-step for Q-learning.
good_policy: Algorithm for good policies.
adv_policy: Algorithm for adversary policies.
replay_buffer_config: Replay buffer config.
Examples:
{
"_enable_replay_buffer_api": True,
"type": "MultiAgentReplayBuffer",
"learning_starts": 1000,
"capacity": 50000,
"replay_sequence_length": 1,
}
- OR -
{
"_enable_replay_buffer_api": True,
"type": "MultiAgentPrioritizedReplayBuffer",
"capacity": 50000,
"prioritized_replay_alpha": 0.6,
"prioritized_replay_beta": 0.4,
"prioritized_replay_eps": 1e-6,
"replay_sequence_length": 1,
}
- Where -
prioritized_replay_alpha: Alpha parameter controls the degree of
prioritization in the buffer. In other words, when a buffer sample has
a higher temporal-difference error, with how much more probability
should it drawn to use to update the parametrized Q-network. 0.0
corresponds to uniform probability. Setting much above 1.0 may quickly
result as the sampling distribution could become heavily “pointy” with
low entropy.
prioritized_replay_beta: Beta parameter controls the degree of
importance sampling which suppresses the influence of gradient updates
from samples that have higher probability of being sampled via alpha
parameter and the temporal-difference error.
prioritized_replay_eps: Epsilon parameter sets the baseline probability
for sampling so that when the temporal-difference error of a sample is
zero, there is still a chance of drawing the sample.
training_intensity: If set, this will fix the ratio of replayed from a
buffer and learned on timesteps to sampled from an environment and
stored in the replay buffer timesteps. Otherwise, the replay will
proceed at the native ratio determined by
`(train_batch_size / rollout_fragment_length)`.
critic_lr: Learning rate for the critic (Q-function) optimizer.
actor_lr: Learning rate for the actor (policy) optimizer.
target_network_update_freq: Update the target network every
`target_network_update_freq` sample steps.
tau: Update the target by \tau * policy + (1-\tau) * target_policy.
actor_feature_reg: Weights for feature regularization for the actor.
grad_norm_clipping: If not None, clip gradients during optimization at this
value.
Returns:
This updated TrainerConfig object.
"""
# Pass kwargs onto super's `training()` method.
super().training(**kwargs)
if agent_id is not None:
self.agent_id = agent_id
if use_local_critic is not None:
self.use_local_critic = use_local_critic
if use_state_preprocessor is not None:
self.use_state_preprocessor = use_state_preprocessor
if actor_hiddens is not None:
self.actor_hiddens = actor_hiddens
if actor_hidden_activation is not None:
self.actor_hidden_activation = actor_hidden_activation
if critic_hiddens is not None:
self.critic_hiddens = critic_hiddens
if critic_hidden_activation is not None:
self.critic_hidden_activation = critic_hidden_activation
if n_step is not None:
self.n_step = n_step
if good_policy is not None:
self.good_policy = good_policy
if adv_policy is not None:
self.adv_policy = adv_policy
if replay_buffer_config is not None:
self.replay_buffer_config = replay_buffer_config
if training_intensity is not None:
self.training_intensity = training_intensity
if critic_lr is not None:
self.critic_lr = critic_lr
if actor_lr is not None:
self.actor_lr = actor_lr
if target_network_update_freq is not None:
self.target_network_update_freq = target_network_update_freq
if tau is not None:
self.tau = tau
if actor_feature_reg is not None:
self.actor_feature_reg = actor_feature_reg
if grad_norm_clipping is not None:
self.grad_norm_clipping = grad_norm_clipping
return self
def before_learn_on_batch(multi_agent_batch, policies, train_batch_size):
samples = {}
# Modify keys.
for pid, p in policies.items():
i = p.config["agent_id"]
keys = multi_agent_batch.policy_batches[pid].keys()
keys = ["_".join([k, str(i)]) for k in keys]
samples.update(dict(zip(keys, multi_agent_batch.policy_batches[pid].values())))
# Make ops and feed_dict to get "new_obs" from target action sampler.
new_obs_ph_n = [p.new_obs_ph for p in policies.values()]
new_obs_n = list()
for k, v in samples.items():
if "new_obs" in k:
new_obs_n.append(v)
for i, p in enumerate(policies.values()):
feed_dict = {new_obs_ph_n[i]: new_obs_n[i]}
new_act = p.get_session().run(p.target_act_sampler, feed_dict)
samples.update({"new_actions_%d" % i: new_act})
# Share samples among agents.
policy_batches = {pid: SampleBatch(samples) for pid in policies.keys()}
return MultiAgentBatch(policy_batches, train_batch_size)
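# For example, with two policies whose agent_id values are 0 and 1, the merged
# batch built above contains per-agent keys such as "obs_0", "actions_0",
# "new_obs_0" and "new_actions_0" (plus the matching *_1 entries), and every
# policy receives the same shared SampleBatch.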
class MADDPGTrainer(DQNTrainer):
@classmethod
@override(DQNTrainer)
def get_default_config(cls) -> TrainerConfigDict:
return MADDPGConfig().to_dict()
@override(DQNTrainer)
def validate_config(self, config: TrainerConfigDict) -> None:
"""Adds the `before_learn_on_batch` hook to the config.
This hook is called explicitly prior to TrainOneStep() in the execution
setups for DQN and APEX.
"""
# Call super's validation method.
super().validate_config(config)
def f(batch, workers, config):
policies = dict(
workers.local_worker().foreach_policy_to_train(lambda p, i: (i, p))
)
return before_learn_on_batch(batch, policies, config["train_batch_size"])
config["before_learn_on_batch"] = f
@override(DQNTrainer)
def get_default_policy_class(self, config: TrainerConfigDict) -> Type[Policy]:
return MADDPGTFPolicy
# Deprecated: Use ray.rllib.algorithms.maddpg.MADDPG instead!
class _deprecated_default_config(dict):
def __init__(self):
super().__init__(MADDPGConfig().to_dict())
@Deprecated(
old="ray.rllib.algorithms.maddpg.maddpg.DEFAULT_CONFIG",
new="ray.rllib.algorithms.maddpg.maddpg.MADDPGConfig(...)",
error=False,
)
def __getitem__(self, item):
return super().__getitem__(item)
DEFAULT_CONFIG = _deprecated_default_config()
| [] |
2024-01-10 | Schreezer/RasbberryPi_Assistant | runner.py | import pyaudio
import wave
import openai
import requests
import pygame
# Configure the OpenAI and Eleven Labs APIs
OPENAI_API_KEY = "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"
ELEVEN_API_KEY = "038461a70ead2591a29d02008134f6b8"
openai.api_key = OPENAI_API_KEY
eleven_headers = {"Authorization": f"Bearer {ELEVEN_API_KEY}"}
# Record audio and convert to text
import uuid
def record_audio(seconds=5):
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
CHUNK = 1024
RECORD_SECONDS = seconds
audio = pyaudio.PyAudio()
stream = audio.open(format=FORMAT, channels=CHANNELS,
rate=RATE, input=True,
frames_per_buffer=CHUNK)
frames = []
for _ in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = stream.read(CHUNK)
frames.append(data)
stream.stop_stream()
stream.close()
audio.terminate()
filename = f"audio_{uuid.uuid4().hex}.wav"
with wave.open(filename, 'wb') as wave_file:
wave_file.setnchannels(CHANNELS)
wave_file.setsampwidth(audio.get_sample_size(FORMAT))
wave_file.setframerate(RATE)
wave_file.writeframes(b''.join(frames))
return filename
def voice_to_text(audio_file):
audio_file= open(audio_file, "rb")
transcript = openai.Audio.transcribe("whisper-1", audio_file)
text = transcript['text']
return text
def process_text_with_gpt35(text):
model_engine = "text-davinci-003"
response = openai.Completion.create(
engine=model_engine,
prompt=text,
max_tokens=1024,
n=1,
temperature=0.5,
)
result = response.choices[0].text.strip()
return result
def text_to_voice(text):
voice_id = "21m00Tcm4TlvDq8ikWAM"
url = f"https://api.elevenlabs.io/v1/text-to-speech/{voice_id}"
headers = {
"Accept": "application/json",
"xi-api-key": ELEVEN_API_KEY
}
data = {"text": text}
response = requests.post(url, headers=headers, json=data)
# Generate a unique file name using uuid
output_file = f"{uuid.uuid4()}.mp3"
with open(output_file, 'wb') as file:
file.write(response.content)
return output_file
def get_voices():
url = "https://api.elevenlabs.io/v1/voices"
headers = {
"Accept": "application/json",
"xi-api-key": "038461a70ead2591a29d02008134f6b8"
}
response = requests.get(url, headers=headers)
return response.json()
def play_audio(audio_file):
pygame.mixer.init()
pygame.mixer.music.load(audio_file)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy():
continue
def main():
print("Listening for voice input...")
val = record_audio()
play_audio(val)
print("Converting voice to text...")
text = voice_to_text(val)
print(f"Input text: {text}")
print("Processing text with GPT-3.5...")
output = process_text_with_gpt35(text)
print(f"Output text: {output}")
print("Converting text to voice...")
final = text_to_voice(output)
print("Playing generated voice...")
play_audio(final)
import time
import RPi.GPIO as GPIO
BUTTON_GPIO = 16
GPIO.setmode(GPIO.BCM)
GPIO.setup(BUTTON_GPIO, GPIO.IN, pull_up_down=GPIO.PUD_UP)
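# Wiring assumption: the push button connects GPIO 16 to ground. With the
# internal pull-up enabled above, the pin reads HIGH while the button is open
# and LOW while it is pressed, which is what the loop below tests for.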
pressed = False
while True:
# button is pressed when pin is LOW
if not GPIO.input(BUTTON_GPIO):
if not pressed:
print("Button pressed!")
pressed = True
main()
# button not pressed (or released)
else:
pressed = False
time.sleep(0.1)
| [] |
2024-01-10 | kumarkarna2/voice-assistant | speak.py | """library for text to speech"""
import webbrowser
import os
import smtplib
import datetime
import pyttsx3
import speech_recognition as sr
import wikipedia
import openai
# Initialize the API key
openai.api_key = "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"
def responses(prompt):
"""This function takes the input from user
and returns the response from the API"""
response = openai.Completion.create(
engine="text-davinci-003",
# engine="text-curie-001",
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
message = response.choices[0].text # type: ignore
return message
messages = [
{
"role": "system",
"content": "This is a chatbot that only answer questions related to Karna Kumar Chaudhary. For questions not related to Vivien Chua, reply with Sorry, I do not know.",
},
{"role": "user", "content": "Who is Karna Kumar Chaudhary?"},
{
"role": "assistant",
"content": "Karna Kumar Chaudhary is cse undergraduate student at Jaypee University of Information Technology, Waknaghat, Solan, Himachal Pradesh, India. He is a frontend developer and a competitive programmer. He is also a machine learning and AI enthusiast. He is currently working on a project named virtual assistant using python.",
},
]
def generate_response(prompt):
if prompt:
messages.append({"role": "user", "content": prompt})
chat = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
reply = chat.choices[0].message.content # type: ignore
messages.append({"role": "assistant", "content": reply})
return reply # type: ignore
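# Illustrative usage (a sketch only):
#
#   print(generate_response("Who is Karna Kumar Chaudhary?"))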
def sendEmail(to, content):
"""function to send email"""
server = smtplib.SMTP("smtp.gmail.com", 587)
server.ehlo()
server.starttls()
server.login("[email protected]", "password")
server.sendmail("[email protected]", to, content)
server.close()
def speak(audio):
"""simple function to speak the text"""
engine = pyttsx3.init("sapi5")
voices = engine.getProperty("voices")
engine.setProperty("voice", voices[1].id)
engine.say(audio)
engine.runAndWait()
def wish():
"""wish the user"""
# speak("Welcome back sir!")
hour = int(datetime.datetime.now().hour)
if hour >= 4 and hour < 12:
print("Good morning sir!")
speak("Good morning sir!")
elif hour >= 12 and hour < 18:
print("Good afternoon sir!")
speak("Good afternoon sir!")
elif hour >= 18 and hour < 20:
print("Good evening sir!")
speak("Good evening sir!")
else:
print("How can i help you or should i say Good night sir!")
speak("How can i help you or should i say Good night sir!")
def takeCommand():
"""it takes voice input from the user and returns output as text"""
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
r.energy_threshold = 1000
r.adjust_for_ambient_noise(source, duration=1)
try:
print("Recognizing...")
query = r.recognize_google(audio, language="en-in")
print(f"Me : {query}\n")
except Exception: # pylint: disable=broad-except
# print(e)
print("Say that again please...")
return "None"
return query
if __name__ == "__main__":
wish()
while True:
uquery = takeCommand().lower() # type: ignore
if "wikipedia" in uquery:
print("Searching wikipedia...")
speak("Searching wikipedia...")
# replace wikipedia with empty string
uquery = uquery.replace("wikipedia", "")
results = wikipedia.summary(uquery, sentences=2)
print("According to wikipedia")
speak("According to wikipedia")
print(results)
speak(results)
elif "open youtube" in uquery:
print("Opening youtube...")
speak("Opening youtube...")
webbrowser.open("youtube.com")
elif "open google" in uquery:
print("Opening google...")
speak("Opening google...")
webbrowser.open("google.com")
elif "play music" in uquery:
m_dir = "D:\\music"
songs = os.listdir(m_dir)
print("Playing music...")
speak("Playing music...")
os.startfile(os.path.join(m_dir, songs[0]))
elif "time" in uquery:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
print(f"Sir, the time is {strTime}")
speak(f"Sir, the time is {strTime}")
elif "open code" in uquery:
loc = "C:\\Users\\karna\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
print("Opening visual studio code...")
speak("Opening visual studio code...")
os.startfile(loc)
elif "open gmail" in uquery:
print("Opening gmail...")
speak("Opening gmail...")
webbrowser.open("gmail.com")
elif "email to karna" in uquery:
try:
print("What should i say?")
speak("What should i say?")
content = takeCommand()
to = "[email protected]"
sendEmail(to, content)
print("Email has been sent!")
speak("Email has been sent!")
except Exception: # pylint: disable=broad-except
print("Unable to send email")
speak("Unable to send email")
elif "quit" in uquery:
print("Do you need anything else sir? or should i go for a nap? ")
speak("Do you need anything else sir? or should i go for a nap? ")
qstr = takeCommand().lower() # type: ignore
if "no" in qstr or "nope" in qstr or "nah" in qstr or "no thanks" in qstr:
print("Ok sir, i am going for a nap")
speak("Ok sir, i am going for a nap")
exit()
elif "yes" in qstr or "yeah" in qstr or "sure" in qstr or "yup" in qstr:
print("Ok sir, what can i do for you?")
speak("Ok sir, what can i do for you?")
continue
else:
print("Sorry sir, i didn't get you")
speak("Sorry sir, i didn't get you")
elif "sign out" in uquery or "log out" in uquery or "log off" in uquery:
print("Do you wish to log out your computer ? (yes / no): ")
speak("Do you wish to log out your computer ? (yes / no): ")
logout = takeCommand().lower() # type: ignore
if logout == "no":
exit()
else:
os.system("shutdown /l")
elif "shutdown" in uquery:
print("Do you wish to shut down your computer ? (yes / no): ")
speak("Do you wish to shut down your computer ? (yes / no): ")
shutdown = takeCommand().lower() # type: ignore
if shutdown == "no":
exit()
else:
os.system("shutdown /s /t 1")
elif "restart" in uquery:
print("Do you wish to restart your computer ? (yes / no): ")
speak("Do you wish to restart your computer ? (yes / no): ")
res = takeCommand().lower() # type: ignore
if res == "no":
exit()
else:
os.system("shutdown /r /t 1")
elif "sleep" in uquery:
print("Do you wish to put your computer to sleep ? (yes / no): ")
speak("Do you wish to put your computer to sleep ? (yes / no): ")
sleep = takeCommand().lower() # type: ignore
if sleep == "no":
exit()
else:
os.system("rundll32.exe powrprof.dll,SetSuspendState 0,1,0")
else:
print("Searching...")
speak("Searching...")
prompt = uquery
# message = responses(prompt)
message = generate_response(prompt)
print(message)
speak(message)
| [
"Karna Kumar Chaudhary is cse undergraduate student at Jaypee University of Information Technology, Waknaghat, Solan, Himachal Pradesh, India. He is a frontend developer and a competitive programmer. He is also a machine learning and AI enthusiast. He is currently working on a project named virtual assistant using python.",
"Who is Karna Kumar Chaudhary?",
"This is a chatbot that only answer questions related to Karna Kumar Chaudhary. For questions not related to Vivien Chua, reply with Sorry, I do not know."
] |
2024-01-10 | wolfganghuse/nai-infra | knative-eventing~app~src~doc-ingest~KserveML.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra
INSTRUCTION_KEY = "### Instruction:"
RESPONSE_KEY = "### Response:"
INTRO_BLURB = (
"Below is an instruction that describes a task. "
"Write a response that appropriately completes the request."
)
PROMPT_FOR_GENERATION_FORMAT = """{intro}
{instruction_key}
{instruction}
{response_key}
""".format(
intro=INTRO_BLURB,
instruction_key=INSTRUCTION_KEY,
instruction="{instruction}",
response_key=RESPONSE_KEY,
)
class KserveML(LLM):
"""KserveML LLM service.
Example:
.. code-block:: python
from langchain.llms import KserveML
endpoint_url = (
"https://models.hosted-on.kserve.hosting/mpt-7b-instruct/v1/predict"
)
kserve_llm = KserveML(endpoint_url=endpoint_url)
"""
endpoint_url: str = (
"https://models.hosted-on.kserve.hosting/mpt-7b-instruct/v1/predict"
)
"""Endpoint URL to use."""
inject_instruction_format: bool = False
"""Whether to inject the instruction format into the prompt."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
retry_sleep: float = 1.0
"""How long to try sleeping for if a rate limit is encountered"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "kserve"
def _transform_prompt(self, prompt: str) -> str:
"""Transform prompt."""
if self.inject_instruction_format:
prompt = PROMPT_FOR_GENERATION_FORMAT.format(
instruction=prompt,
)
return prompt
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
is_retry: bool = False,
**kwargs: Any,
) -> str:
"""Call out to a KserveML LLM inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = kserve_llm("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
prompt = self._transform_prompt(prompt)
payload = {
"id": "1",
"inputs": [
{
"name": "input0",
"shape": [-1],
"datatype": "BYTES",
"data": [prompt]
}
]
}
payload.update(_model_kwargs)
payload.update(kwargs)
#print(f"Sending request to {self.endpoint_url} with payload: {payload}") # Debug output
# send request
try:
response = requests.post(self.endpoint_url, json=payload)
#print(f"Response received: {response.text}") # Debug output
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
try:
if response.status_code == 429:
if not is_retry:
import time
time.sleep(self.retry_sleep)
return self._call(prompt, stop, run_manager, is_retry=True)
raise ValueError(
f"Error raised by inference API: rate limit exceeded.\nResponse: "
f"{response.text}"
)
parsed_response = response.json()
text = parsed_response["outputs"][0]["data"][0]
if text.startswith(prompt):
text = text[len(prompt):]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {response.text}"
)
# TODO: replace when MosaicML supports custom stop tokens natively
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
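# Illustrative usage (a sketch only; the endpoint URL is the class default and
# should point at a real KServe inference endpoint):
#
#   llm = KserveML(
#       endpoint_url="https://models.hosted-on.kserve.hosting/mpt-7b-instruct/v1/predict",
#       inject_instruction_format=True,
#   )
#   print(llm("Tell me a joke."))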
| [
"PLACEHOLDER\n### Instruction:\n{instruction}\n### Response:\n"
] |
2024-01-10 | wolfganghuse/nai-infra | knative-eventing~app~src~fn~KserveML.py | from typing import Any, Dict, List, Mapping, Optional
import requests
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra
INSTRUCTION_KEY = "### Instruction:"
RESPONSE_KEY = "### Response:"
INTRO_BLURB = (
"Below is an instruction that describes a task. "
"Write a response that appropriately completes the request."
)
PROMPT_FOR_GENERATION_FORMAT = """{intro}
{instruction_key}
{instruction}
{response_key}
""".format(
intro=INTRO_BLURB,
instruction_key=INSTRUCTION_KEY,
instruction="{instruction}",
response_key=RESPONSE_KEY,
)
class KserveML(LLM):
"""KserveML LLM service.
Example:
.. code-block:: python
from langchain.llms import KserveML
endpoint_url = (
"https://models.hosted-on.kserve.hosting/mpt-7b-instruct/v1/predict"
)
kserve_llm = KserveML(endpoint_url=endpoint_url)
"""
endpoint_url: str = (
"https://models.hosted-on.kserve.hosting/mpt-7b-instruct/v1/predict"
)
"""Endpoint URL to use."""
inject_instruction_format: bool = False
"""Whether to inject the instruction format into the prompt."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
retry_sleep: float = 1.0
"""How long to try sleeping for if a rate limit is encountered"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"endpoint_url": self.endpoint_url},
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "kserve"
def _transform_prompt(self, prompt: str) -> str:
"""Transform prompt."""
if self.inject_instruction_format:
prompt = PROMPT_FOR_GENERATION_FORMAT.format(
instruction=prompt,
)
return prompt
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
is_retry: bool = False,
**kwargs: Any,
) -> str:
"""Call out to a KserveML LLM inference endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = kserve_llm("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
prompt = self._transform_prompt(prompt)
payload = {
"id": "1",
"inputs": [
{
"name": "input0",
"shape": [-1],
"datatype": "BYTES",
"data": [prompt]
}
]
}
payload.update(_model_kwargs)
payload.update(kwargs)
#print(f"Sending request to {self.endpoint_url} with payload: {payload}") # Debug output
# send request
try:
response = requests.post(self.endpoint_url, json=payload)
#print(f"Response received: {response.text}") # Debug output
except requests.exceptions.RequestException as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
try:
if response.status_code == 429:
if not is_retry:
import time
time.sleep(self.retry_sleep)
return self._call(prompt, stop, run_manager, is_retry=True)
raise ValueError(
f"Error raised by inference API: rate limit exceeded.\nResponse: "
f"{response.text}"
)
parsed_response = response.json()
text = parsed_response["outputs"][0]["data"][0]
if text.startswith(prompt):
text = text[len(prompt):]
except requests.exceptions.JSONDecodeError as e:
raise ValueError(
f"Error raised by inference API: {e}.\nResponse: {response.text}"
)
# TODO: replace when MosaicML supports custom stop tokens natively
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text | [
"PLACEHOLDER\n### Instruction:\n{instruction}\n### Response:\n"
] |
2024-01-10 | shaham-noorani/spotifind | src~make_recommendations.py | import openai
import pandas as pd
import json
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
def read_data():
liked_songs = pd.read_csv("data/liked_songs.tsv", sep="\t").to_dict("records")
artist_genres = json.load(open("data/artist_genres.json"))
return liked_songs, artist_genres
def get_song_analysis(song, artist_genres):
genres = "/".join(artist_genres[song["Main Artist ID"]])
return f"{song['Song Title']} by {song['Main Artist']} is a {genres} song released in {song['Release Date']}"
import concurrent.futures
def search_song_batch(song_batch, query, artist_genres):
matching_songs = []
search_queries = []
for song in song_batch:
song_analysis = get_song_analysis(song, artist_genres)
search_queries.append(song_analysis)
search_query = "\n".join(search_queries)
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{
"role": "user",
"content": "I'm going to give you 50 songs and some information about each.",
},
{
"role": "user",
"content": "I would like for you to tell me if each song strongly matches this query and err on the side of 'no':"
+ query,
},
{
"role": "user",
"content": "Format your response at {song name} - {yes / no}",
},
{
"role": "user",
"content": search_query,
},
],
temperature=0,
max_tokens=500,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
for j, answer in enumerate(response.choices[0].message.content.lower().split("\n")):
if "yes" in answer:
matching_songs.append(song_batch[j])
return matching_songs
def search_songs(query):
matching_songs = []
liked_songs, artist_genres = read_data()
# Create batches of 50 songs
song_batches = [liked_songs[i : i + 50] for i in range(0, len(liked_songs), 50)]
# Use ThreadPoolExecutor to make 10 requests of 50 songs at once
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
future_to_batch = {
executor.submit(search_song_batch, batch, query, artist_genres): batch
for batch in song_batches
}
for future in concurrent.futures.as_completed(future_to_batch):
batch = future_to_batch[future]
try:
matching_songs.extend(future.result())
except Exception as exc:
print(f"An exception occurred while processing batch {batch}: {exc}")
return matching_songs
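# Illustrative usage (a sketch only; assumes data/liked_songs.tsv and
# data/artist_genres.json exist and OPENAI_API_KEY is set in .env):
#
#   if __name__ == "__main__":
#       for song in search_songs("upbeat 80s synth-pop for a road trip"):
#           print(song["Song Title"], "-", song["Main Artist"])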
| [
"I would like for you to tell me if each song strongly matches this query and err on the side of 'no':PLACEHOLDER",
"Format your response at {song name} - {yes / no}",
"I'm going to give you 50 songs and some information about each."
] |
2024-01-10 | akanshakhandelwal/AzureOpenAI--FinancialSummarizer | customscore.py |
import os
from langchain.llms import OpenAI
import streamlit as st
import langchain
import pypdf
import unstructured
import utils
from langchain.document_loaders import MergedDataLoader
import pinecone
from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
import json
from pprint import pprint
PROMPTS = {
'credit_ratings': "Extract credit ratings from the document of {}. Score 1-10 based on AAA=10 to D=1 scale. If there is no mention of credit rating, give it a score of 0",
'debt_to_equity': "Calculate the debt-to-equity ratio from the balance sheet in document of {}. Score: <0.5=10, 0.5-1=8, 1-1.5=6, >1.5=4.",
'interest_coverage': "Calculate the interest coverage ratio from the financials in document of {}. Score: >5=10, 3-5=7, 1-3=4, <1=2. ",
'liquidity_ratio': "Calculate the liquidity ratio in document for {}. Score: >2=10, 1.5-2=8, 1-1.5=6, <1=4.",
'profit_margin': "Calculate the profit margin in document for {}. Score: >20%=10, 15-20%=8, 10-15%=6, <10%=4.",
'revenue_growth': "Calculate the revenue growth rate in document for {}. Score: >15%=10, 10-15%=8, 5-10%=6, <5%=4.",
'management_quality': "Assess the management quality in document of {}. Score: Excellent=10, Good=8, Average=6, Poor=4.",
'legal_compliance': "Assess the legal compliance of {} based on pdocument. Score: Excellent=10, Good=8, Average=6, Poor=4."
}
class RiskScore:
def __init__(self):
self.total_score = 0
self.total_weight = 0
def api_call(self, prompt,company_name):
vector_store=utils.get_azure_vector_store()
prompt = prompt.format(company_name)
response = utils.ask_and_get_answer(vector_store, prompt)
result_str = response['result']
# Replace single quotes with double quotes to make it a valid JSON string
# result_str = result_str.replace("'", '"')
print(result_str)
# Convert the JSON string to a Python dictionary
result_dict = json.loads(result_str)
print(result_str)
# Extract the score from the dictionary
score = result_dict['score']
explanation = result_dict['explanation']
# pprint(f"Score: {score}, Type: {type(score)}")
# pprint(f"Score: {explanation}")
return score,explanation
def credit_ratings(self, company_name,weight):
prompt = PROMPTS["credit_ratings"]
score,explanation = self.api_call(prompt,company_name)
self.total_score += score * (weight if score != -1 else 0)
self.total_weight += weight if score != -1 else 0
return score,explanation
def debt_to_equity(self, company_name,weight):
prompt = PROMPTS["debt_to_equity"]
score,explanation = self.api_call(prompt,company_name)
self.total_score += score * (weight if score != -1 else 0)
self.total_weight += weight if score != -1 else 0
return score,explanation
def interest_coverage(self, company_name,weight):
prompt = PROMPTS["interest_coverage"]
score,explanation = self.api_call(prompt,company_name)
self.total_score += score * (weight if score != -1 else 0)
self.total_weight += weight if score != -1 else 0
return score,explanation
def liquidity_ratio(self,company_name, weight):
prompt = PROMPTS["liquidity_ratio"]
score,explanation = self.api_call(prompt,company_name)
self.total_score += score * (weight if score != -1 else 0)
self.total_weight += weight if score != -1 else 0
return score,explanation
def profit_margin(self,company_name, weight):
prompt = PROMPTS["profit_margin"]
score,explanation = self.api_call(prompt,company_name)
self.total_score += score * (weight if score != -1 else 0)
self.total_weight += weight if score != -1 else 0
return score,explanation
def revenue_growth(self, company_name,weight):
prompt = PROMPTS["revenue_growth"]
score,explanation = self.api_call(prompt,company_name)
self.total_score += score * (weight if score != -1 else 0)
self.total_weight += weight if score != -1 else 0
return score,explanation
def legal_compliance(self, company_name,weight):
prompt = PROMPTS["legal_compliance"]
score,explanation = self.api_call(prompt,company_name)
self.total_score += score * (weight if score != -1 else 0)
self.total_weight += weight if score != -1 else 0
return score,explanation
def management_quality(self, company_name,weight):
prompt = PROMPTS["management_quality"]
score,explanation = self.api_call(prompt,company_name)
self.total_score += score * (weight if score != -1 else 0)
self.total_weight += weight if score != -1 else 0
return score,explanation
def calculate_overall_risk_score(self):
if self.total_weight == 0:
return -1 # Handling the case where all weights are 0
return self.total_score / self.total_weight
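# Illustrative usage (a sketch only; the company name and weights are made up,
# and utils.get_azure_vector_store() must be configured):
#
#   scorer = RiskScore()
#   scorer.credit_ratings("Acme Corp", weight=0.3)
#   scorer.debt_to_equity("Acme Corp", weight=0.2)
#   scorer.profit_margin("Acme Corp", weight=0.5)
#   print(scorer.calculate_overall_risk_score())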
| [
"interest_coverage",
"{'credit_ratings': 'Extract credit ratings from the document of {}. Score 1-10 based on AAA=10 to D=1 scale. If there is no mention of credit rating, give it a score of 0', 'debt_to_equity': 'Calculate the debt-to-equity ratio from the balance sheet in document of {}. Score: <0.5=10, 0.5-1=8, 1-1.5=6, >1.5=4.', 'interest_coverage': 'Calculate the interest coverage ratio from the financials in document of {}. Score: >5=10, 3-5=7, 1-3=4, <1=2. ', 'liquidity_ratio': 'Calculate the liquidity ratio in document for {}. Score: >2=10, 1.5-2=8, 1-1.5=6, <1=4.', 'profit_margin': 'Calculate the profit margin in document for {}. Score: >20%=10, 15-20%=8, 10-15%=6, <10%=4.', 'revenue_growth': 'Calculate the revenue growth rate in document for {}. Score: >15%=10, 10-15%=8, 5-10%=6, <5%=4.', 'management_quality': 'Assess the management quality in document of {}. Score: Excellent=10, Good=8, Average=6, Poor=4.', 'legal_compliance': 'Assess the legal compliance of {} based on pdocument. Score: Excellent=10, Good=8, Average=6, Poor=4.'}",
"profit_margin",
"credit_ratings",
"legal_compliance",
"debt_to_equity",
"liquidity_ratio",
"management_quality",
"revenue_growth"
] |
2024-01-10 | WhimsicalWill/Chatmosphere | backend~segway.py | from langchain.chains import LLMChain
from langchain import PromptTemplate, FewShotPromptTemplate
class TopicSegway:
"""
A class that uses a language model to generate engaging responses to a given query,
in the context of a series of topic names.
Attributes:
llm (OpenAI): Language model to generate responses.
chain (LLMChain): LLMChain instance to help structure and generate responses.
few_shot_prompt (FewShotPromptTemplate): Few-shot prompt to guide the language model.
"""
def __init__(self, llm):
"""
The constructor for TopicSegway class.
"""
self.configurePrompt()
self.chain = LLMChain(llm=llm, prompt=self.few_shot_prompt)
def configurePrompt(self):
"""
Configures the few-shot prompt to be used by the language model.
Sets up the few-shot prompt with examples and structure.
"""
example_1 = {
"query": "How will technology shape the future?",
"topic1": "How is artificial intelligence impacting our daily lives?",
"topic2": "What do you think about the future of cryptocurrency?",
"answer": "You might enjoy discussing how AI technology will fit into our future.\n" \
"You could explore the lasting impact of cryptocurrency.\n"
}
example_2 = {
"query": "What are the impacts of climate change?",
"topic1": "How does climate change affect wildlife?",
"topic2": "What are the economic consequences of climate change?",
"answer": "You might find it interesting to discuss how climate change is affecting wildlife.\n" \
"You might enjoy conversing about how climate change will affect the economy.\n"
}
examples = [example_1, example_2]
template = """
Query: {query}
Topic 1: {topic1}
Topic 2: {topic2}
Answer: {answer}
"""
# Define the structure of the prompt with input variables and template
example_prompt = PromptTemplate(
input_variables=["query", "topic1", "topic2", "answer"],
template=template,
)
# Define the prefix for the prompt, giving clear instructions on how to construct an engaging response
prompt_prefix = "Given the user's query, suggest two topics of discussion. For each topic, " \
"craft an intriguing line explaining why the topic could be of interest to the user. " \
"Make sure that you give the user a logical reason why they may be interested in the topics. " \
"Please put a new line between each topic suggestion, since your response will be invalid without this. " \
"Here are some examples:\n"
prompt_suffix = """
Query: {query}
Topic 1: {topic1}
Topic 2: {topic2}
Answer:"""
# Generate the few-shot prompt with the provided examples and structure
self.few_shot_prompt = FewShotPromptTemplate(
examples=examples,
example_prompt=example_prompt,
prefix=prompt_prefix,
suffix=prompt_suffix,
input_variables=["query", "topic1", "topic2"],
example_separator="\n",
)
print("Set up few shot prompt")
def getResponse(self, query, topics):
"""
Generates a response to a given query in the context of a series of topic names.
Parameters:
query (str): The query to generate a response for.
topics (list): A list of topic dictionaries with the keys 'topicName', 'topicID', and 'userID'.
Returns:
str: The generated response to the query.
"""
print(f"Generating response for query {query}")
assert len(topics) == 2, f"Must provide two topics, not {len(topics)}. Topics: {topics}"
# Assuming topics is a list of three topicNames
input = {
"query": query,
"topic1": topics[0]['topicName'],
"topic2": topics[1]['topicName'],
}
print("Input:", input)
response = self.chain.run(input)
print("Response:", response)
return response | [
"\n Query: {query}\n Topic 1: {topic1}\n Topic 2: {topic2}\n Answer:",
"Given the user's query, suggest two topics of discussion. For each topic, craft an intriguing line explaining why the topic could be of interest to the user. Make sure that you give the user a logical reason why they may be interested in the topics. Please put a new line between each topic suggestion, since your response will be invalid without this. Here are some examples:\n",
"\n Query: {query}\n Topic 1: {topic1}\n Topic 2: {topic2}\n Answer: {answer}\n ",
"answer"
] |
2024-01-10 | WhimsicalWill/Chatmosphere | backend~matching.py | import numpy as np
import faiss
from openai.embeddings_utils import get_embedding
from query import AsymmetricQueryHelper
class TopicMatcher:
"""
A class that searches a vector database for the topics that are most
semantically similar to the given query.
Attributes:
k (int): Number of similar topics to find.
engine (str): The name of the embedding engine to use.
topicInfo (list): List of topic info (topicID, userID, topicName).
embeddings (list): List of embeddings for each topic.
index (faiss.Index): Index for searching embeddings.
"""
def __init__(self, llm, k=2, engine='text-embedding-ada-002'):
"""
The constructor for TopicMatcher class.
Parameters:
k (int): Number of similar topics to find. Default is 2.
engine (str): The name of the embedding engine to use. Default is 'text-embedding-ada-002'.
"""
self.llm = llm
self.k = k
self.engine = engine
self.topicInfo = []
self.embeddings = []
self.index = None
self.queryHelper = AsymmetricQueryHelper(llm)
def addTopics(self, topicTuples):
"""
Adds a list of topics to the matcher.
Parameters:
topicTuples (list): A list of tuples where each tuple contains a user ID and a topic title.
"""
for info in topicTuples:
topicID, _, title = info
if title == "Brainstorm": # skip Brainstorm chats
continue
self.topicInfo.append(info)
self.embeddings.append(get_embedding(title, engine=self.engine))
print(f"Added topic {topicID}")
self.buildIndex()
def addTopic(self, topicID, userID, title):
"""
Adds a single topic to the matcher.
Parameters:
userID (str): The user ID associated with the topic.
title (str): The title of the topic.
"""
if title == "Brainstorm": # skip Brainstorm chats
return
self.topicInfo.append((topicID, userID, title))
self.embeddings.append(get_embedding(title, engine=self.engine))
self.buildIndex()
def buildIndex(self):
"""
Builds the FAISS index from the current list of embeddings.
"""
embeddings = np.array(self.embeddings).astype('float32')
d = embeddings.shape[1]
self.index = faiss.IndexFlatL2(d)
self.index.add(embeddings)
def searchIndexWithQuery(self, embedding, userID, k, selectedTopicIDs=None):
"""
        Retrieves the topics most similar to the provided query embedding.
Parameters:
embedding (np.array): The embedding used to search the vector store.
userID (str): The ID of the user making the query.
        k (int): The number of similar topics to return.
        selectedTopicIDs (set, optional): Topic IDs to exclude from the results.
Returns:
list: A list of dictionaries, each containing the topic name, topic ID, and user ID for a similar topic.
"""
D, I = self.index.search(embedding, 6*k)
res = []
for idx, score in zip(I[0], D[0]):
topicID, userCreatorID, title = self.topicInfo[idx]
print('Search results: ', topicID, userCreatorID, title)
if selectedTopicIDs and topicID in selectedTopicIDs:
continue
if userCreatorID == userID:
continue
print(f"Topic {topicID} has score {score}. \nTopic: {title}\n")
res.append({
"topicName": title,
"topicID": topicID,
"userID": userCreatorID
})
if len(res) == k:
break
return res
def getSimilarTopics(self, query, userID):
"""
Retrieves the most similar topics to the provided query.
Parameters:
query (str): The query to find similar topics for.
userID (str): The ID of the user making the query.
Returns:
list: A list of dictionaries, each containing the topic name, topic ID, and user ID for a similar topic.
"""
queryEmbedding = get_embedding(query, engine=self.engine)
queryEmbedding = np.array([queryEmbedding]).astype('float32')
originalResults = self.searchIndexWithQuery(queryEmbedding, userID, self.k)
return originalResults
        # TODO: profile the timing of alternate queries, and implement them efficiently
# alternateQueries = self.queryHelper.getAlternateQuery(query, numAlternates=5)
# alternateEmbeddings = [get_embedding(alternateQuery, engine=self.engine) for alternateQuery in alternateQueries]
# alternateEmbeddings = np.array(alternateEmbeddings).astype('float32')
# numDesiredOriginal = self.k // 2
# numDesiredAlternate = self.k - numDesiredOriginal
# originalResults = self.searchIndexWithQuery(queryEmbedding, userID, numDesiredOriginal)
# selectedTopics = set([topic['topicID'] for topic in originalResults])
# print(f"Alternate queries: {alternateQueries}\n")
# alternateResults = self.searchIndexWithQuery(alternateEmbeddings, userID, numDesiredAlternate, selectedTopics)
# return originalResults + alternateResults | [] |
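# Illustrative usage sketch for the TopicMatcher above. Assumptions: OPENAI_API_KEY is
# configured for openai.embeddings_utils.get_embedding, and `llm` is a LangChain LLM
# handle accepted by AsymmetricQueryHelper; neither is defined in this file.
matcher = TopicMatcher(llm=llm, k=2)
matcher.addTopics([
    (1, "userA", "Training large language models"),
    (2, "userB", "Backpacking in Patagonia"),
    (3, "userC", "Vector databases and FAISS"),
])
for match in matcher.getSimilarTopics("How do embeddings power semantic search?", userID="userZ"):
    print(match["topicID"], match["topicName"], match["userID"])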
2024-01-10 | mikirinkode/ai-assistant-with-gpt-and-semantic-kernel | miriko.py | from openai import OpenAI
import semantic_kernel as sk
from semantic_kernel.connectors.ai.open_ai import OpenAIChatCompletion
class Miriko:
def __init__(self, openai_model, api_key, org_id):
# initialize Miriko
self.name = "Miriko"
self.memories = [
{"role": "system", "content": "You are Miriko, A personal AI Assistant that helps User daily tasks."}
]
# initialize OpenAI client
self.openai_client = OpenAI(api_key=api_key)
self.openai_model = openai_model
# Initialize kernel
self.kernel = sk.Kernel()
chat_service = OpenAIChatCompletion(openai_model, api_key, org_id)
# Register chat service
self.kernel.add_chat_service("OpenAI_chat_gpt", chat_service)
# import created skill
self.skill = self.kernel.import_semantic_skill_from_directory("./skills", "MirikoSkill")
self.brainstormer = self.skill["ExpertBrainstorming"]
self.summarizer = self.skill["Summarizer"]
def use_skill(self, skill_name, prompt):
if skill_name == "Expert Brainstorming":
return self.brainstormer(prompt)
elif skill_name == "Summarizer":
return self.summarizer(prompt)
def chat(self, prompt):
# add prompt to memory so miriko can remember it
self.memories.append({"role": "user", "content": prompt})
result = self.openai_client.chat.completions.create(
model = self.openai_model,
messages= self.memories,
stream=True,
)
# system_content = {"role": "system", "content": result.choices[0].delta.content}
# self.memories.append(system_content)
# return result
return result
def get_all_miriko_memory(self):
return self.memories | [
"You are Miriko, A personal AI Assistant that helps User daily tasks."
] |
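# Illustrative usage sketch for the Miriko class above. Assumptions: the model name, API key,
# and org ID below are placeholders; chat() returns an OpenAI v1 streaming iterator that is
# consumed chunk by chunk.
miriko = Miriko(openai_model="gpt-3.5-turbo", api_key="sk-placeholder", org_id="org-placeholder")
reply = ""
for chunk in miriko.chat("Summarize my tasks for today."):
    delta = chunk.choices[0].delta.content
    if delta:
        reply += delta
        print(delta, end="", flush=True)
# chat() does not store the assistant's reply (see the commented-out lines above), so a caller
# could append it manually: miriko.memories.append({"role": "assistant", "content": reply})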
2024-01-10 | iMaguz/ludwig | ludwig~encoders~text_encoders.py | #! /usr/bin/env python
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import inspect
import logging
from typing import Any, Callable, Dict, List, Optional, Type, TYPE_CHECKING, TypeVar, Union
import numpy as np
import torch
from torch import nn
from ludwig.api_annotations import DeveloperAPI
from ludwig.constants import TEXT
from ludwig.encoders.base import Encoder
from ludwig.encoders.registry import register_encoder
from ludwig.modules.reduction_modules import SequenceReducer
from ludwig.schema.encoders.sequence_encoders import SequenceEncoderConfig
from ludwig.schema.encoders.text_encoders import (
ALBERTConfig,
AutoTransformerConfig,
BERTConfig,
CamemBERTConfig,
CTRLConfig,
DebertaV2Config,
DistilBERTConfig,
ELECTRAConfig,
FlauBERTConfig,
GPT2Config,
GPTConfig,
LongformerConfig,
MT5Config,
RoBERTaConfig,
T5Config,
TfIdfEncoderConfig,
TransformerXLConfig,
XLMConfig,
XLMRoBERTaConfig,
XLNetConfig,
)
from ludwig.schema.llms.peft import BaseAdapterConfig
from ludwig.utils.hf_utils import load_pretrained_hf_model_with_hub_fallback
from ludwig.utils.torch_utils import FreezeModule
if TYPE_CHECKING:
from transformers import PretrainedConfig, PreTrainedModel
from ludwig.schema.encoders.text_encoders import HFEncoderConfig
logger = logging.getLogger(__name__)
def _cls_pooled_error_message(encoder: str):
# TODO(Arnav): Remove this once we have reduce_output options set for
# each encoder type in the schema
raise ValueError(f"reduce_output cannot be cls_pooled for {encoder}")
class HFTextEncoder(Encoder):
def _init_config(self, transformer, schema_keys: List[str], encoder_config: SequenceEncoderConfig):
"""Creates a config object for the encoder using the transformer model and the passed-in encoder config.
The transformer's config is only known after it is instantiated, so we must update the
encoder config with the values from the transformer config.
Args:
transformer: The transformer model.
schema_keys: The keys in the encoder config schema. We only want to update the encoder config
with the values from the transformer config that are in the schema.
encoder_config: The existing encoder config containing defaults and user-specified values.
If the values in this config differ from the transformer's config, the transformer's config
values will override this config's values.
Returns:
A new encoder config object with the updated values from the transformer config.
"""
transformer_config = transformer.config.to_dict()
final_hf_config_params = {k: v for k, v in transformer_config.items() if k in schema_keys}
encoder_config_dict = encoder_config.to_dict()
encoder_config_dict.update(final_hf_config_params)
return self.get_schema_cls().from_dict(encoder_config_dict)
def _init_transformer_from_scratch(
self, hf_model_cls: Type, hf_config_cls: Type, hf_config_params: Dict[str, Any], vocab_size: int
):
"""Initializes the transformer model from scratch. This is in contrast to loading a pre-trained model.
Args:
hf_model_cls: The HuggingFace model class.
hf_config_cls: The HuggingFace config class.
hf_config_params: The HuggingFace config parameters exposed through the Ludwig schema.
vocab_size: The vocab size of the dataset. Because we are training from scratch, we can resize the
token embeddings table freely.
Returns:
The transformer model.
"""
config = hf_config_cls(**hf_config_params)
transformer = hf_model_cls(config)
self._maybe_resize_token_embeddings(transformer, vocab_size)
return transformer
def _maybe_resize_token_embeddings(self, transformer, vocab_size: int):
"""Resizes the token embeddings if the vocab size is different from the transformer's vocab size.
This should only happen if we are instantiating a model from scratch (i.e. not loading from a pretrained model
or checkpoint). Pretrained models update the vocab size stored in the config. This means if we are loading a
pretrained model from a checkpoint, the config vocab size should match the model's vocab size.
It is important that pretrained models update the vocab size stored in the config because sometimes the
pretrained models will have an embeddings table that is a different size than the vocab size. Examples:
CamemBERT: https://github.com/huggingface/tokenizers/issues/900#issue-1122256698
T5: https://github.com/huggingface/transformers/issues/4875#issue-635471552
Args:
transformer: The transformer model.
vocab_size: The vocab size of the dataset.
"""
if vocab_size != transformer.config.vocab_size:
transformer.resize_token_embeddings(vocab_size)
def _wrap_transformer(self, transformer: nn.Module, adapter: Optional[BaseAdapterConfig], trainable: bool):
if adapter is not None:
from peft import get_peft_model
peft_config = adapter.to_config()
transformer = get_peft_model(transformer, peft_config)
logger.info("==================================================")
logger.info("Trainable Parameter Summary For Fine-Tuning:")
transformer.print_trainable_parameters()
logger.info("==================================================")
return FreezeModule(transformer, frozen=not trainable)
def get_embedding_layer(self) -> nn.Module:
return next(self.transformer.module.children())
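# Illustrative sketch (not Ludwig code) of the from-scratch pattern documented in
# _init_transformer_from_scratch and _maybe_resize_token_embeddings above: build a config,
# instantiate the model, then resize its token-embedding table to the dataset vocab size.
# The tiny BERT dimensions are arbitrary assumptions chosen only to keep the example cheap.
def _example_resize_embeddings(dataset_vocab_size: int = 12000):
    from transformers import BertConfig, BertModel

    config = BertConfig(
        vocab_size=30522, hidden_size=128, num_hidden_layers=2,
        num_attention_heads=2, intermediate_size=256,
    )
    model = BertModel(config)
    if dataset_vocab_size != model.config.vocab_size:
        model.resize_token_embeddings(dataset_vocab_size)
    # After resizing, the embedding matrix matches the dataset vocabulary.
    assert model.get_input_embeddings().num_embeddings == dataset_vocab_size
    return model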
HFModelT = TypeVar("HFModelT", bound="PreTrainedModel")
HFConfigT = TypeVar("HFConfigT", bound="PretrainedConfig")
ConfigT = TypeVar("ConfigT", bound="HFEncoderConfig")
class HFTextEncoderImpl(HFTextEncoder):
def __init__(
self,
model_cls: Type[HFModelT],
config_cls: Type[HFConfigT],
schema_cls: Type[ConfigT],
max_sequence_length: int,
use_pretrained: bool,
pretrained_model_name_or_path: str,
saved_weights_in_checkpoint: bool,
reduce_output: str,
trainable: bool,
adapter: Optional[BaseAdapterConfig],
pretrained_kwargs: Dict,
encoder_config: Optional[ConfigT],
**kwargs,
):
super().__init__()
# TODO(travis): get_hf_config_param_names should be implemented as abstract in HFEncoderConfig
vocab_size = kwargs["vocab_size"]
hf_config_params = {k: v for k, v in kwargs.items() if k in schema_cls.get_hf_config_param_names()}
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
model_cls, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(model_cls, config_cls, hf_config_params, vocab_size)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs["pooler_output"]
else:
hidden = transformer_outputs["last_hidden_state"][:, 1:-1, :] # bos + [sent] + sep
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
return torch.Size([self.max_sequence_length - 2, self.transformer.module.config.hidden_size])
if self.reduce_output == "concat":
return torch.Size(
[
(self.max_sequence_length - 2) * self.transformer.module.config.hidden_size,
]
)
elif self.reduce_output == "concat":
# add the -2 to account of start and end tokens.
return torch.Size([self.transformer.module.config.hidden_size * (self.max_sequence_length - 2)])
return torch.Size([self.transformer.module.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
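# Hedged helper (illustrative only) making the shape bookkeeping used throughout this module
# explicit: for encoders whose tokenizers add a start and an end token, the per-token output
# drops those two positions, so reduce_output=None keeps a (max_sequence_length - 2) sequence
# axis and reduce_output="concat" flattens it into (max_sequence_length - 2) * hidden_size.
def _example_output_size(max_sequence_length: int, hidden_size: int, reduce_output: Optional[str]):
    if reduce_output is None:
        return (max_sequence_length - 2, hidden_size)
    if reduce_output == "concat":
        return ((max_sequence_length - 2) * hidden_size,)
    return (hidden_size,)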
@DeveloperAPI
@register_encoder("albert", TEXT)
class ALBERTEncoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "albert-base-v2"
def __init__(
self,
max_sequence_length,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
reduce_output: str = "cls_pooled",
vocab_size: int = 30000,
embedding_size: int = 128,
hidden_size: int = 4096,
num_hidden_layers: int = 12,
num_hidden_groups: int = 1,
num_attention_heads: int = 64,
intermediate_size: int = 16384,
inner_group_num: int = 1,
hidden_act: str = "gelu_new",
hidden_dropout_prob: float = 0,
attention_probs_dropout_prob: float = 0,
max_position_embeddings: int = 512,
type_vocab_size: int = 2,
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
classifier_dropout_prob: float = 0.1,
position_embedding_type: str = "absolute",
pad_token_id: int = 0,
bos_token_id: int = 2,
eos_token_id: int = 3,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import AlbertConfig, AlbertModel
hf_config_params = dict(
vocab_size=vocab_size,
embedding_size=embedding_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_hidden_groups=num_hidden_groups,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
inner_group_num=inner_group_num,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
classifier_dropout_prob=classifier_dropout_prob,
position_embedding_type=position_embedding_type,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
AlbertModel, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(AlbertModel, AlbertConfig, hf_config_params, vocab_size)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return ALBERTConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by BERT tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.module.config.hidden_size,
]
)
elif self.reduce_output == "concat":
            # add the -2 to account for start and end tokens.
return torch.Size([self.transformer.module.config.hidden_size * (self.max_sequence_length - 2)])
return torch.Size([self.transformer.module.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("mt5", TEXT)
class MT5Encoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "google/mt5-base"
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
reduce_output: str = "sum",
vocab_size: int = 250112,
d_model: int = 512,
d_kv: int = 64,
d_ff: int = 1024,
num_layers: int = 8,
num_decoder_layers: int = None,
num_heads: int = 6,
relative_attention_num_buckets: int = 32,
dropout_rate: float = 0.1,
layer_norm_epsilon: float = 1e-06,
initializer_factor: float = 1.0,
feed_forward_proj: str = "gated-gelu",
is_encoder_decoder: bool = True,
use_cache: bool = True,
tokenizer_class: str = "T5Tokenizer",
tie_word_embeddings: bool = False,
pad_token_id: int = 0,
eos_token_id: int = 1,
decoder_start_token_id: int = 0,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import MT5Config, MT5EncoderModel
hf_config_params = dict(
vocab_size=vocab_size,
d_model=d_model,
d_kv=d_kv,
d_ff=d_ff,
num_layers=num_layers,
num_decoder_layers=num_decoder_layers,
num_heads=num_heads,
relative_attention_num_buckets=relative_attention_num_buckets,
dropout_rate=dropout_rate,
layer_norm_epsilon=layer_norm_epsilon,
initializer_factor=initializer_factor,
feed_forward_proj=feed_forward_proj,
is_encoder_decoder=is_encoder_decoder,
use_cache=use_cache,
tokenizer_class=tokenizer_class,
tie_word_embeddings=tie_word_embeddings,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
decoder_start_token_id=decoder_start_token_id,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
MT5EncoderModel, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(MT5EncoderModel, MT5Config, hf_config_params, vocab_size)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.reduce_output = reduce_output
if reduce_output == "cls_pooled":
_cls_pooled_error_message(self.__class__.__name__)
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
)
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return MT5Config
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by MT5 tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.module.config.hidden_size,
]
)
elif self.reduce_output == "concat":
            # add the -2 to account for start and end tokens.
return torch.Size([self.transformer.module.config.hidden_size * (self.max_sequence_length - 2)])
return torch.Size([self.transformer.module.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("xlmroberta", TEXT)
class XLMRoBERTaEncoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "xlm-roberta-base"
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "cls_pooled",
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
vocab_size: int = None,
pad_token_id: int = 1,
bos_token_id: int = 0,
eos_token_id: int = 2,
max_position_embeddings: int = 514,
type_vocab_size: int = 1,
add_pooling_layer: bool = True,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import XLMRobertaConfig, XLMRobertaModel
hf_config_params = dict(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
XLMRobertaModel, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(
XLMRobertaModel, XLMRobertaConfig, hf_config_params, vocab_size
)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return XLMRoBERTaConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by XLMRoberta tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.module.config.hidden_size,
]
)
elif self.reduce_output == "concat":
            # add the -2 to account for start and end tokens.
return torch.Size([self.transformer.module.config.hidden_size * (self.max_sequence_length - 2)])
return torch.Size([self.transformer.module.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("bert", TEXT)
class BERTEncoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "bert-base-uncased"
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
reduce_output: str = "cls_pooled",
vocab_size: int = 30522,
hidden_size: int = 768,
num_hidden_layers: int = 12,
num_attention_heads: int = 12,
intermediate_size: int = 3072,
hidden_act: Union[str, Callable] = "gelu",
hidden_dropout_prob: float = 0.1,
attention_probs_dropout_prob: float = 0.1,
max_position_embeddings: int = 512,
type_vocab_size: int = 2,
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
pad_token_id: int = 0,
gradient_checkpointing: bool = False,
position_embedding_type: str = "absolute",
classifier_dropout: float = None,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import BertConfig, BertModel
hf_config_params = dict(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
pad_token_id=pad_token_id,
gradient_checkpointing=gradient_checkpointing,
position_embedding_type=position_embedding_type,
classifier_dropout=classifier_dropout,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
BertModel, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(BertModel, BertConfig, hf_config_params, vocab_size)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return BERTConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
# TODO(shreya): Confirm that this is it
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by BERT tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.module.config.hidden_size,
]
)
elif self.reduce_output == "concat":
            # add the -2 to account for start and end tokens.
return torch.Size([self.transformer.module.config.hidden_size * (self.max_sequence_length - 2)])
return torch.Size([self.transformer.module.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("xlm", TEXT)
class XLMEncoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "xlm-mlm-en-2048"
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
reduce_output: str = "sum",
vocab_size: int = 30145,
emb_dim: int = 2048,
n_layers: int = 12,
n_heads: int = 16,
dropout: float = 0.1,
attention_dropout: float = 0.1,
gelu_activation: bool = True,
sinusoidal_embeddings: bool = False,
causal: bool = False,
asm: bool = False,
n_langs: int = 1,
use_lang_emb: bool = True,
max_position_embeddings: int = 512,
embed_init_std: float = 2048**-0.5,
layer_norm_eps: float = 1e-12,
init_std: float = 0.02,
bos_index: int = 0,
eos_index: int = 1,
pad_index: int = 2,
unk_index: int = 3,
mask_index: int = 5,
is_encoder: bool = True,
start_n_top: int = 5,
end_n_top: int = 5,
mask_token_id: int = 0,
lang_id: int = 0,
pad_token_id: int = 2,
bos_token_id: int = 0,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import XLMConfig, XLMModel
hf_config_params = dict(
vocab_size=vocab_size,
emb_dim=emb_dim,
n_layers=n_layers,
n_heads=n_heads,
dropout=dropout,
attention_dropout=attention_dropout,
gelu_activation=gelu_activation,
sinusoidal_embeddings=sinusoidal_embeddings,
causal=causal,
asm=asm,
n_langs=n_langs,
use_lang_emb=use_lang_emb,
max_position_embeddings=max_position_embeddings,
embed_init_std=embed_init_std,
layer_norm_eps=layer_norm_eps,
init_std=init_std,
bos_index=bos_index,
eos_index=eos_index,
pad_index=pad_index,
unk_index=unk_index,
mask_index=mask_index,
is_encoder=is_encoder,
start_n_top=start_n_top,
end_n_top=end_n_top,
mask_token_id=mask_token_id,
lang_id=lang_id,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
XLMModel, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(XLMModel, XLMConfig, hf_config_params, vocab_size)
        if encoder_config is not None:
            self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
        else:
            self.config = None
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
self.reduce_output = reduce_output
if self.reduce_output == "cls_pooled":
_cls_pooled_error_message(self.__class__.__name__)
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
hidden = transformer_outputs[0]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return XLMConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
# TODO(shreya): Confirm that this is it
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by BERT tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.module.config.hidden_size,
]
)
elif self.reduce_output == "concat":
            # add the -2 to account for start and end tokens.
return torch.Size([self.transformer.module.config.hidden_size * (self.max_sequence_length - 2)])
return torch.Size([self.transformer.module.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("gpt", TEXT)
class GPTEncoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "openai-gpt"
def __init__(
self,
max_sequence_length: int,
reduce_output: str = "sum",
use_pretrained: bool = True,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
vocab_size: int = 30522,
n_positions: int = 40478,
n_ctx: int = 512,
n_embd: int = 768,
n_layer: int = 12,
n_head: int = 12,
afn: str = "gelu",
resid_pdrop: float = 0.1,
embd_pdrop: float = 0.1,
attn_pdrop: float = 0.1,
layer_norm_epsilon: float = 1e-5,
initializer_range: float = 0.02,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import OpenAIGPTConfig, OpenAIGPTModel
hf_config_params = dict(
vocab_size=vocab_size,
n_positions=n_positions,
n_ctx=n_ctx,
n_embd=n_embd,
n_layer=n_layer,
n_head=n_head,
afn=afn,
resid_pdrop=resid_pdrop,
embd_pdrop=embd_pdrop,
attn_pdrop=attn_pdrop,
layer_norm_epsilon=layer_norm_epsilon,
initializer_range=initializer_range,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
OpenAIGPTModel, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(
OpenAIGPTModel, OpenAIGPTConfig, hf_config_params, vocab_size
)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.reduce_output = reduce_output
if self.reduce_output == "cls_pooled":
_cls_pooled_error_message(self.__class__.__name__)
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
hidden = transformer_outputs[0]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return GPTConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
return torch.Size([self.max_sequence_length, self.transformer.module.config.hidden_size])
elif self.reduce_output == "concat":
return torch.Size([self.transformer.module.config.hidden_size * self.max_sequence_length])
return torch.Size([self.transformer.module.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("gpt2", TEXT)
class GPT2Encoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "gpt2"
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
reduce_output: str = "sum",
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
vocab_size: int = 50257,
n_positions: int = 1024,
n_ctx: int = 1024,
n_embd: int = 768,
n_layer: int = 12,
n_head: int = 12,
n_inner: Optional[int] = None,
activation_function: str = "gelu",
resid_pdrop: float = 0.1,
embd_pdrop: float = 0.1,
attn_pdrop: float = 0.1,
layer_norm_epsilon: float = 1e-5,
initializer_range: float = 0.02,
scale_attn_weights: bool = True,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import GPT2Config, GPT2Model
hf_config_params = dict(
vocab_size=vocab_size,
n_positions=n_positions,
n_ctx=n_ctx,
n_embd=n_embd,
n_layer=n_layer,
n_head=n_head,
n_inner=n_inner,
activation_function=activation_function,
resid_pdrop=resid_pdrop,
embd_pdrop=embd_pdrop,
attn_pdrop=attn_pdrop,
layer_norm_epsilon=layer_norm_epsilon,
initializer_range=initializer_range,
scale_attn_weights=scale_attn_weights,
)
if use_pretrained:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
GPT2Model, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(GPT2Model, GPT2Config, hf_config_params, vocab_size)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
self.max_sequence_length = max_sequence_length
self.reduce_output = reduce_output
if self.reduce_output == "cls_pooled":
_cls_pooled_error_message(self.__class__.__name__)
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
hidden = transformer_outputs[0]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return GPT2Config
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
return torch.Size([self.max_sequence_length, self.transformer.module.config.hidden_size])
elif self.reduce_output == "concat":
return torch.Size([self.transformer.module.config.hidden_size * (self.max_sequence_length)])
return torch.Size([self.transformer.module.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("deberta", TEXT)
class DeBERTaEncoder(HFTextEncoderImpl):
def __init__(self, *args, **kwargs):
from transformers import DebertaV2Config as _DebertaV2Config
from transformers import DebertaV2Model
super().__init__(DebertaV2Model, _DebertaV2Config, DebertaV2Config, *args, **kwargs)
@staticmethod
def get_schema_cls():
return DebertaV2Config
@DeveloperAPI
@register_encoder("roberta", TEXT)
class RoBERTaEncoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "roberta-base"
def __init__(
self,
max_sequence_length,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "cls_pooled",
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
vocab_size: int = None,
pad_token_id: int = 1,
bos_token_id: int = 0,
eos_token_id: int = 2,
max_position_embeddings: int = 514,
type_vocab_size: int = 1,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import RobertaConfig, RobertaModel
hf_config_params = dict(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
RobertaModel, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(RobertaModel, RobertaConfig, hf_config_params, vocab_size)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
self.max_sequence_length = max_sequence_length
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :] # bos + [sent] + sep
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return RoBERTaConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
return torch.Size([self.max_sequence_length - 2, self.transformer.module.config.hidden_size])
elif self.reduce_output == "concat":
            # add the -2 to account for start and end tokens.
return torch.Size([self.transformer.module.config.hidden_size * (self.max_sequence_length - 2)])
return torch.Size([self.transformer.module.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("transformer_xl", TEXT)
class TransformerXLEncoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "transfo-xl-wt103"
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "sum",
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
vocab_size: int = 267735,
cutoffs: List[int] = [20000, 40000, 200000],
d_model: int = 1024,
d_embed: int = 1024,
n_head: int = 16,
d_head: int = 64,
d_inner: int = 4096,
div_val: int = 4,
pre_lnorm: bool = False,
n_layer: int = 18,
mem_len: int = 1600,
clamp_len: int = 1000,
same_length: bool = True,
proj_share_all_but_first: bool = True,
attn_type: int = 0,
sample_softmax: int = -1,
adaptive: bool = True,
dropout: float = 0.1,
dropatt: float = 0.0,
untie_r: bool = True,
init: str = "normal",
init_range: float = 0.01,
proj_init_std: float = 0.01,
init_std: float = 0.02,
layer_norm_epsilon: float = 1e-5,
eos_token_id: int = 0,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import TransfoXLConfig, TransfoXLModel
hf_config_params = dict(
vocab_size=vocab_size,
cutoffs=cutoffs,
d_model=d_model,
d_embed=d_embed,
n_head=n_head,
d_head=d_head,
d_inner=d_inner,
div_val=div_val,
pre_lnorm=pre_lnorm,
n_layer=n_layer,
mem_len=mem_len,
clamp_len=clamp_len,
same_length=same_length,
proj_share_all_but_first=proj_share_all_but_first,
attn_type=attn_type,
sample_softmax=sample_softmax,
adaptive=adaptive,
dropout=dropout,
dropatt=dropatt,
untie_r=untie_r,
init=init,
init_range=init_range,
proj_init_std=proj_init_std,
init_std=init_std,
layer_norm_epsilon=layer_norm_epsilon,
eos_token_id=eos_token_id,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
TransfoXLModel, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
config = TransfoXLConfig(**hf_config_params)
transformer = TransfoXLModel(config)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.reduce_output = reduce_output
if self.reduce_output == "cls_pooled":
_cls_pooled_error_message(self.__class__.__name__)
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: torch.Tensor = None) -> Dict[str, torch.Tensor]:
transformer_outputs = self.transformer.module(inputs)
hidden = transformer_outputs[0]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return TransformerXLConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
return torch.Size([self.max_sequence_length, self.transformer.module.config.d_model])
elif self.reduce_output == "concat":
            # add the -2 to account for start and end tokens.
return torch.Size([self.transformer.module.config.d_model * self.max_sequence_length])
return torch.Size([self.transformer.module.config.d_model])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("xlnet", TEXT)
class XLNetEncoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "xlnet-base-cased"
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "sum",
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
vocab_size: int = 32000,
d_model: int = 1024,
n_layer: int = 24,
n_head: int = 16,
d_inner: int = 4096,
ff_activation: str = "gelu",
untie_r: bool = True,
attn_type: str = "bi",
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
dropout: float = 0.1,
mem_len: Optional[int] = 512,
reuse_len: Optional[int] = None,
use_mems_eval: bool = True,
use_mems_train: bool = False,
bi_data: bool = False,
clamp_len: int = -1,
same_length: bool = False,
summary_type: str = "last",
summary_use_proj: bool = True,
summary_activation: str = "tanh",
summary_last_dropout: float = 0.1,
start_n_top: int = 5,
end_n_top: int = 5,
pad_token_id: int = 5,
bos_token_id: int = 1,
eos_token_id: int = 2,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import XLNetConfig, XLNetModel
hf_config_params = dict(
vocab_size=vocab_size,
d_model=d_model,
n_layer=n_layer,
n_head=n_head,
d_inner=d_inner,
ff_activation=ff_activation,
untie_r=untie_r,
attn_type=attn_type,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
dropout=dropout,
mem_len=mem_len,
reuse_len=reuse_len,
use_mems_eval=use_mems_eval,
use_mems_train=use_mems_train,
bi_data=bi_data,
clamp_len=clamp_len,
same_length=same_length,
summary_type=summary_type,
summary_use_proj=summary_use_proj,
summary_activation=summary_activation,
summary_last_dropout=summary_last_dropout,
start_n_top=start_n_top,
end_n_top=end_n_top,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
XLNetModel, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(XLNetModel, XLNetConfig, hf_config_params, vocab_size)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.max_sequence_length = max_sequence_length
self.reduce_output = reduce_output
if self.reduce_output == "cls_pooled":
_cls_pooled_error_message(self.__class__.__name__)
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
def forward(self, inputs: torch.Tensor, mask: torch.Tensor = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
hidden = transformer_outputs[0]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return XLNetConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
return torch.Size([self.max_sequence_length, self.transformer.module.config.d_model])
elif self.reduce_output == "concat":
return torch.Size([self.transformer.module.config.d_model * self.max_sequence_length])
return torch.Size([self.transformer.module.config.d_model])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("distilbert", TEXT)
class DistilBERTEncoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "distilbert-base-uncased"
def __init__(
self,
max_sequence_length: int,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "sum",
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
use_pretrained: bool = True,
vocab_size: int = 30522,
max_position_embeddings: int = 512,
sinusoidal_pos_embds: bool = False,
n_layers: int = 6,
n_heads: int = 12,
dim: int = 768,
hidden_dim: int = 3072,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation: Union[str, Callable] = "gelu",
initializer_range: float = 0.02,
qa_dropout: float = 0.1,
seq_classif_dropout: float = 0.2,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import DistilBertConfig, DistilBertModel
hf_config_params = dict(
vocab_size=vocab_size,
max_position_embeddings=max_position_embeddings,
sinusoidal_pos_embds=sinusoidal_pos_embds,
n_layers=n_layers,
n_heads=n_heads,
dim=dim,
hidden_dim=hidden_dim,
dropout=dropout,
attention_dropout=attention_dropout,
activation=activation,
initializer_range=initializer_range,
qa_dropout=qa_dropout,
seq_classif_dropout=seq_classif_dropout,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
DistilBertModel, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(
DistilBertModel, DistilBertConfig, hf_config_params, vocab_size
)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
self.reduce_output = reduce_output
if self.reduce_output == "cls_pooled":
_cls_pooled_error_message(self.__class__.__name__)
self.max_sequence_length = max_sequence_length
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.last_inputs = None
self.last_hidden = None
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
)
hidden = transformer_outputs[0][:, 1:-1, :]
self.last_inputs = inputs
self.last_hidden = hidden
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return DistilBERTConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by BERT tokenizer.
return torch.Size([self.max_sequence_length - 2, self.transformer.module.config.dim])
elif self.reduce_output == "concat":
            # add the -2 to account for start and end tokens.
return torch.Size([self.transformer.module.config.dim * (self.max_sequence_length - 2)])
return torch.Size([self.transformer.module.config.dim])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("ctrl", TEXT)
class CTRLEncoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "ctrl"
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "sum",
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
vocab_size: int = 246534,
n_positions: int = 256,
n_ctx: int = 256,
n_embd: int = 1280,
dff: int = 8192,
n_layer: int = 48,
n_head: int = 16,
resid_pdrop: float = 0.1,
embd_pdrop: float = 0.1,
attn_pdrop: float = 0.1,
layer_norm_epsilon: float = 1e-6,
initializer_range: float = 0.02,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import CTRLConfig, CTRLModel
hf_config_params = dict(
vocab_size=vocab_size,
n_positions=n_positions,
n_ctx=n_ctx,
n_embd=n_embd,
dff=dff,
n_layer=n_layer,
n_head=n_head,
resid_pdrop=resid_pdrop,
embd_pdrop=embd_pdrop,
attn_pdrop=attn_pdrop,
layer_norm_epsilon=layer_norm_epsilon,
initializer_range=initializer_range,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
CTRLModel, pretrained_model_name_or_path, **pretrained_kwargs
)
self.vocab_size = transformer.config.vocab_size
else:
transformer = self._init_transformer_from_scratch(CTRLModel, CTRLConfig, hf_config_params, vocab_size)
self.vocab_size = vocab_size
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.max_sequence_length = max_sequence_length
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
self.reduce_output = reduce_output
if self.reduce_output == "cls_pooled":
_cls_pooled_error_message(self.__class__.__name__)
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
hidden = transformer_outputs[0]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return CTRLConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
return torch.Size([self.max_sequence_length, self.transformer.module.config.n_embd])
elif self.reduce_output == "concat":
            # add the -2 to account for start and end tokens.
return torch.Size([self.transformer.module.config.n_embd * (self.max_sequence_length - 2)])
return torch.Size([self.transformer.module.config.n_embd])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("camembert", TEXT)
class CamemBERTEncoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "camembert-base"
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "cls-pooled",
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
vocab_size: int = 30522,
hidden_size: int = 768,
num_hidden_layers: int = 12,
num_attention_heads: int = 12,
intermediate_size: int = 3072,
hidden_act: Union[str, Callable] = "gelu",
hidden_dropout_prob: float = 0.1,
attention_probs_dropout_prob: float = 0.1,
max_position_embeddings: int = 512,
type_vocab_size: int = 2,
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
pad_token_id: int = 0,
gradient_checkpointing: bool = False,
position_embedding_type: str = "absolute",
classifier_dropout: float = None,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import CamembertConfig, CamembertModel
hf_config_params = dict(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
pad_token_id=pad_token_id,
gradient_checkpointing=gradient_checkpointing,
position_embedding_type=position_embedding_type,
classifier_dropout=classifier_dropout,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
CamembertModel, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(
CamembertModel, CamembertConfig, hf_config_params, vocab_size
)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
self.reduce_output = reduce_output
if not self.reduce_output == "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return CamemBERTConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by BERT tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.module.config.hidden_size,
]
)
elif self.reduce_output == "concat":
            # add the -2 to account for start and end tokens.
return torch.Size([self.transformer.module.config.hidden_size * (self.max_sequence_length - 2)])
return torch.Size([self.transformer.module.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("t5", TEXT)
class T5Encoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "t5-small"
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "sum",
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
vocab_size: int = 32128,
d_model: int = 512,
d_kv: int = 64,
d_ff: int = 2048,
num_layers: int = 6,
num_decoder_layers: Optional[int] = None,
num_heads: int = 8,
relative_attention_num_buckets: int = 32,
dropout_rate: float = 0.1,
layer_norm_eps: float = 1e-6,
initializer_factor: float = 1,
feed_forward_proj: str = "relu",
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import T5Config, T5Model
hf_config_params = dict(
vocab_size=vocab_size,
d_model=d_model,
d_kv=d_kv,
d_ff=d_ff,
num_layers=num_layers,
num_decoder_layers=num_decoder_layers,
num_heads=num_heads,
relative_attention_num_buckets=relative_attention_num_buckets,
dropout_rate=dropout_rate,
layer_norm_eps=layer_norm_eps,
initializer_factor=initializer_factor,
feed_forward_proj=feed_forward_proj,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
T5Model, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(T5Model, T5Config, hf_config_params, vocab_size)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.max_sequence_length = max_sequence_length
self.reduce_output = reduce_output
if self.reduce_output == "cls_pooled":
_cls_pooled_error_message(self.__class__.__name__)
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
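        # T5Model is an encoder-decoder, so the same token ids are fed as decoder inputs to obtain decoder hidden states.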
transformer_outputs = self.transformer.module(
inputs,
decoder_input_ids=inputs,
attention_mask=mask,
)
        hidden = transformer_outputs[0][:, 0:-1, :]  # Drop the trailing EOS token embedding.
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return T5Config
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 1 to remove EOS token added by T5 tokenizer.
return torch.Size(
[
self.max_sequence_length - 1,
self.transformer.module.config.hidden_size,
]
)
elif self.reduce_output == "concat":
            # Subtract 1 to account for the EOS token.
return torch.Size([self.transformer.module.config.hidden_size * (self.max_sequence_length - 1)])
return torch.Size([self.transformer.module.config.d_model])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("flaubert", TEXT)
class FlauBERTEncoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "flaubert/flaubert_small_cased"
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "sum",
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
vocab_size: int = 30145,
pre_norm: bool = False,
layerdrop: float = 0.0,
emb_dim: int = 2048,
n_layers: int = 12,
n_heads: int = 16,
dropout: float = 0.1,
attention_dropout: float = 0.1,
gelu_activation: bool = True,
sinusoidal_embeddings: bool = False,
causal: bool = False,
asm: bool = False,
n_langs: int = 1,
use_lang_emb: bool = True,
max_position_embeddings: int = 512,
embed_init_std: float = 2048**-0.5,
init_std: int = 0.02,
layer_norm_eps: float = 1e-12,
bos_index: int = 0,
eos_index: int = 1,
pad_index: int = 2,
unk_index: int = 3,
mask_index: int = 5,
is_encoder: bool = True,
mask_token_id: int = 0,
lang_id: int = 1,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import FlaubertConfig, FlaubertModel
hf_config_params = dict(
vocab_size=vocab_size,
pre_norm=pre_norm,
layerdrop=layerdrop,
emb_dim=emb_dim,
n_layers=n_layers,
n_heads=n_heads,
dropout=dropout,
            attention_dropout=attention_dropout,
gelu_activation=gelu_activation,
sinusoidal_embeddings=sinusoidal_embeddings,
causal=causal,
asm=asm,
n_langs=n_langs,
use_lang_emb=use_lang_emb,
max_position_embeddings=max_position_embeddings,
embed_init_std=embed_init_std,
init_std=init_std,
layer_norm_eps=layer_norm_eps,
bos_index=bos_index,
eos_index=eos_index,
pad_index=pad_index,
unk_index=unk_index,
mask_index=mask_index,
is_encoder=is_encoder,
mask_token_id=mask_token_id,
lang_id=lang_id,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
FlaubertModel, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(
FlaubertModel, FlaubertConfig, hf_config_params, vocab_size
)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.max_sequence_length = max_sequence_length
self.reduce_output = reduce_output
if self.reduce_output == "cls_pooled":
_cls_pooled_error_message(self.__class__.__name__)
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return FlauBERTConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.module.config.hidden_size,
]
)
elif self.reduce_output == "concat":
            # Subtract 2 to account for the start and end tokens.
return torch.Size([self.transformer.module.config.hidden_size * (self.max_sequence_length - 2)])
return torch.Size([self.transformer.module.config.emb_dim])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("electra", TEXT)
class ELECTRAEncoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "google/electra-small-discriminator"
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
reduce_output: str = "sum",
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
vocab_size: int = 30522,
embedding_size: int = 128,
hidden_size: int = 256,
num_hidden_layers: int = 12,
num_attention_heads: int = 4,
intermediate_size: int = 1024,
hidden_act: Union[str, Callable] = "gelu",
hidden_dropout_prob: float = 0.1,
attention_probs_dropout_prob: float = 0.1,
max_position_embeddings: int = 512,
type_vocab_size: int = 2,
initializer_range: float = 0.02,
layer_norm_eps: float = 1e-12,
position_embedding_type: str = "absolute",
classifier_dropout: Optional[float] = None,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import ElectraConfig, ElectraModel
hf_config_params = dict(
vocab_size=vocab_size,
embedding_size=embedding_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
position_embedding_type=position_embedding_type,
classifier_dropout=classifier_dropout,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
ElectraModel, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(ElectraModel, ElectraConfig, hf_config_params, vocab_size)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.max_sequence_length = max_sequence_length
self.reduce_output = reduce_output
if self.reduce_output == "cls_pooled":
_cls_pooled_error_message(self.__class__.__name__)
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
hidden = transformer_outputs[0][:, 1:-1, :]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return ELECTRAConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.module.config.hidden_size,
]
)
elif self.reduce_output == "concat":
            # Subtract 2 to account for the start and end tokens.
return torch.Size([self.transformer.module.config.hidden_size * (self.max_sequence_length - 2)])
return torch.Size([self.transformer.module.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("longformer", TEXT)
class LongformerEncoder(HFTextEncoder):
DEFAULT_MODEL_NAME = "allenai/longformer-base-4096"
def __init__(
self,
max_sequence_length: int,
use_pretrained: bool = True,
attention_window: Union[List[int], int] = 512,
sep_token_id: int = 2,
pretrained_model_name_or_path: str = DEFAULT_MODEL_NAME,
saved_weights_in_checkpoint: bool = False,
reduce_output: Optional[str] = "cls_pooled",
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
vocab_size: int = 50265,
num_tokens: Optional[int] = None,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import LongformerConfig, LongformerModel
hf_config_params = dict(
attention_window=attention_window,
sep_token_id=sep_token_id,
vocab_size=vocab_size,
**kwargs,
)
if use_pretrained and not saved_weights_in_checkpoint:
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
LongformerModel, pretrained_model_name_or_path, **pretrained_kwargs
)
else:
transformer = self._init_transformer_from_scratch(
LongformerModel, LongformerConfig, hf_config_params, vocab_size
)
if encoder_config is not None:
self.config = self._init_config(transformer, hf_config_params.keys(), encoder_config)
else:
self.config = None
self.reduce_output = reduce_output
        if self.reduce_output != "cls_pooled":
self.reduce_sequence = SequenceReducer(reduce_mode=reduce_output)
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
self.max_sequence_length = max_sequence_length
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None):
if mask is not None:
mask = mask.to(torch.int32)
transformer_outputs = self.transformer.module(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
if self.reduce_output == "cls_pooled":
hidden = transformer_outputs[1]
else:
hidden = transformer_outputs[0][:, 1:-1, :] # bos + [sent] + sep
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return LongformerConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# Subtract 2 to remove CLS and PAD tokens added by Longformer (== Roberta) tokenizer.
return torch.Size(
[
self.max_sequence_length - 2,
self.transformer.module.config.hidden_size,
]
)
elif self.reduce_output == "concat":
            # Subtract 2 to account for the start and end tokens.
return torch.Size([self.transformer.module.config.hidden_size * (self.max_sequence_length - 2)])
return torch.Size([self.transformer.module.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("auto_transformer", TEXT)
class AutoTransformerEncoder(HFTextEncoder):
DEFAULT_MODEL_NAME = None
def __init__(
self,
pretrained_model_name_or_path: str,
max_sequence_length: int,
reduce_output: str = "sum",
trainable: bool = False,
adapter: Optional[BaseAdapterConfig] = None,
vocab_size: Optional[int] = None,
pretrained_kwargs: Dict = None,
encoder_config=None,
**kwargs,
):
super().__init__()
from transformers import AutoModel
pretrained_kwargs = pretrained_kwargs or {}
transformer, _ = load_pretrained_hf_model_with_hub_fallback(
AutoModel, pretrained_model_name_or_path, **pretrained_kwargs
)
self._maybe_resize_token_embeddings(transformer, vocab_size)
self.config = self._init_config(transformer, [], encoder_config)
# Precompute the set of params that are included in the forward signature of the AutoModel implementation so
# we can filter out unused params during the `forward` call.
self.forward_kwargs = set(inspect.signature(transformer.forward).parameters.keys())
self.transformer = self._wrap_transformer(transformer, adapter, trainable)
self.reduce_output = reduce_output
if self.reduce_output != "cls_pooled":
self.reduce_sequence = SequenceReducer(
reduce_mode=reduce_output, encoding_size=self.transformer.module.config.hidden_size
)
self.max_sequence_length = max_sequence_length
def _maybe_resize_token_embeddings(self, transformer, vocab_size: Optional[int] = None):
"""Overridden because AutoModel should use its own vocab size unless vocab size is explicitly specified."""
if vocab_size is not None:
transformer.resize_token_embeddings(vocab_size)
self.vocab_size = vocab_size
else:
self.vocab_size = transformer.config.vocab_size
def forward(self, inputs: torch.Tensor, mask: Optional[torch.Tensor] = None):
if mask is not None:
mask = mask.to(torch.int32)
# The forward signature of AutoModel is not consistent across implementations, so we need to make sure we're
# only passing in params included in the forward signature.
kwargs = dict(
input_ids=inputs,
attention_mask=mask,
token_type_ids=torch.zeros_like(inputs),
)
kwargs = {k: v for k, v in kwargs.items() if k in self.forward_kwargs}
transformer_outputs = self.transformer.module(**kwargs)
if self.reduce_output == "cls_pooled":
# this works only if the user know that the specific model
# they want to use has the same outputs of
# the BERT base class call() function
hidden = transformer_outputs["pooler_output"]
else:
hidden = transformer_outputs["last_hidden_state"]
hidden = self.reduce_sequence(hidden, self.reduce_output)
return {"encoder_output": hidden}
@staticmethod
def get_schema_cls():
return AutoTransformerConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
if self.reduce_output is None:
# TODO(justin): This may need to be conditioned on which AutoModel gets chosen.
return torch.Size([self.max_sequence_length, self.transformer.module.config.hidden_size])
if self.reduce_output == "concat":
return torch.Size(
[
self.max_sequence_length * self.transformer.module.config.hidden_size,
]
)
return torch.Size([self.transformer.module.config.hidden_size])
@property
def input_dtype(self):
return torch.int32
@DeveloperAPI
@register_encoder("tf_idf", [TEXT])
class TfIdfEncoder(Encoder):
def __init__(
self,
max_sequence_length: int,
encoder_config=None,
str2idf=None,
vocab=None,
vocab_size: int = None,
**kwargs,
):
super().__init__()
self.config = encoder_config
self.max_sequence_length = max_sequence_length
self.vocab_size = vocab_size
logger.debug(f" {self.name}")
# Convert mapping of token -> frequency to a dense array
idf = np.zeros(vocab_size)
for i, s in enumerate(vocab):
idf[i] = str2idf[s]
self.idf = torch.from_numpy(idf).float().unsqueeze(0)
def forward(self, t: torch.Tensor, mask=None):
# Compute the term frequency within each row
tf = torch.stack([t_i.bincount(minlength=self.vocab_size) for t_i in torch.unbind(t.long())])
# Normalize the term frequency by the number of tokens in each row
tf = tf / tf.sum(dim=1).unsqueeze(-1)
# Multiply the term frequency by the inverse document frequency
tfidf = tf * self.idf
return {"encoder_output": tfidf}
@staticmethod
def get_schema_cls():
return TfIdfEncoderConfig
@property
def input_shape(self) -> torch.Size:
return torch.Size([self.max_sequence_length])
@property
def output_shape(self) -> torch.Size:
return torch.Size([self.vocab_size])
def get_embedding_layer(self) -> nn.Module:
return self
| [] |
2024-01-10 | EPFLRocketTeam/real_time_simulator | postProcess~fancy_plotter.py | from bokeh.plotting import figure, curdoc
from bokeh.resources import CDN
from bokeh.embed import file_html
from bokeh.io import show
from bokeh.layouts import gridplot, layout, row, column
from bokeh.models import CheckboxGroup, CustomJS, ColumnDataSource
from bokeh.models import Button, RadioGroup, FileInput, TextInput, RangeSlider, Slider, Panel, Tabs
from bokeh.themes import Theme
import rospy
import rospkg
import rosbag
from real_time_simulator.msg import FSM
from real_time_simulator.msg import State
from real_time_simulator.msg import Control
from real_time_simulator.msg import Sensor
from real_time_simulator.msg import Trajectory
from real_time_simulator.msg import Waypoint
import numpy as np
import math
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation as R
from scipy import interpolate
import time
# Arrays with GNC data
simu_data = None
nav_data = None
feedback_data = None
control_data = None
target_data = None
# Display parameters
tStart = -1
tEnd = 40
target_iter = 0
point_spacing = 350
# Data type to display
simu = True
nav = False
horizon = False
def fill_full_state(bag, topic = ""):
msg_count = bag.get_message_count(topic)
np_data = np.zeros((14, msg_count))
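    # Row layout: 0-2 position, 3-5 linear speed, 6-8 Euler angles, 9-11 angular rates, 12 propeller mass, 13 timestamp.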
attitude = np.zeros((msg_count,4))
i = 0
for _, msg, t in bag.read_messages(topics=[topic]):
new_attitude = msg.pose.orientation
attitude[i] = np.array([new_attitude.x, new_attitude.y, new_attitude.z, new_attitude.w])
np_data[13, i] = t.to_sec()
np_data[0, i] = msg.pose.position.x
np_data[1, i] = msg.pose.position.y
np_data[2, i] = msg.pose.position.z
np_data[3, i] = msg.twist.linear.x
np_data[4, i] = msg.twist.linear.y
np_data[5, i] = msg.twist.linear.z
np_data[9, i] = msg.twist.angular.x
np_data[10, i] = msg.twist.angular.y
np_data[11, i] = msg.twist.angular.z
np_data[12, i] = msg.propeller_mass
i = i+1
r = R.from_quat(attitude)
attitude_eul = r.as_euler('xyz', degrees=True)
np_data[6:9, :] = np.transpose(attitude_eul)
np_data[9:12, :] = np.rad2deg(np_data[9:12, :] )
return np_data
def load_log_file(attr, old, new):
global simu_data
global nav_data
global feedback_data
global control_data
global target_data
rospack = rospkg.RosPack()
bag = rosbag.Bag(rospack.get_path('real_time_simulator') + '/log/' + new)
# Get first time message for synchronization
for topic, msg, t in bag.read_messages(topics=['/fsm_pub']):
if msg.state_machine != "Idle":
time_init = t.to_sec()
break
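    # The simulator ground truth and the Kalman-filter estimate share the same 14-row state layout.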
simu_data = fill_full_state(bag, topic = "/rocket_state")
nav_data = fill_full_state(bag, topic = "/kalman_rocket_state")
control_data = np.zeros((5, bag.get_message_count('/control_pub')))
i = 0
for topic, msg, t in bag.read_messages(topics=['/control_pub']):
control_data[0, i] = msg.force.x
control_data[1, i] = msg.force.y
control_data[2, i] = msg.force.z
control_data[3, i] = msg.torque.z
control_data[4, i] = t.to_sec()
i = i+1
feedback_data = np.zeros((5, bag.get_message_count('/control_measured')))
i = 0
for topic, msg, t in bag.read_messages(topics=['/control_measured']):
feedback_data[0, i] = msg.force.x
feedback_data[1, i] = msg.force.y
feedback_data[2, i] = msg.force.z
feedback_data[3, i] = msg.torque.z
feedback_data[4, i] = t.to_sec()
i = i+1
# Guidance optimal trajectory
target_positionX = []
target_positionY = []
target_positionZ = []
target_speedZ = []
target_prop_mass = []
time_target = []
thrust_target = []
for topic, msg, t in bag.read_messages(topics=['/target_trajectory']):
new_waypoint = msg.trajectory
time_target.append([point.time for point in new_waypoint])
target_positionX.append([point.position.x for point in new_waypoint])
target_positionY.append([point.position.y for point in new_waypoint])
target_positionZ.append([point.position.z for point in new_waypoint])
target_speedZ.append([point.speed.z for point in new_waypoint])
target_prop_mass.append([point.propeller_mass for point in new_waypoint])
thrust_target.append([point.thrust for point in new_waypoint])
bag.close()
target_data = [target_positionZ, target_speedZ, target_prop_mass, thrust_target, time_target, target_positionX, target_positionY]
print("Apogee: {}".format(max(simu_data[2])))
# Synchronize time
control_data[4] = control_data[4] - time_init
simu_data[13] = simu_data[13] - time_init
nav_data[13] = nav_data[13] - time_init
feedback_data[4] = feedback_data[4] - time_init
update_plot()
#select_target[::20,:] = True
def update_range(attr, old, new):
global tStart
global tEnd
tStart = new[0]
tEnd = new[1]
update_plot()
def update_iteration(attr, old, new):
global target_iter
target_iter = new
update_plot()
def update_nav_points(attr, old, new):
global point_spacing
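    # Map the 0-100 slider value to a subsampling stride: a higher density gives a smaller stride between plotted points.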
point_spacing = max(1, int(np.size(nav_data, 1)*(1 - np.log10(20*new+1)/3.3)))
update_plot()
## ----------- Plot flight data ----------------------
doc = curdoc()
doc.theme = Theme(json={'attrs': {
# apply defaults to Figure properties
'Figure': {
'outline_line_color': "DimGrey",
'min_border': 10,
'background_fill_color': "#FFFCFC",
'plot_width':100,
},
'Line': {
'line_width': 2,
},
'Axis': {
'axis_line_color': "DimGrey",
},
'Title': {
'text_font_size': "12px",
'text_line_height': 0.3,
'align':'center',
},
'Legend': {
'label_text_font_size': "10px",
'background_fill_alpha': 0.5,
'location': 'bottom_right',
},
}})
# Create figures for plots
f_posXY = figure(title="Position [m]", title_location="left", x_axis_label='Time [s]')
f_posZ = figure(title="Position [m]", title_location="left", x_axis_label='Time [s]')
f_speedXY = figure(title="Speed [m/s]", title_location="left", x_axis_label='Time [s]')
f_speedZ = figure(title="Speed [m/s]", title_location="left", x_axis_label='Time [s]')
f_attitude = figure(title="Euler angle [°]", title_location="left", x_axis_label='Time [s]')
f_omega = figure(title="Angular rate [°/s]", title_location="left", x_axis_label='Time [s]')
f_thrust = figure(title="Main thrust [N]", title_location="left", x_axis_label='Time [s]')
f_force = figure(title="Side force [N]", x_axis_label='Time [s]')
f_mass = figure(title="Propellant mass [kg]", x_axis_label='Time [s]')
f_posZ_sel = figure(plot_width=600, plot_height=600)
f_speedZ_sel = figure(plot_width=600, plot_height=600)
f_thrust_sel = figure(plot_width=600, plot_height=600)
f_selectable = figure()
f_selectable.toolbar_location = "above"
f_posZ.toolbar_location = "above"
f_speedZ.toolbar_location = "below"
f_thrust.toolbar_location = "below"
# Create empty source for data
source_simu = ColumnDataSource(data=dict( t=[],
posX=[], posY=[], posZ=[],
speedX=[], speedY=[], speedZ=[],
attX=[], attY=[], attZ=[],
omegaX=[], omegaY=[], omegaZ=[],
mass = []
))
source_nav = ColumnDataSource(data=dict( t=[],
posX=[], posY=[], posZ=[],
speedX=[], speedY=[], speedZ=[],
attX=[], attY=[], attZ=[],
omegaX=[], omegaY=[], omegaZ=[],
mass = []
))
source_control = ColumnDataSource(data=dict(t=[],
thrust=[],
forceX=[],
forceY=[],
torqueZ=[]))
source_feedback = ColumnDataSource(data=dict( t=[],
thrust=[],
forceX=[],
forceY=[],
torqueZ=[]))
source_target = ColumnDataSource(data = dict( t=[],
posZ=[],
speedZ=[],
mass=[],
thrust=[]))
# Map simulation data to plots
f_posXY.line('t', 'posX', source=source_simu, color = "SteelBlue", legend_label="X")
f_posXY.line('t', 'posY', source=source_simu, color = "Coral", legend_label="Y")
f_posZ.line('t', 'posZ', source=source_simu, color = "Teal", legend_label="simu Z")
f_speedXY.line('t', 'speedX', source=source_simu, color = "SteelBlue", legend_label="X")
f_speedXY.line('t', 'speedY', source=source_simu, color = "Coral", legend_label="Y")
f_speedZ.line('t', 'speedZ', source=source_simu, color = "Teal", legend_label="simu Z")
f_attitude.line('t', 'attX', source=source_simu, color = "SteelBlue", legend_label="X")
f_attitude.line('t', 'attY', source=source_simu, color = "Coral", legend_label="Y")
f_attitude.line('t', 'attZ', source=source_simu, color = "Teal", legend_label="Z")
f_omega.line('t', 'omegaX', source=source_simu, color = "SteelBlue", legend_label="X")
f_omega.line('t', 'omegaY', source=source_simu, color = "Coral", legend_label="Y")
f_omega.line('t', 'omegaZ', source=source_simu, color = "Teal", legend_label="Z")
f_mass.line('t', 'mass', source=source_simu, color = "SeaGreen")
# Map navigation data to plots
f_posXY.scatter('t', 'posX', source=source_nav, marker = "+", line_dash='dashed', color = "SteelBlue", legend_label="X")
f_posXY.scatter('t', 'posY', source=source_nav, marker = "+", line_dash='dashed', color = "Coral", legend_label="Y")
f_posZ.scatter('t', 'posZ', source=source_nav, marker = "+", line_dash='dashed', color = "Teal", size=8, legend_label="est. Z")
f_speedXY.scatter('t', 'speedX', source=source_nav, marker = "+", line_dash='dashed', color = "SteelBlue", legend_label="X")
f_speedXY.scatter('t', 'speedY', source=source_nav, marker = "+", line_dash='dashed', color = "Coral", legend_label="Y")
f_speedZ.scatter('t', 'speedZ', source=source_nav, marker = "+", line_dash='dashed', color = "Teal", size=8, legend_label="est. Z")
f_attitude.scatter('t', 'attX', source=source_nav, marker = "+", line_dash='dashed', color = "SteelBlue", legend_label="X")
f_attitude.scatter('t', 'attY', source=source_nav, marker = "+", line_dash='dashed', color = "Coral", legend_label="Y")
f_attitude.scatter('t', 'attZ', source=source_nav, marker = "+", line_dash='dashed', color = "Teal", legend_label="Z")
f_omega.scatter('t', 'omegaX', source=source_nav, marker = "+", line_dash='dashed', color = "SteelBlue", legend_label="X")
f_omega.scatter('t', 'omegaY', source=source_nav, marker = "+", line_dash='dashed', color = "Coral", legend_label="Y")
f_omega.scatter('t', 'omegaZ', source=source_nav, marker = "+", line_dash='dashed', color = "Teal", legend_label="Z")
f_mass.scatter('t', 'mass', source=source_nav, marker = "+", line_dash='dashed', color = "SeaGreen")
# Map measured forces to plots
f_thrust.line('t', 'thrust', source=source_feedback, color = "FireBrick", legend_label="measured")
f_force.line('t', 'forceX', source=source_feedback, color = "SteelBlue", legend_label="X")
f_force.line('t', 'forceY', source=source_feedback, color = "Coral", legend_label="Y")
f_force.line('t', 'torqueZ', source=source_feedback, color = "Teal", legend_label="Z torque")
# Map controlled forces to plots
f_thrust.scatter('t', 'thrust', source=source_control, marker = "+", line_dash='dashed', color = "Orange", size=8, legend_label="command")
f_thrust.line('t', 'thrust', source=source_control, line_alpha=0.5, color = "Orange")
f_force.scatter('t', 'forceX', source=source_control, marker = "+", line_dash='dashed', color = "SteelBlue", legend_label="X")
f_force.scatter('t', 'forceY', source=source_control, marker = "+", line_dash='dashed', color = "Coral", legend_label="Y")
f_force.scatter('t', 'torqueZ', source=source_control, marker = "+", line_dash='dashed', color = "Teal", legend_label="Z torque")
# Map target from guidance to plots
f_thrust.line('t', 'thrust', source=source_target, line_alpha=0.5, line_width = 3, color="Orange")
f_posZ.line('t', 'posZ', source=source_target, line_alpha=0.5, line_width = 3, color="Teal")
f_posXY.line('t', 'posX', source=source_target, line_alpha=0.5, line_width = 3, color="SteelBlue")
f_posXY.line('t', 'posY', source=source_target, line_alpha=0.5, line_width = 3, color="Coral")
f_speedZ.line('t', 'speedZ', source=source_target, line_alpha=0.5, line_width = 3, color="Teal")
f_mass.line('t', 'mass', source=source_target, line_alpha=0.5, line_width = 3, color="SeaGreen")
# Selectable plot
tab1 = Panel(child=f_posZ, title="Altitude [m]")
tab2 = Panel(child=f_thrust, title="Main thrust [N]")
tab3 = Panel(child=f_speedZ, title="Speed [m/s]")
select_tabs = Tabs(tabs=[tab1, tab2, tab3], width = 800, height = 600)
# Create Checkbox to select type of data to display
LABELS = ["Simulation", "Navigation", "Horizon"]
check_plot_type = CheckboxGroup(labels=LABELS, active=[0], margin = (20,5,20,30), background="Gainsboro", height = 60, width = 120)
# Create widget to define file path
file_name = TextInput(margin = (20,5,70,30))
file_name.on_change('value', load_log_file)
# Create slider to change displayed time
range_slider = RangeSlider(start=-1, end=60, value=(-1,30), step=.1, title="Time", width = 800, height = 10)
range_slider.on_change('value', update_range)
# Create slider to change displayed guidance iteration
iteration_slider = Slider(start=0, end=100, value=0, step=1, title="Guidance iteration", width = 250, margin = (20,5,70,30))
iteration_slider.on_change('value', update_iteration)
# Create slider to change density of displayed navigation points
nav_slider = Slider(start=0, end=100, value=50, step=1, title="Navigation points density [%]", margin = (20,5,70,30))
nav_slider.on_change('value', update_nav_points)
file_input = FileInput()
file_input.on_change('filename', load_log_file)
# Create complete layout with all widgets
grid_plot = gridplot([[f_posXY, f_posZ, f_attitude], [f_speedXY, f_speedZ, f_omega], [f_mass, f_thrust, f_force]], plot_width=450, plot_height=350)
main_plot = column(range_slider, grid_plot)
param_col = column(check_plot_type, file_input, iteration_slider, nav_slider)
doc_layout = row(main_plot, param_col, select_tabs)
doc.add_root(doc_layout)
def update_plot():
    if simu and simu_data is not None and feedback_data is not None:
select = np.logical_and(simu_data[13]>tStart, simu_data[13] <tEnd)
select_feedback = np.logical_and(feedback_data[4]>tStart, feedback_data[4] <tEnd)
source_simu.data = dict(t=simu_data[13][select],
posX=simu_data[0][select],
posY=simu_data[1][select],
posZ=simu_data[2][select],
speedX=simu_data[3][select],
speedY=simu_data[4][select],
speedZ=simu_data[5][select],
attX=simu_data[6][select],
attY=simu_data[7][select],
attZ=simu_data[8][select],
omegaX=simu_data[9][select],
omegaY=simu_data[10][select],
omegaZ=simu_data[11][select],
mass = simu_data[12][select])
source_feedback.data=dict(t=feedback_data[4][select_feedback],
thrust=feedback_data[2][select_feedback],
forceX=feedback_data[0][select_feedback],
forceY=feedback_data[1][select_feedback],
torqueZ=feedback_data[3][select_feedback])
else:
source_simu.data=dict(t=[],
posX=[], posY=[], posZ=[],
speedX=[], speedY=[], speedZ=[],
attX=[], attY=[], attZ=[],
omegaX=[], omegaY=[], omegaZ=[],
mass = []
)
        source_feedback.data=dict(t=[],
thrust=[],
forceX=[],
forceY=[],
torqueZ=[])
    if nav and nav_data is not None and control_data is not None:
select_est = np.logical_and(nav_data[13]>tStart, nav_data[13] <tEnd)
select_control = np.logical_and(control_data[4]>tStart, control_data[4] <tEnd)
source_nav.data = dict(t=nav_data[13][select_est][::point_spacing],
posX=nav_data[0][select_est][::point_spacing],
posY=nav_data[1][select_est][::point_spacing],
posZ=nav_data[2][select_est][::point_spacing],
speedX=nav_data[3][select_est][::point_spacing],
speedY=nav_data[4][select_est][::point_spacing],
speedZ=nav_data[5][select_est][::point_spacing],
attX=nav_data[6][select_est][::point_spacing],
attY=nav_data[7][select_est][::point_spacing],
attZ=nav_data[8][select_est][::point_spacing],
omegaX=nav_data[9][select_est][::point_spacing],
omegaY=nav_data[10][select_est][::point_spacing],
omegaZ=nav_data[11][select_est][::point_spacing],
mass = nav_data[12][select_est][::point_spacing])
source_control.data=dict(t=control_data[4][select_control],
thrust=control_data[2][select_control],
forceX=control_data[0][select_control],
forceY=control_data[1][select_control],
torqueZ=control_data[3][select_control])
else:
source_nav.data=dict( t=[],
posX=[], posY=[], posZ=[],
speedX=[], speedY=[], speedZ=[],
attX=[], attY=[], attZ=[],
omegaX=[], omegaY=[], omegaZ=[],
mass = []
)
source_control.data=dict(t=[],
thrust=[],
forceX=[],
forceY=[],
torqueZ=[])
    if horizon and target_data is not None:
target_path = np.array([data[target_iter] for data in target_data])
select_target = np.logical_and(target_path[4]>tStart, target_path[4] <tEnd)
source_target.data = dict(t=target_path[4][select_target],
posX=target_path[5][select_target],
posY=target_path[6][select_target],
posZ=target_path[0][select_target],
speedZ=target_path[1][select_target],
mass=target_path[2][select_target],
thrust=target_path[3][select_target])
def check_plot_type_handler(new):
global simu
global nav
global horizon
simu = True if 0 in new else False
nav = True if 1 in new else False
horizon = True if 2 in new else False
update_plot()
check_plot_type.on_click(check_plot_type_handler)
| [] |
2024-01-10 | OmarHHM/celebrities | api_service.py | import openai
import requests
import io
from db_config import MessageModel
class ApiService:
MAX_VOICE_CALLS = 2
conversations = {}
@classmethod
def get_response_openAI(cls, promp):
response = openai.Completion.create(
model='text-davinci-003',
prompt=promp,
max_tokens=200,
temperature=0.1,
stop=None,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.6,
)
generate_response = response.choices[0].text.strip()
return generate_response
@classmethod
def get_response_chat_openAI(cls, messages):
response = openai.ChatCompletion.create(
model='gpt-3.5-turbo-0301',
messages=messages,
max_tokens=200,
temperature=0.9,
stop=None,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.6,
)
generate_response = response['choices'][0]['message']['content']
return generate_response
@classmethod
def validate(cls, request):
promp: str = f"""Validate if the following message is directed to Shakira or if it contains the word @onealdeaBot.
If so, return Y; otherwise, return N. Message = {request}"""
return cls.get_response_openAI(promp)
@classmethod
def texto_to_voice(cls, response_chatgpt, bot_data):
CHUNK_SIZE = 1024
url = f"""https://api.elevenlabs.io/v1/text-to-speech/{bot_data.voice_id}"""
headers = {
"Accept": "audio/mpeg",
"Content-Type": "application/json",
"xi-api-key": "xxxxxx"
}
data = {
"text": response_chatgpt,
"model_id": "eleven_multilingual_v1",
"voice_settings": {
"stability": 0.5,
"similarity_boost": 0.5
}
}
response = requests.post(url, json=data, headers=headers)
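        # Stream the returned MP3 audio into an in-memory buffer.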
audio_bytes = io.BytesIO()
for chunk in response.iter_content(chunk_size=CHUNK_SIZE):
if chunk:
audio_bytes.write(chunk)
audio_bytes.seek(0)
return audio_bytes
@classmethod
def generate_response(cls, user_id, fan_name, request, bot_data, username):
messages = [
{
"role": "system",
"content": f"""
Asume el rol del artista {bot_data.name}.
Tus respuestas seran orientadas a tus fans.
No debes usar insultos.
Debes responder de manera amable y alegre.
Debes user siempre el nombre de tu fan, el cual es {fan_name}."""
},
]
messages_by_username = MessageModel.query.filter_by(username=username).all()
for message in messages_by_username:
messages.append({
"role": "user",
"content": message.user
})
messages.append({
"role": "assistant",
"content": message.bot
})
messages.append({
"role": "user",
"content": request
})
response = cls.get_response_chat_openAI(messages)
if user_id not in cls.conversations:
cls.conversations[user_id] = 0
return cls.texto_to_voice(response,bot_data) , response
return response, response | [
"Validate if the following message is directed to Shakira or if it contains the word @onealdeaBot.\n If so, return Y; otherwise, return N. Message = PLACEHOLDER"
] |
2024-01-10 | maduardar/chatGPT-telebot | buttons~others.py | from telegram.ext import ContextTypes, ConversationHandler
from telegram import (
Update,
ReplyKeyboardRemove)
import time
import json
import html
import openai
import asyncio
import traceback
from pathlib import Path
from typing import Dict
from db.MySqlConn import config
from config import (
TYPING_REPLY,
logger)
def get_project_root() -> Path:
return Path(__file__).resolve().parent.parent
async def non_text_handler(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
"""Stores the photos and asks for a location."""
project_root = get_project_root()
user = update.message.from_user
if len(update.message.photo) != 0:
await update.message.reply_text(text='Функция отправки изображения пока недоступна! \n'
'Пожалуйста, используй текст, чтобы задавать вопросы!')
photo_file = await update.message.photo[-1].get_file()
# can't get photo's name
await photo_file.download_to_drive(
f'{project_root}/data/photos/{user.name}-{time.strftime("%Y%m%d-%H%M%S")}.jpg')
logger.info("Photo of %s: %s", user.first_name, 'user_photo.jpg')
else:
await update.message.reply_text(text='Хм, что-то странное! \nПожалуйста, используй текст,'
' чтобы задавать вопросы!')
if update.message.document:
file = await update.message.document.get_file()
await file.download_to_drive(
f'{project_root}/data/documents/{user.name}-{time.strftime("%Y%m%d-%H%M%S")}.jpg')
if update.message.video:
video = await update.message.video.get_file()
await video.download_to_drive(
f'{project_root}/data/videos/{user.name}-{time.strftime("%Y%m%d-%H%M%S")}.jpg')
return TYPING_REPLY
def facts_to_str(user_data: Dict[str, str]) -> str:
"""Helper function for formatting the gathered user info."""
facts = [f'{key} - {value}' for key, value in user_data.items()]
return "\n".join(facts).join(['\n', '\n'])
async def done(update: Update, context: ContextTypes.DEFAULT_TYPE) -> int:
"""Display the gathered info and end the conversation."""
if 'choice' in context.user_data:
del context.user_data['choice']
await update.message.reply_text(
f"I learned these facts about you: {facts_to_str(context.user_data)}Until next time!",
reply_markup=ReplyKeyboardRemove(),
)
return ConversationHandler.END
async def error_handler(update: object, context: ContextTypes.DEFAULT_TYPE) -> None:
"""Log the error and send a telegram message to notify the developer."""
# Log the error before we do anything else, so we can see it even if something breaks.
logger.error("Exception while handling an update:", exc_info=context.error)
# traceback.format_exception returns the usual python message about an exception, but as a
# list of strings rather than a single string, so we have to join them together.
tb_list = traceback.format_exception(None, context.error, context.error.__traceback__)
tb_string = "".join(tb_list)
# Build the message with some markup and additional information about what happened.
# You might need to add some logic to deal with messages longer than the 4096-character limit.
update_str = update.to_dict() if isinstance(update, Update) else str(update)
message = (
f"An exception was raised while handling an update\n"
f"<pre>update = {html.escape(json.dumps(update_str, indent=2, ensure_ascii=False))}"
"</pre>\n\n"
f"<pre>error type = {type(context.error)}</pre>"
f"<pre>context.chat_data = {html.escape(str(context.chat_data))}</pre>\n\n"
f"<pre>context.user_data = {html.escape(str(context.user_data))}</pre>\n\n"
f"<pre>prompt = {html.escape(str(update.message.text))}</pre>\n\n"
f"<pre>{html.escape(tb_string)}</pre>"
)
# Finally, send the message
error_reply = ""
if type(context.error) == openai.error.InvalidRequestError:
error_reply = "The response was filtered due to the prompt triggering Azure OpenAI’s content management " \
"policy. Please modify your prompt and retry. To learn more about our content filtering " \
"policies please read our documentation: https://go.microsoft.com/fwlink/?linkid=2198766"
elif type(context.error) in [openai.error.Timeout, asyncio.exceptions.TimeoutError]:
error_reply = "Время запроса истекло. Повтори запрос, плиз"
if error_reply:
await update.message.reply_text(error_reply, parse_mode="Markdown", disable_web_page_preview=True)
else:
await update.message.reply_text("Повтори чуть попозже, плиз", parse_mode="Markdown", disable_web_page_preview=True)
await context.bot.send_message(
chat_id=config["DEVELOPER_CHAT_ID"], text=message
)
| [] |
2024-01-10 | mathalro/artificial-intelligence | alexa-gpt~lambda~lambda.py | import logging
import ask_sdk_core.utils as ask_utils
from openai import OpenAI
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model import Response
# Set your OpenAI API key
client = OpenAI(
api_key="YOUR_API_KEY"
)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class LaunchRequestHandler(AbstractRequestHandler):
"""Handler for Skill Launch."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("LaunchRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Chat G.P.T. mode activated"
session_attr = handler_input.attributes_manager.session_attributes
session_attr["chat_history"] = []
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
class GptQueryIntentHandler(AbstractRequestHandler):
"""Handler for Gpt Query Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("GptQueryIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
query = handler_input.request_envelope.request.intent.slots["query"].value
session_attr = handler_input.attributes_manager.session_attributes
chat_history = session_attr["chat_history"]
response = generate_gpt_response(chat_history, query)
session_attr["chat_history"].append((query, response))
return (
handler_input.response_builder
.speak(response)
.ask("Any other questions?")
.response
)
class CatchAllExceptionHandler(AbstractExceptionHandler):
"""Generic error handling to capture any syntax or routing errors."""
def can_handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> bool
return True
def handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> Response
logger.error(exception, exc_info=True)
speak_output = "Sorry, I had trouble doing what you asked. Please try again."
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
class CancelOrStopIntentHandler(AbstractRequestHandler):
"""Single handler for Cancel and Stop Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return (ask_utils.is_intent_name("AMAZON.CancelIntent")(handler_input) or
ask_utils.is_intent_name("AMAZON.StopIntent")(handler_input))
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Leaving Chat G.P.T. mode"
return (
handler_input.response_builder
.speak(speak_output)
.response
)
def generate_gpt_response(chat_history, new_question):
try:
messages = [{"role": "system", "content": "You are a helpful assistant."}]
for question, answer in chat_history[-10:]:
messages.append({"role": "user", "content": question})
messages.append({"role": "assistant", "content": answer})
messages.append({"role": "user", "content": new_question})
response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=messages,
max_tokens=300,
n=1,
temperature=0.5
)
return response.choices[0].message.content
except Exception as e:
return f"Error generating response: {str(e)}"
sb = SkillBuilder()
sb.add_request_handler(LaunchRequestHandler())
sb.add_request_handler(GptQueryIntentHandler())
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_exception_handler(CatchAllExceptionHandler())
lambda_handler = sb.lambda_handler() | [
"You are a helpful assistant."
] |
2024-01-10 | Asim-Sidd02/MentalHealth | beshie.py | import openai
import streamlit as st
from PIL import Image
import os
from streamlit_chat import message
from streamlit_extras.colored_header import colored_header
st.set_page_config(page_title="Beshie Chatbot", page_icon=":robot:")
openai.api_key = st.secrets["openai_secret_key"]
# # And the root-level secrets are also accessible as environment variables:
# os.environ["openai_secret_key"] == st.secrets["openai_secret_key"]
page_bg = f"""
<style>
[data-testid="stSidebar"] {{
background-color:#1F423F;
}}
[data-testid="stToolbar"] {{
background-color:#FCFCFC;
}}
</style>
"""
st.markdown(page_bg,unsafe_allow_html=True)
# Sidebar contents
with st.sidebar:
# st.title('Beshie')
image = Image.open('Beshie Logo.png')
st.image(image, width=280)
st.markdown("<h2 style='text-align: center; color: white;'> Mental Health Chatbot </h2>", unsafe_allow_html= True)
st.markdown("<h1 style='text-align: left; color: white;'> About </h1>", unsafe_allow_html= True)
st.markdown("""
<p style='text-align: left; color: white;'> Meet Beshie, your friendly mental health chatbot! Whether you're feeling down, anxious, or stressed,
Beshie is here to help you navigate through your emotions and provide you with the guidance you need to feel better.
With Beshie, you can talk about your mental health concerns in a comfortable way,
using Tagalog and English slangs. So don't hesitate to chat with Beshie anytime, anywhere! </p><br><br>
""", unsafe_allow_html=True)
st.markdown("<p style='color:white;'> Made with 💚 by <a href='https://github.com/omaresguerra' style='color:white;'>Omar Esguerra</a> </p>", unsafe_allow_html=True)
# Generate empty lists for generated and past.
## generated stores AI generated responses
if 'generated' not in st.session_state:
st.session_state['generated'] = ['Hello Bes, how may I help you?']
## past stores User's questions
if 'past' not in st.session_state:
st.session_state['past'] = ['Hi']
# Layout of input/response containers
# colored_header(label='', description='', color_name="green-70")
response_container = st.container()
input_container = st.container()
# User input
## Function for taking user provided prompt as input
def get_text():
text = st.text_input("You: ", "", key="input")
return text
def clear_text():
st.session_state["input"] = ""
## Applying the user input box
with input_container:
user_input = get_text()
st.button("Clear Text", on_click=clear_text)
messages = [{"role": "system", "content": "You are a friendly mental health adviser providing mental health support and service. \
Make your responses more friendly by including mixture of English and Tagalog slangs like 'Bes' to make the conversation more interesting."}]
def CustomChatGPT(user_input):
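    # Append the user's turn, query the chat model, and record the assistant's reply in the shared message history.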
messages.append({"role": "user", "content": user_input})
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = messages,
temperature=0,
)
ChatGPT_reply = response["choices"][0]["message"]["content"]
messages.append({"role": "assistant", "content": ChatGPT_reply})
return ChatGPT_reply
## Conditional display of AI generated responses as a function of user provided prompts
with response_container:
if user_input:
response = CustomChatGPT(user_input)
st.session_state.past.append(user_input)
st.session_state.generated.append(response)
if st.session_state['generated']:
for i in range(len(st.session_state['generated'])):
message(st.session_state['past'][i], is_user=True, key=str(i) + '_user')
message(st.session_state["generated"][i], key=str(i))
| [
"You are a friendly mental health adviser providing mental health support and service. Make your responses more friendly by including mixture of English and Tagalog slangs like 'Bes' to make the conversation more interesting."
] |
2024-01-10 | Gyarbij/strife | utilities~ai_utils.py | import aiohttp
import io
from datetime import datetime
import re
import asyncio
import time
import random
from urllib.parse import quote
from utilities.config_loader import load_current_language, config
import openai
import os
from dotenv import load_dotenv
load_dotenv()
current_language = load_current_language()
internet_access = config['INTERNET_ACCESS']
openai.api_key = os.getenv('CHIMERA_GPT_KEY')
openai.api_base = "https://chimeragpt.adventblocks.cc/api/v1"
async def search(prompt):
"""
Asynchronously searches for a prompt and returns the search results as a blob.
Args:
prompt (str): The prompt to search for.
Returns:
str: The search results as a blob.
Raises:
None
"""
if not internet_access or len(prompt) > 200:
return
search_results_limit = config['MAX_SEARCH_RESULTS']
url_match = re.search(r'(https?://\S+)', prompt)
if url_match:
search_query = url_match.group(0)
else:
search_query = prompt
if search_query is not None and len(search_query) > 200:
return
current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
blob = f"Search results for: '{search_query}' at {current_time}:\n"
if search_query is not None:
try:
async with aiohttp.ClientSession() as session:
async with session.get('https://ddg-api.herokuapp.com/search',
params={'query': search_query, 'limit': search_results_limit}) as response:
search = await response.json()
except aiohttp.ClientError as e:
print(f"An error occurred during the search request: {e}")
return
for index, result in enumerate(search):
try:
blob += f'[{index}] "{result["snippet"]}"\n\nURL: {result["link"]}\n'
except Exception as e:
blob += f'Search error: {e}\n'
        blob += "\nSearch results allow you to have real-time information and the ability to browse the internet.\nAs the links were generated by the system rather than the user, please send a response along with the link if necessary.\n"
return blob
else:
blob = "No search query is needed for a response"
return blob
async def fetch_models():
return openai.Model.list()
def generate_response(instructions, search, history):
if search is not None:
search_results = search
    else:
search_results = "Search feature is disabled"
messages = [
{"role": "system", "name": "instructions", "content": instructions},
*history,
{"role": "system", "name": "search_results", "content": search_results},
]
response = openai.ChatCompletion.create(
model=config['GPT_MODEL'],
messages=messages
)
message = response.choices[0].message.content
return message
def generate_gpt4_response(prompt):
messages = [
{"role": "system", "name": "admin_user", "content": prompt},
]
response = openai.ChatCompletion.create(
model='gpt-4',
messages=messages
)
message = response.choices[0].message.content
return message
async def poly_image_gen(session, prompt):
seed = random.randint(1, 100000)
image_url = f"https://image.pollinations.ai/prompt/{prompt}?seed={seed}"
async with session.get(image_url) as response:
image_data = await response.read()
image_io = io.BytesIO(image_data)
return image_io
# async def fetch_image_data(url):
# async with aiohttp.ClientSession() as session:
# async with session.get(url) as response:
# return await response.read()
async def dall_e_gen(prompt, size, num_images):
response = openai.Image.create(
prompt=prompt,
n=num_images,
size=size,
)
imagefileobjs = []
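    # Download each generated image URL and wrap the raw bytes in a BytesIO object for the caller.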
for image in response["data"]:
image_url = image["url"]
async with aiohttp.ClientSession() as session:
async with session.get(image_url) as response:
content = await response.content.read()
img_file_obj = io.BytesIO(content)
imagefileobjs.append(img_file_obj)
return imagefileobjs
async def generate_image_prodia(prompt, model, sampler, seed, neg):
print("\033[1;32m(Prodia) Creating image for :\033[0m", prompt)
start_time = time.time()
async def create_job(prompt, model, sampler, seed, neg):
if neg is None:
negative = "(nsfw:1.5),verybadimagenegative_v1.3, ng_deepnegative_v1_75t, (ugly face:0.8),cross-eyed,sketches, (worst quality:2), (low quality:2), (normal quality:2), lowres, normal quality, ((monochrome)), ((grayscale)), skin spots, acnes, skin blemishes, bad anatomy, DeepNegative, facing away, tilted head, {Multiple people}, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worstquality, low quality, normal quality, jpegartifacts, signature, watermark, username, blurry, bad feet, cropped, poorly drawn hands, poorly drawn face, mutation, deformed, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, extra fingers, fewer digits, extra limbs, extra arms,extra legs, malformed limbs, fused fingers, too many fingers, long neck, cross-eyed,mutated hands, polar lowres, bad body, bad proportions, gross proportions, text, error, missing fingers, missing arms, missing legs, extra digit, extra arms, extra leg, extra foot, repeating hair, nsfw, [[[[[bad-artist-anime, sketch by bad-artist]]]]], [[[mutation, lowres, bad hands, [text, signature, watermark, username], blurry, monochrome, grayscale, realistic, simple background, limited palette]]], close-up, (swimsuit, cleavage, armpits, ass, navel, cleavage cutout), (forehead jewel:1.2), (forehead mark:1.5), (bad and mutated hands:1.3), (worst quality:2.0), (low quality:2.0), (blurry:2.0), multiple limbs, bad anatomy, (interlocked fingers:1.2),(interlocked leg:1.2), Ugly Fingers, (extra digit and hands and fingers and legs and arms:1.4), crown braid, (deformed fingers:1.2), (long fingers:1.2)"
else:
negative = neg
url = 'https://api.prodia.com/generate'
params = {
'new': 'true',
'prompt': f'{quote(prompt)}',
'model': model,
'negative_prompt': f"{negative}",
'steps': '100',
'cfg': '9.5',
'seed': f'{seed}',
'sampler': sampler,
'upscale': 'True',
'aspect_ratio': 'square'
}
async with aiohttp.ClientSession() as session:
async with session.get(url, params=params) as response:
data = await response.json()
return data['job']
job_id = await create_job(prompt, model, sampler, seed, neg)
url = f'https://api.prodia.com/job/{job_id}'
headers = {
'authority': 'api.prodia.com',
'accept': '*/*',
}
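    # Poll the Prodia job endpoint until generation succeeds, then download the resulting PNG into memory.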
async with aiohttp.ClientSession() as session:
while True:
async with session.get(url, headers=headers) as response:
json = await response.json()
if json['status'] == 'succeeded':
async with session.get(f'https://images.prodia.xyz/{job_id}.png?download=1', headers=headers) as response:
content = await response.content.read()
img_file_obj = io.BytesIO(content)
duration = time.time() - start_time
print(f"\033[1;34m(Prodia) Finished image creation\n\033[0mJob id : {job_id} Prompt : ", prompt, "in", duration, "seconds.")
return img_file_obj
| [] |
2024-01-10 | molamk/gpt2-react-flask | server~run_generation.py | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/Transformer-XL/XLNet)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
from tqdm import trange
import torch
import torch.nn.functional as F
import numpy as np
from pytorch_transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig
from pytorch_transformers import GPT2LMHeadModel, GPT2Tokenizer
from pytorch_transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from pytorch_transformers import XLNetLMHeadModel, XLNetTokenizer
from pytorch_transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)

MAX_LENGTH = int(10000)  # Hardcoded max length to avoid infinite loop

ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (
    GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())

MODEL_CLASSES = {
    'gpt2': (GPT2LMHeadModel, GPT2Tokenizer),
    'openai-gpt': (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
    'xlnet': (XLNetLMHeadModel, XLNetTokenizer),
    'transfo-xl': (TransfoXLLMHeadModel, TransfoXLTokenizer),
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""

def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
        Args:
            logits: logits distribution shape (vocabulary size)
            top_k > 0: keep only top k tokens with highest probability (top-k filtering).
            top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    assert logits.dim() == 1  # batch size 1 for now - could be updated for more but the code would be less clear
    top_k = min(top_k, logits.size(-1))  # Safety check
    if top_k > 0:
        # Remove all tokens with a probability less than the last token of the top-k
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value

    if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

        # Remove tokens with cumulative probability above the threshold
        sorted_indices_to_remove = cumulative_probs > top_p
        # Shift the indices to the right to keep also the first token above the threshold
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0

        indices_to_remove = sorted_indices[sorted_indices_to_remove]
        logits[indices_to_remove] = filter_value
    return logits
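
# A minimal, self-contained sketch of how top_k_top_p_filtering behaves on a
# toy 6-token vocabulary. The logits values are arbitrary illustration values
# and this helper is never called elsewhere in this module; it only shows that
# tokens outside the top-k / nucleus set end up with zero probability mass.
def _demo_top_k_top_p_filtering():
    logits = torch.tensor([2.0, 1.0, 0.5, 0.1, -1.0, -3.0])
    # Keep at most 3 tokens and at most 90% of the cumulative probability.
    filtered = top_k_top_p_filtering(logits.clone(), top_k=3, top_p=0.9)
    probs = F.softmax(filtered, dim=-1)
    # Filtered-out tokens were set to -inf, so softmax gives them exactly 0.
    print(probs)
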
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, is_xlnet=False, device='cpu'):
    context = torch.tensor(context, dtype=torch.long, device=device)
    context = context.unsqueeze(0).repeat(num_samples, 1)
    generated = context
    with torch.no_grad():
        for _ in trange(length):
            inputs = {'input_ids': generated}
            if is_xlnet:
                # XLNet is a direct (predict same token, not next token) and bi-directional model by default
                # => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring)
                input_ids = torch.cat((generated, torch.zeros(
                    (1, 1), dtype=torch.long, device=device)), dim=1)
                perm_mask = torch.zeros(
                    (1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device)
                # Previous tokens don't see last token
                perm_mask[:, :, -1] = 1.0
                target_mapping = torch.zeros(
                    (1, 1, input_ids.shape[1]), dtype=torch.float, device=device)
                target_mapping[0, 0, -1] = 1.0  # predict last token
                inputs = {'input_ids': input_ids, 'perm_mask': perm_mask,
                          'target_mapping': target_mapping}

            # Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
            outputs = model(**inputs)
            next_token_logits = outputs[0][0, -1, :] / temperature
            filtered_logits = top_k_top_p_filtering(
                next_token_logits, top_k=top_k, top_p=top_p)
            next_token = torch.multinomial(
                F.softmax(filtered_logits, dim=-1), num_samples=1)
            generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
    return generated

def generate_text(
    padding_text=None,
    model_type='gpt2',
    model_name_or_path='gpt2',
    prompt='',
    length=20,
    temperature=1.0,
    top_k=0,
    top_p=0.9,
    no_cuda=True,
    seed=42,
):
    device = torch.device(
        "cuda" if torch.cuda.is_available() and not no_cuda else "cpu")
    n_gpu = torch.cuda.device_count()

    np.random.seed(seed)
    torch.manual_seed(seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(seed)

    model_type = model_type.lower()
    model_class, tokenizer_class = MODEL_CLASSES[model_type]
    tokenizer = tokenizer_class.from_pretrained(model_name_or_path)
    model = model_class.from_pretrained(model_name_or_path)
    model.to(device)
    model.eval()

    if length < 0 and model.config.max_position_embeddings > 0:
        length = model.config.max_position_embeddings
    elif 0 < model.config.max_position_embeddings < length:
        # No generation bigger than model size
        length = model.config.max_position_embeddings
    elif length < 0:
        length = MAX_LENGTH  # avoid infinite loop

    while True:
        raw_text = prompt if prompt else input("Model prompt >>> ")
        if model_type in ["transfo-xl", "xlnet"]:
            # Models with memory like to have a long prompt for short inputs.
            raw_text = (padding_text if padding_text else PADDING_TEXT) + raw_text
        context_tokens = tokenizer.encode(raw_text)
        out = sample_sequence(
            model=model,
            context=context_tokens,
            length=length,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            device=device,
            is_xlnet=bool(model_type == "xlnet"),
        )
        out = out[0, len(context_tokens):].tolist()
        text = tokenizer.decode(out, clean_up_tokenization_spaces=True)
        print(text)
        if prompt:
            break
    return text
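

# A minimal sketch of driving generate_text directly from the command line,
# assuming the pretrained 'gpt2' weights can be downloaded on first use.
# The prompt and sampling parameters below are arbitrary illustration values.
if __name__ == '__main__':
    sample = generate_text(
        model_type='gpt2',
        model_name_or_path='gpt2',
        prompt='Once upon a time',
        length=40,
        temperature=0.9,
        top_p=0.9,
    )
    print(sample)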
| [] |