date_collected (stringclasses, 1 value) | repo_name (stringlengths, 6 to 116) | file_name (stringlengths, 2 to 220) | file_contents (stringlengths, 13 to 357k) | prompts (sequence)
---|---|---|---|---
2024-01-10 | bopopescu/JARVIS-MUSIC | qhackSRE~lyricsMatch~semantic_search~unsupervised_search.py | import pandas as pd
import numpy as np
from collections import Counter
import re
from lyricsMatch.semantic_search.search_init import data, count_vect_lyric, tf_transformer_lyric, words_tfidf_lyric, count_vect_name, tf_transformer_name, words_tfidf_name, model
# language processing imports
#import nltk
import string
from gensim.corpora import Dictionary
# preprocessing imports
from sklearn.preprocessing import LabelEncoder
from pprint import pprint
# model imports
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel, Phrases, phrases
from gensim.models.doc2vec import LabeledSentence
from gensim.models.doc2vec import TaggedDocument
from gensim.models.doc2vec import Doc2Vec
from gensim.test.utils import get_tmpfile
from sklearn.preprocessing import normalize
#import spacy
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import scipy
import warnings
warnings.filterwarnings("ignore",category=FutureWarning)
warnings.filterwarnings("ignore",category=DeprecationWarning)
def process_query(count_vect, tf_transformer, lyric):
#lyric_no_stops = remove_stopwords(lyric)
#lyric_lemmatized = lemmatization(lyric_no_stops, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
##lyric_lemmatized = lemmatize_more(lyric_lemmatized)
#lyric = list(map(lambda words: " ".join(words), lyric_lemmatized))
words_count = count_vect.transform(lyric)
search_tfidf = tf_transformer.transform(words_count)
return search_tfidf
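# Score every song against the query by taking the dot product of the corpus TF-IDF matrix with the query TF-IDF vector; despite its name, this returns the full score column and the top-5 selection happens in search_lyrics.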
def search_top5(words_tfidf, search_tfidf, data):
matrix_dot = np.dot(words_tfidf, search_tfidf[0].transpose())
#print(type(matrix_dot.todense()))
matrix_dot = matrix_dot.todense()
#sorted_index = matrix_dot.argsort(axis=0)
#sorted_index = np.array(sorted_index)[:-6:-1]
return matrix_dot
#for iindex in sorted_index:
# print(data.iloc[iindex].song)
#print(sorted_index)
def search_lyrics(lyric):
lyric = [lyric]
# lyric = ['Overwhelming Everything about you is so overwhelming']
# lyric = ['I think I got one Her soul is presidential like Barack']
# lyric = ['To the left to the left To the left to the left']
search_tfidf_lyric = process_query(count_vect_lyric, tf_transformer_lyric, lyric)
matrix_lyric = search_top5(words_tfidf_lyric, search_tfidf_lyric, data)
search_tfidf_name = process_query(count_vect_name, tf_transformer_name, lyric)
matrix_name = search_top5(words_tfidf_name, search_tfidf_name, data)
inferred_vector = model.infer_vector(lyric[0].split(' '))
sims = model.docvecs.most_similar([inferred_vector], topn=len(model.docvecs))
rank = [[data.index.get_loc(int(docid.strip('doc'))), sim] for docid, sim in sims]
rank_df = pd.DataFrame(rank, columns=['docid', 'sim'])
rank_df = rank_df.sort_values(by=['docid'])
rank_df.index = rank_df.docid
rank_df = rank_df.drop(columns=['docid'])
matrix_d2v = rank_df.as_matrix()
# output is a dict of 5 items; keys vary but are always distinct. Each item is a list: list[0] song name, list[1] singer name, list[2] YouTube link
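# Blend the three similarity signals: lyric TF-IDF (weight 0.7), song-name TF-IDF (0.1) and Doc2Vec (0.2).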
matrix_dot = 0.7 * matrix_lyric + 0.1 * matrix_name + 0.2 * matrix_d2v
sorted_index = matrix_dot.argsort(axis=0)
sorted_index = np.array(sorted_index)[:-6:-1]
result_dict = dict()
link = 'https://www.youtube.com/results?search_query='
for iindex in sorted_index:
parameter = data.iloc[iindex].song.values[0].replace(' ', '+') + '+' + data.iloc[iindex].singer.values[
0].replace(' ', '+')
#print(link + parameter.lower())
result = []
result.append(data.iloc[iindex].song.values[0])
result.append(data.iloc[iindex].singer.values[0])
result.append(link + parameter.lower())
result_dict[iindex[0]] = result
return result_dict
# print(data.iloc[iindex].song)
if __name__ == '__main__':
print(search_lyrics('Overwhelming Everything about you is so overwhelming'))
print(search_lyrics('Overwhelming Everything about you is so overwhelming'))
| [] |
2024-01-10 | bopopescu/JARVIS-MUSIC | qhackSRE~lyricsMatch~semantic_search~search_init.py | import pandas as pd
import numpy as np
from collections import Counter
import re
# language processing imports
#import nltk
import string
from gensim.corpora import Dictionary
# preprocessing imports
from sklearn.preprocessing import LabelEncoder
from pprint import pprint
# model imports
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel, Phrases, phrases
from gensim.models.doc2vec import LabeledSentence
from gensim.models.doc2vec import TaggedDocument
from gensim.models.doc2vec import Doc2Vec
from gensim.test.utils import get_tmpfile
from sklearn.preprocessing import normalize
#import spacy
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
import scipy
import warnings
import os
warnings.filterwarnings("ignore",category=FutureWarning)
warnings.filterwarnings("ignore",category=DeprecationWarning)
# read lyrics.csv
def process_corpus(column):
lyric_dir = os.path.join('lyricsMatch', 'semantic_search', 'lyrics.csv')
data = pd.read_csv(lyric_dir, names=['singer', 'song', 'lyrics'])
data = data[~data['lyrics'].isna()]
# data_words_nostops = remove_stopwords(data.lyrics)
# data_lemmatized = lemmatization(data_words_nostops, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
# corpus = list(map(lambda words: " ".join(words), data_lemmatized))
count_vect = CountVectorizer().fit(data[column].values)
words_count = count_vect.transform(data[column].values)
tf_transformer = TfidfTransformer(sublinear_tf=True).fit(words_count)
words_tfidf = tf_transformer.transform(words_count)
return data, count_vect, tf_transformer, words_tfidf
# scipy.sparse.save_npz('sparse_matrix.npz',words_tfidf)
# words_tfidf = scipy.sparse.load_npz('sparse_matrix.npz')
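# Build the two TF-IDF indexes (one over the lyrics, one over the song titles) and load the pre-trained Doc2Vec model once at import time so the search module can reuse them.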
data, count_vect_lyric, tf_transformer_lyric, words_tfidf_lyric = process_corpus('lyrics')
data, count_vect_name, tf_transformer_name, words_tfidf_name = process_corpus('song')
model_file = os.path.join(os.getcwd(), 'lyricsMatch', 'semantic_search', "my_doc2vec_model")
fname = get_tmpfile(model_file)
model = Doc2Vec.load(fname)
| [] |
2024-01-10 | SantraMosambi/test | playground~agentbox.py | import asyncio
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
import sys
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
openagent_dir = os.path.abspath(os.path.join(script_dir, ".."))
sys.path.append(openagent_dir)
import openagent
from openagent.llms._openai import OpenAI as guidance_llm
from openagent.agent.chat import ChatAgent
from dotenv import load_dotenv
load_dotenv()
from jupyter_client import KernelManager
from IPython import display
import subprocess
import ast
import argparse
import threading
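# Build a ChatAgent backed by gpt-3.5-turbo with a guidance-style template that turns an English request into Python code.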
def agent():
llm = guidance_llm(
model="gpt-3.5-turbo"
)
chat_template = '''
{{#user~}}
I want to translate the following English text into Python code:
QUERY: {{input}}
{{~/user}}
{{#assistant~}}
Sure, I can assist with that. If I need more information, I'll ask for clarification.
{{~/assistant}}
{{#user~}}
Yes, go ahead and write the complete code.
{{~/user}}
{{#assistant~}}
{{gen 'response' temperature=0 max_tokens=3900}}
{{~/assistant}}
{{#assistant~}}
If the context or the task is not clear, please provide additional information to clarify.
{{~/assistant}}'''
agent = ChatAgent(
llm=llm,
prompt_template=chat_template,
)
return agent
def install_dependencies(code):
try:
# Parse the code to extract import statements
parsed_ast = ast.parse(code)
imports = []
for node in ast.walk(parsed_ast):
if isinstance(node, ast.Import):
imports.extend([name.name for name in node.names])
elif isinstance(node, ast.ImportFrom):
module_name = node.module
if module_name is not None:
imports.append(module_name)
# Remove duplicate imports and filter out standard library modules
imports = list(set(imports))
# print("imports", imports)
resolved_imports = set()
for imp in imports:
if '.' in imp:
parent_module = imp.split('.')[0]
resolved_imports.add(parent_module)
else:
resolved_imports.add(imp)
# Remove duplicate imports and filter out standard library modules
resolved_imports = list(resolved_imports)
# print("resolved_imports", resolved_imports)
third_party_dependencies = [dep for dep in resolved_imports if dep not in sys.modules]
# print("third_party_dependencies", third_party_dependencies)
if third_party_dependencies:
subprocess.check_call([sys.executable, "-m", "pip", "install"] + third_party_dependencies)
return True
else:
# print("No third-party dependencies detected.")
return True
except subprocess.CalledProcessError:
print("Dependency installation failed.")
return False
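# Run the generated code in a fresh Jupyter kernel, streaming display and stdout output back on a background thread for up to a fixed timeout.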
def run_python_code_in_kernel(code):
# Create a kernel manager
km = KernelManager(kernel_name='python3') # Use the appropriate kernel name
# Start the kernel
km.start_kernel()
# Connect to the kernel
kc = km.client()
kc.start_channels()
# Execute the code in the kernel
kc.execute(code)
# Create a thread for waiting on messages
def wait_for_messages():
try:
while True:
msg = kc.get_iopub_msg()
msg_type = msg['header']['msg_type']
if msg_type == 'display_data':
output_data = msg['content']['data']
if 'image/png' in output_data:
display.display_png(output_data['image/png'], raw=True)
elif 'image/jpeg' in output_data:
display.display_jpeg(output_data['image/jpeg'], raw=True)
elif msg_type == 'stream':
output_data = msg['content']['text']
output_data = output_data.split("\n")
for output in output_data[:-1]:
display.display(output)
except asyncio.CancelledError:
pass # Ignore the exception
# Start the message-waiting thread
message_thread = threading.Thread(target=wait_for_messages)
message_thread.start()
# Wait for the specified timeout
timeout_seconds = 10
message_thread.join(timeout_seconds)
# Check if the thread is still alive (indicating timeout)
if message_thread.is_alive():
print("Code execution completed")
else:
print("Code execution completed within the timeout.")
# Stop the kernel
kc.stop_channels()
km.shutdown_kernel()
# Main function
def main(gpt_prompt):
res = agent().run(input=gpt_prompt)
code = f"""{res.split('```')[1].replace('python', '')}"""
print(code)
# Install dependencies
if install_dependencies(code):
# Run the generated code in the Jupyter kernel
run_python_code_in_kernel(code)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Execute Python code from the command line.')
parser.add_argument("--gpt_prompt", help="Python code to be executed", default=None)
args = parser.parse_args()
gpt_prompt = args.gpt_prompt
main(gpt_prompt)
| [
"\n {{#user~}}\n I want to translate the following English text into Python code:\n QUERY: {{input}}\n {{~/user}}\n\n {{#assistant~}}\n Sure, I can assist with that. If I need more information, I'll ask for clarification.\n {{~/assistant}}\n\n {{#user~}}\n Yes, go ahead and write the complete code.\n {{~/user}}\n\n {{#assistant~}}\n {{gen 'response' temperature=0 max_tokens=3900}}\n {{~/assistant}}\n\n {{#assistant~}}\n If the context or the task is not clear, please provide additional information to clarify.\n {{~/assistant}}"
] |
2024-01-10 | SantraMosambi/test | openagent~finetune~LLMFinetune.py | from abc import ABC, abstractmethod
from logging import Logger
import openai
class LLMFinetune(ABC):
def __init__(self, logger: Logger, openai_key: str):
self.logger = logger
openai.api_key = openai_key
@abstractmethod
def transform_data(self, train_csv_file: str, val_csv_file: str , train_output_file: str, val_output_file: str) -> str:
pass
@abstractmethod
def finetune(self, **kwargs):
pass
| [] |
2024-01-10 | aws-samples/text-embeddings-pipeline-for-rag | lambda~embedding_function.py | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import os
import json
import boto3
from botocore.exceptions import ClientError
import langchain
from langchain.embeddings import BedrockEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores.pgvector import PGVector
from langchain.indexes import SQLRecordManager
from langchain.indexes import index
import sqlalchemy
from utils import bedrock
def lambda_handler(event, context):
# Get content of uploaded object
s3 = boto3.client('s3')
s3_details = event["Records"][0]["s3"]
response = s3.get_object(Bucket=s3_details["bucket"]["name"], Key=s3_details["object"]["key"])
content = response['Body'].read().decode('utf-8')
# Set up client for Amazon Bedrock
boto3_bedrock = bedrock.get_bedrock_client(region="us-east-1")
br_embeddings = BedrockEmbeddings(model_id="amazon.titan-embed-text-v1", client=boto3_bedrock)
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = splitter.create_documents([content], metadatas=[{"source": s3_details["object"]["key"]}]);
print(f"Number of documents after split and chunking = {len(docs)}")
# Retrieve database credentials from AWS Secrets Manager
db_credential = get_db_credential()
pgvector_connection_string = PGVector.connection_string_from_db_params(
driver="psycopg2",
host=db_credential["host"],
port=int(db_credential["port"]),
database=db_credential["username"],
user=db_credential["username"],
password=db_credential["password"],
)
# Record Manager is used to load and keep in sync documents from any source into a vector store
# https://blog.langchain.dev/syncing-data-sources-to-vector-stores/
collection_name = "knowledge_base"
namespace = f"pgvector/{collection_name}"
record_manager = SQLRecordManager(
namespace, engine=sqlalchemy.create_engine("postgresql+psycopg2://postgres:" + db_credential["password"] + "@" + db_credential["host"] + "/postgres")
)
record_manager.create_schema()
# Create vector store
vectorstore_pgvector_aws = PGVector(pgvector_connection_string, br_embeddings, collection_name=collection_name)
# Create embeddings and store in vector store
index(
docs_source=docs,
record_manager=record_manager,
vector_store=vectorstore_pgvector_aws,
cleanup="incremental",
source_id_key="source"
)
# Performing a query for testing
print("Performing a query for testing")
print("-" * 35)
query = "How do the new features of AWS Health help me?"
docs_with_score = vectorstore_pgvector_aws.similarity_search_with_score(query)
for doc, score in docs_with_score:
print("Score: ", score)
print(doc.page_content)
print("-" * 35)
print("boto3: " + boto3.__version__ + ", langchain: " + langchain.__version__)
def get_db_credential():
secret_name = "text-embeddings-pipeline-vector-store"
# Create a Secrets Manager client
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager'
)
try:
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
except ClientError as e:
# For a list of exceptions thrown, see
# https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html
raise e
# Decrypts secret using the associated KMS key.
return json.loads(get_secret_value_response['SecretString']) | [] |
2024-01-10 | aramasethu/rebuff | python-sdk~rebuff~rebuff.py | import secrets
from typing import Any, Dict, Optional, Tuple, Union
import requests
from pydantic import BaseModel
class DetectApiRequest(BaseModel):
userInput: str
userInputBase64: Optional[str] = None
runHeuristicCheck: bool
runVectorCheck: bool
runLanguageModelCheck: bool
maxHeuristicScore: float
maxModelScore: float
maxVectorScore: float
class DetectApiSuccessResponse(BaseModel):
heuristicScore: float
modelScore: float
vectorScore: Dict[str, float]
runHeuristicCheck: bool
runVectorCheck: bool
runLanguageModelCheck: bool
maxHeuristicScore: float
maxModelScore: float
maxVectorScore: float
injectionDetected: bool
class ApiFailureResponse(BaseModel):
error: str
message: str
class Rebuff:
def __init__(self, api_token: str, api_url: str = "https://playground.rebuff.ai"):
self.api_token = api_token
self.api_url = api_url
self._headers = {
"Authorization": f"Bearer {self.api_token}",
"Content-Type": "application/json",
}
def detect_injection(
self,
user_input: str,
max_heuristic_score: float = 0.75,
max_vector_score: float = 0.90,
max_model_score: float = 0.9,
check_heuristic: bool = True,
check_vector: bool = True,
check_llm: bool = True,
) -> Union[DetectApiSuccessResponse, ApiFailureResponse]:
"""
Detects if the given user input contains an injection attempt.
Args:
user_input (str): The user input to be checked for injection.
max_heuristic_score (float, optional): The maximum heuristic score allowed. Defaults to 0.75.
max_vector_score (float, optional): The maximum vector score allowed. Defaults to 0.90.
max_model_score (float, optional): The maximum model (LLM) score allowed. Defaults to 0.9.
check_heuristic (bool, optional): Whether to run the heuristic check. Defaults to True.
check_vector (bool, optional): Whether to run the vector check. Defaults to True.
check_llm (bool, optional): Whether to run the language model check. Defaults to True.
Returns:
Union[DetectApiSuccessResponse, ApiFailureResponse]: The detection metrics, with
injectionDetected set to indicate whether an injection was detected.
"""
request_data = DetectApiRequest(
userInput=user_input,
userInputBase64=encode_string(user_input),
runHeuristicCheck=check_heuristic,
runVectorCheck=check_vector,
runLanguageModelCheck=check_llm,
maxVectorScore=max_vector_score,
maxModelScore=max_model_score,
maxHeuristicScore=max_heuristic_score,
)
response = requests.post(
f"{self.api_url}/api/detect",
json=request_data.dict(),
headers=self._headers,
)
response.raise_for_status()
response_json = response.json()
success_response = DetectApiSuccessResponse.parse_obj(response_json)
if (
success_response.heuristicScore > max_heuristic_score
or success_response.modelScore > max_model_score
or success_response.vectorScore["topScore"] > max_vector_score
):
# Injection detected
success_response.injectionDetected = True
return success_response
else:
# No injection detected
success_response.injectionDetected = False
return success_response
@staticmethod
def generate_canary_word(length: int = 8) -> str:
"""
Generates a secure random hexadecimal canary word.
Args:
length (int, optional): The length of the canary word. Defaults to 8.
Returns:
str: The generated canary word.
"""
return secrets.token_hex(length // 2)
def add_canary_word(
self,
prompt: Any,
canary_word: Optional[str] = None,
canary_format: str = "<!-- {canary_word} -->",
) -> Tuple[Any, str]:
"""
Adds a canary word to the given prompt which we will use to detect leakage.
Args:
prompt (Any): The prompt to add the canary word to.
canary_word (Optional[str], optional): The canary word to add. If not provided, a random canary word will be
generated. Defaults to None.
canary_format (str, optional): The format in which the canary word should be added.
Defaults to "<!-- {canary_word} -->".
Returns:
Tuple[Any, str]: A tuple containing the modified prompt with the canary word and the canary word itself.
"""
# Generate a canary word if not provided
if canary_word is None:
canary_word = self.generate_canary_word()
# Embed the canary word in the specified format
canary_comment = canary_format.format(canary_word=canary_word)
if isinstance(prompt, str):
prompt_with_canary: str = canary_comment + "\n" + prompt
return prompt_with_canary, canary_word
try:
import langchain
if isinstance(prompt, langchain.PromptTemplate):
prompt.template = canary_comment + "\n" + prompt.template
return prompt, canary_word
except ImportError:
pass
raise TypeError(
f"prompt_template must be a PromptTemplate or a str, "
f"but was {type(prompt)}"
)
def is_canary_word_leaked(
self,
user_input: str,
completion: str,
canary_word: str,
log_outcome: bool = True,
) -> bool:
"""
Checks if the canary word is leaked in the completion.
Args:
user_input (str): The user input.
completion (str): The completion generated by the AI.
canary_word (str): The canary word to check for leakage.
log_outcome (bool, optional): Whether to log the outcome of the leakage check. Defaults to True.
Returns:
bool: True if the canary word is leaked, False otherwise.
"""
if canary_word in completion:
if log_outcome:
self.log_leakage(user_input, completion, canary_word)
return True
return False
def log_leakage(self, user_input: str, completion: str, canary_word: str) -> None:
"""
Logs the leakage of a canary word.
Args:
user_input (str): The user input.
completion (str): The completion generated by the AI.
canary_word (str): The leaked canary word.
"""
data = {
"user_input": user_input,
"completion": completion,
"canaryWord": canary_word,
}
response = requests.post(
f"{self.api_url}/api/log", json=data, headers=self._headers
)
response.raise_for_status()
return
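# Example usage (sketch; the token and prompts below are hypothetical, not part of the original file):
#   rb = Rebuff(api_token="YOUR_API_TOKEN")
#   buffed_prompt, canary = rb.add_canary_word("Tell me a joke")
#   result = rb.detect_injection("Ignore all previous instructions")
#   if result.injectionDetected:
#       print("injection detected")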
def encode_string(message: str) -> str:
return message.encode("utf-8").hex()
| [
"PLACEHOLDER\nPLACEHOLDER"
] |
2024-01-10 | my625/PromptQG | test~AQG_chatgpt.py | import os
import time
import spacy
nlp = spacy.load("en_core_web_sm")
import openai
openai.api_key="put api key here"
#openai.api_key = os.getenv("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")
#uu=open("UK-Abs/test-data/stats-UK-test.txt")
ll=os.listdir("context/")
#ll=os.listdir("IN-Abs/test-data/judgement/")
no_of_words=0
no_of_words2=0
temp_summary2=""
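# For each saved context file, ask gpt-3.5-turbo to generate a question from that context and write the model output to the output/ directory.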
for i in range(0,700):
#temp=ll[i].split("\t")
#print(temp[0],temp[1],temp[2])
#if i<71:
# continue
#print(i)
#continue
lll=open("context/"+ll[i],"r")
llll=str(lll.read())
#temp2=llll.split(".")
#temp_summary=""
#parsed=""
llllll=open("output/"+temp[i],"w+")
#llllll.write()
#time.sleep(60)
last=""
ttt=""
counter=0
#for j in range(0,len(temp2)):
#temp2[j]=temp2[j].strip("\n")
#parsed = nlp(temp2[j])
#no_of_words2=no_of_words
#no_of_words=no_of_words+len(parsed)
#print(len(parsed))
#temp_summary2=temp_summary
#counter=counter+1
#last=temp2[j]
#temp_summary=temp_summary+temp2[j]
#if temp_summary=="":
# temp_summary=temp_summary+last+"."+temp2[j]+"."
#else:
# temp_summary=temp_summary+temp2[j]+"."
#ratio=int(1024*0.333333333333)
#last=temp2[j]
#ratio=int(1024*((int(temp[2])*1.0)/int(temp[1])))
#if ((no_of_words>1024) and (no_of_words2<=1024)):
temp_summary2="Given the context"+"{"+llll+"}"+" , generate a Question."
#try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content":temp_summary2}],
temperature=0.7,
max_tokens=50,
stop=None
)
#except:
# cc=0
#temp_summary=temp_summary+parsed
# response = openai.Completion.create(
# model="text-davinci-003",
# prompt="{"+temp_summary2+"}"+"Tl;Dr",
# temperature=0.7,
# max_tokens=ratio,
# top_p=1.0,
# frequency_penalty=0.0,
# presence_penalty=1
# )
for choice in response.choices:
print(choice)
#if "text" in choice:
# print(choice.text)
llllll.write(choice["message"]["content"]+"\n")
#print(choice["message"]["content"])
# llllll.write(str(response["choices"][0]["text"]))
print("---------------")
print(ll[i])
print("----")
print(choice["message"]["content"])
print("----")
print(temp_summary2)
print("-------------------------")
#print(response["choices"][0]["text"])
temp_summary2=""
temp_summary=""
#time.sleep(5)
#print(response["choices"][0]["text"])
#llllll.write(str(response["choices"][0]["text"]))
#print("--------------")
#print(temp[0])
#print("----")
#print(response["choices"][0]["text"])
#print("----")
#print(temp_summary2)
#print("-------------------------")
#temp_summary2=""
#temp_summary=""
#llllll.close()
| [] |
2024-01-10 | my625/PromptQG | test~AQG_davinci.py | import os
import time
import spacy
nlp = spacy.load("en_core_web_sm")
import openai
openai.api_key="put api key here"
#openai.api_key = os.getenv("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")
#uu=open("UK-Abs/test-data/stats-UK-test.txt")
ll=os.listdir("context/")
#ll=os.listdir("IN-Abs/test-data/judgement/")
no_of_words=0
no_of_words2=0
temp_summary2=""
for i in range(0,700):
#temp=ll[i].split("\t")
#print(temp[0],temp[1],temp[2])
#if i<71:
# continue
#print(i)
#continue
lll=open("context/"+ll[i],"r")
llll=str(lll.read())
#temp2=llll.split(".")
#temp_summary=""
#parsed=""
llllll=open("output/"+temp[i],"w+")
#llllll.write()
#time.sleep(60)
last=""
ttt=""
counter=0
#for j in range(0,len(temp2)):
#temp2[j]=temp2[j].strip("\n")
#parsed = nlp(temp2[j])
#no_of_words2=no_of_words
#no_of_words=no_of_words+len(parsed)
#print(len(parsed))
#temp_summary2=temp_summary
#counter=counter+1
#last=temp2[j]
#temp_summary=temp_summary+temp2[j]
#if temp_summary=="":
# temp_summary=temp_summary+last+"."+temp2[j]+"."
#else:
# temp_summary=temp_summary+temp2[j]+"."
#ratio=int(1024*0.333333333333)
#last=temp2[j]
#ratio=int(1024*((int(temp[2])*1.0)/int(temp[1])))
#if ((no_of_words>1024) and (no_of_words2<=1024)):
temp_summary2="Given the context"+"{"+llll+"}"+" , generate a Question."
#try:
response = openai.Completion.create(
model="text-davinci-003",
prompt="Given the context"+"{"+llll+"}"+" , generate a Question.",
temperature=0.7,
max_tokens=50,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=1
)
#except:
# cc=0
#temp_summary=temp_summary+parsed
# response = openai.Completion.create(
# model="text-davinci-003",
# prompt="{"+temp_summary2+"}"+"Tl;Dr",
# temperature=0.7,
# max_tokens=ratio,
# top_p=1.0,
# frequency_penalty=0.0,
# presence_penalty=1
# )
#for choice in response.choices:
# print(choice)
#if "text" in choice:
# print(choice.text)
#llllll.write(choice["message"]["content"]+"\n")
#print(choice["message"]["content"])
llllll.write(str(response["choices"][0]["text"]))
#print("---------------")
#print(temp[0])
#print("----")
#print(choice["message"]["content"])
#print("----")
#print(temp_summary2)
#print("-------------------------")
#print(response["choices"][0]["text"])
#temp_summary2=""
#temp_summary=""
#time.sleep(5)
#print(response["choices"][0]["text"])
#llllll.write(str(response["choices"][0]["text"]))
#print("--------------")
#print(temp[0])
#print("----")
#print(response["choices"][0]["text"])
#print("----")
#print(temp_summary2)
#print("-------------------------")
#temp_summary2=""
#temp_summary=""
#llllll.close()
| [
"Given the context{PLACEHOLDER} , generate a Question."
] |
2024-01-10 | AliLogicM/VoiceRecognition | virtualAssistant.py | import openai
import speech_recognition as sr
import pyttsx3
import time
# Initialize the OpenAI API
openai.api_key = "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"
#SpeechToText
engine=pyttsx3.init()
def transcribe_audio_to_test(filename):
recogizer=sr.Recognizer()
with sr.AudioFile(filename)as source:
audio=recogizer.record(source)
try:
return recogizer.recognize_google(audio, language="es")
except:
print("No se que ha pasao")
def generate_response(prompt):
response= openai.Completion.create(
engine="text-curie-001",
prompt=prompt,
max_tokens=2049,
n=1,
stop=None,
temperature=0.5,
)
return response ["choices"][0]["text"]
# Set Spanish voice for text-to-speech engine
voices = engine.getProperty('voices')
spanish_voice = None
for voice in voices:
if "spanish" in voice.languages:
spanish_voice = voice.id
if spanish_voice is not None:
engine.setProperty('voice', spanish_voice)
def speak_text(text):
engine.say(text)
engine.runAndWait()
def main():
while True:
# Wait for the user to say "hola" before recording
print("Di 'Hola' para empezar a grabar")
with sr.Microphone() as source:
recognizer=sr.Recognizer()
audio=recognizer.listen(source)
try:
transcription = recognizer.recognize_google(audio, language="es")
if transcription.lower()=="hola":
#record audio
filename ="input.wav"
print("Dime que quieres mozo")
with sr.Microphone() as source:
recognizer=sr.Recognizer()
source.pause_threshold=1
audio=recognizer.listen(source,phrase_time_limit=None,timeout=None)
with open(filename,"wb")as f:
f.write(audio.get_wav_data())
# transcribe audio to text
text=transcribe_audio_to_test(filename)
if text:
print(f"Yo {text}")
#Generate the response
response = generate_response(text)
print(f"El bot ese dice: {response}")
# read the response aloud with text-to-speech
speak_text(response)
except Exception as e:
print("Ahhhhhh erroor : {}".format(e))
if __name__=="__main__":
main() | [] |
2024-01-10 | LucaZoss/NLP_LLM-PDFParser | Looping_Legends~EXTRACTION~extract_preprod_v1.py | from openai import OpenAI
import os
client = OpenAI(api_key=os.getenv('OPEN_AI_KEY'))
print("environement is build")
# Getting text files from pdf previously created with OCR
# for pre-production purposes, upload from local file without going through OCR process
with open("/Users/lucazosso/Desktop/IE_Course/Hackathon/Looping_Legends/extracted/XS2021832634_extracted.txt", "r") as file:
term_sheet_test = file.read()
# Fields to target
financial_product_terms = {
"ISIN": "International Securities Identification Number",
"Issuer": "The entity or organization that issues the financial product",
"Ccy": "The currency in which the financial product is denominated or traded",
"Underlying(s)": "The assets, indices, or securities on which the financial product's performance is based, for example: GLE, RNO FP,VOW3 GY, DAI GY. it is sometimes called bloomberg code/identifier",
"Strike": "The strike price of the underlying and not the barrier. It must be a number and must not contain any letters. It is a financial number so it will have decimals. You can also calculate it by taking the knock-in barrier price divided by barrier level, and it cannot be 100% of Initial Level ",
"Launch Date": "The date when the financial product is officially issued and made available for investment or trading",
"Final Valuation Day": "The date on which the final valuation of the financial product is made",
"Maturity": "The date on which the financial product is set to expire or mature",
"Cap": "An upper limit on potential returns for the financial product",
"Barrier": "A specific level or threshold that, if reached, can trigger certain events or determine the product's performance"
}
# There is a unique value for each Underlying so if there are multiple underlyings there will be multiple strike prices.
elements_to_extract_value_from = financial_product_terms.keys()
# Prompting
prompt = f'''
I want you to act as a data extraction specialist in the finance industry with special knowledge in financial term sheet.
Your task is to go through this text file {term_sheet_test} and extract the value of the corresponding key elements listed here {elements_to_extract_value_from}.
I have also included in this file {financial_product_terms} the meaning of each element to help you in your extraction.
Please keep in mind that if multiple underlyings are present then each underlying has its unique strick price.
As a result please provide a dictionary as an output format (key: value(s)).
'''
# Non-streaming:
print("----- standard request -----")
completion = client.chat.completions.create(
model="gpt-3.5-turbo-16k",
messages=[
{
"role": "user",
"content": f"{prompt}",
},
],
)
print(completion.choices[0].message.content)
| [
"\nI want you to act as a data extraction specialist in the finance industry with special knowledge in financial term sheet.\nYour task is to go through this text file PLACEHOLDER and extract the value of the corresponding key elements listed here PLACEHOLDER.\nI have also included in this file PLACEHOLDER the meaning of each element to help you in your extraction.\nPlease keep in mind that if multiple underlyings are present then each underlying has its unique strick price.\nAs a result please provide a dictionary as an output format (key: value(s)).\n\n"
] |
2024-01-10 | LucaZoss/NLP_LLM-PDFParser | production~code~langchain_extraction.py | import os
import json
import time
from openai import OpenAI
from typing import Dict
import pandas as pd
import dotenv
from pathlib import Path
from langchain.chains import create_extraction_chain
from langchain.chat_models import ChatOpenAI
env_path = Path(
'/content/ATT85165.env')
dotenv.load_dotenv(dotenv_path=env_path)
# Load your OpenAI API key
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
print("welcome to the matrix")
# Schema
schema = {
"properties": {
"ISIN": {"type": "string"},
"Issuer": {"type": "string"},
"Currency": {"type": "string"},
"Underlying(s)": {
"type": "array",
"items": {"type": "string"}
},
"Strike": {
"type": "array",
"items": {"type": "number"}
},
"Launch Date": {"type": "string", "format": "date"},
"Final Valuation Day": {"type": "string", "format": "date"},
"Maturity": {"type": "string", "format": "date"},
"Cap": {"type": "number"},
"Barrier": {"type": "number"},
},
"required": ["ISIN", "Issuer", "Currency", "Underlying(s)", "Strike", "Launch Date", "Final Valuation Day", "Maturity", "Cap", "Barrier"],
}
# COLUMNS for Pandas DF
COLUMNS = ['PDF ID', 'ISIN', 'Issuer', 'Currency',
'Underlying(s)', 'Strike', 'Launch Date', 'Final Valuation Day', 'Maturity', 'Cap', 'Barrier']
text_example = '''
Final Terms and Conditions (our ref. CE4247RAI) as of February 16th, 2022
15M Capped Bonus Certificate Plus Worst-of on DAX®, FTSE100
and IBEX 35® in USD Quanto
Issuer
BNP Paribas Issuance B.V. (S&P's A+)
Guarantor
BNP Paribas (S&P's A+ / Moody's Aa3 / Fitch AA-)
Issue Type
Certificate
Issue Amount
USD 1,600,000
Number of Certificates
1,600
Notional Amount per
Certificate (N)
1 Certificate = USD 1,000
Currency
USD Quanto
Issue Price per
Certificate
100.00%
Listing
None
Trade Date
February 15th, 2022
Strike Date
February 15th, 2022
Issue Date
March 01st, 2022
Redemption Valuation
May 15th, 2023
Date
Redemption Date
May 30th, 2023
Underlying Indices
i
Name of Underlying
Bloomberg
Indexi
Administrator
Register
Indexi
Initial
Code
1
DAX®
DAX
15412.71
STOXX Ltd.
Included
2
FTSE100
UKX
7608.92
FTSE
Included
International
Limited
3
IBEX 35®
IBEX
8718.00
SOCIEDAD
Included
DE BOLSAS
S.A.
-
Final Redemption
On the Redemption Date, the Issuer shall redeem each Certificate at the following Cash
Settlement Amount:
1) If WO IndexFinal is greater than or equal to 120% x WO IndexInitial:
N x 120%
2) If a Knock-out Event has not occurred and WO IndexFinal is less than 120% x WO
IndexInitial:
N x max
108.20%.
WO IndexFinal
WO IndexInitial
Equity Derivatives Solutions / Structured Products - Funds Of Funds /
1
Family Offices
[email protected]
BNP PARIBAS
CORPORATE & INSTITUTIONAL BANKING
The bank for a changing world
3) If a Knock-out Event has occurred:
N x
WO IndexFinal
WO Index Initial
Where
WO Index is the Underlying Index with the worst performance from the Strike Date to the
Redemption Valuation Date, defined as:
" IndexInitial.
3
Index 'Final
WO IndexInitial is the official closing level of WO Index on the Strike Date.
WO IndexFinal is the official closing level of WO Index on the Redemption Valuation Date.
Indexi
Initial with i from 1 to 3 is the official closing level of the Indexi
on the Strike Date.
Indexi
Final with i from 1 to 3 is the official closing level of the Indexi
on the Redemption
Valuation Date.
Knock-out Level
DAX® - 10,788.8970 (70% of Index1
FTSE100 - 5,326.2440 (70% of Index2
Initial)
IBEX 35® - 6,102.60 (70% of Index3
Initial)
Initial)
Knock-out
Determination Day
The Redemption Valuation Date.
Knock-out Valuation
Time
Specific Scheduled Closing Time of each Underlying Index on the Redemption Valuation Date.
Knock-out Event
A Knock-out Event shall be deemed to occur if, at the Knock-out Valuation Time on the Knock-
out Determination Day, at least one Underlying Index closes at a level strictly less than its Knock-
out Level.
'''
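# Iterate over {pdf_id: text}, run the extraction prompt through LangChain's create_extraction_chain so the model output is structured according to the schema above, and collect one record per term sheet in a DataFrame.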
def extract_fields_langchain(pdf_texts: Dict[str, str]) -> pd.DataFrame:
fields_df = pd.DataFrame(columns=COLUMNS)
for pdf_id, text in pdf_texts.items():
prompt = f'''Act as an expert in financial analysis, specializing in interpreting and extracting key data from financial term sheets.\
Your task is to extract the following fields and their associated value(s):
Fields to Extrcact: ISIN, Issuer, Currency, Underlying(s), Strike, Launch Date, Final Valuation Day, Maturity, Cap, Barrier
In cases of missing informations for Cap and Barrier fields mark them as: NaN.\
Use the following constraints delimited by triple backticks to extract the needed informations:
```
- ISIN: Always 12 alphanumeric characters. If unclear, use any 12 alphanumeric characters in the document.
- Issuer: Must be a bank name.
- Currency: Must be a valid currency.
- Underlying(s): Extract Bloomberg codes/tickers; multiple entries separated by commas.
- Strike: Contains between two to six-digits and at least one decimals; find values close to 'Underlying(s)'.
- Launch Date/Trade Date/Strike Date: In date format, excluding the issue date.Ensure to use the precise value as found in the input text.
- Final Valuation Day/Redemption Valuation Date: In date format.
- Maturity/Redemption date: In date format.
- Cap: A number over 100; percentage close to an index.
- Barrier/Bonus Barrier/Knock-In Barrier/Knock-Out Barrier: Percentage less than 100.
```\
For clarity and accuracy, here is an example of the extracted fields and their associated values that you should produce from the the following {text_example}:
"ISIN": "XS2033997748",
"Issuer": "BNP",
"Currency": "USD",
"Underlying(s)": ["DAX", "UKX", "IBEX"],
"Strike": [15412.71, 7608.92, 8718.00],
"Launch Date": "15.02.2022",
"Final Valuation Day": "15.05.2023",
"Maturity: "30.05.2023"
"Cap": 120,
"Barrier": 70
\
Apply the above process, using the provided definitions to extract the key information, Ensure 'Underlying(s)' and 'Strike' are close. For Barrier, specify the percentage value.\
Text to extract is delimited by triple backtick:
```{text}```
'''
# "gpt-3.5-turbo-16k"
try:
print(f"prompting {pdf_id}")
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-16k")
# Extract the response text
chain = create_extraction_chain(schema, llm)
response_dict = chain.run(prompt)
response_dict['PDF ID'] = pdf_id
# Convert the dictionary to a DataFrame
response_df = pd.DataFrame([response_dict])
# Concatenate with the existing DataFrame
fields_df = pd.concat([fields_df, response_df], ignore_index=True)
# fields_df = fields_df.concat(response_dict, ignore_index=True)
time.sleep(1) # Adjust the delay as required
except Exception as e:
print(f"Error processing {pdf_id}: {e}")
return fields_df
| [
"Act as an expert in financial analysis, specializing in interpreting and extracting key data from financial term sheets. Your task is to extract the following fields and their associated value(s):\n Fields to Extrcact: ISIN, Issuer, Currency, Underlying(s), Strike, Launch Date, Final Valuation Day, Maturity, Cap, Barrier\n In cases of missing informations for Cap and Barrier fields mark them as: NaN.\n Use the following constraints delimited by triple backticks to extract the needed informations:\n ```\n - ISIN: Always 12 alphanumeric characters. If unclear, use any 12 alphanumeric characters in the document.\n - Issuer: Must be a bank name.\n - Currency: Must be a valid currency.\n - Underlying(s): Extract Bloomberg codes/tickers; multiple entries separated by commas.\n - Strike: Contains between two to six-digits and at least one decimals; find values close to 'Underlying(s)'.\n - Launch Date/Trade Date/Strike Date: In date format, excluding the issue date.Ensure to use the precise value as found in the input text.\n - Final Valuation Day/Redemption Valuation Date: In date format.\n - Maturity/Redemption date: In date format.\n - Cap: A number over 100; percentage close to an index.\n - Barrier/Bonus Barrier/Knock-In Barrier/Knock-Out Barrier: Percentage less than 100.\n ``` For clarity and accuracy, here is an example of the extracted fields and their associated values that you should produce from the the following \nFinal Terms and Conditions (our ref. CE4247RAI) as of February 16th, 2022\n15M Capped Bonus Certificate Plus Worst-of on DAX®, FTSE100\nand IBEX 35® in USD Quanto\nIssuer\nBNP Paribas Issuance B.V. (S&P's A+)\nGuarantor\nBNP Paribas (S&P's A+ / Moody's Aa3 / Fitch AA-)\nIssue Type\nCertificate\nIssue Amount\nUSD 1,600,000\nNumber of Certificates\n1,600\nNotional Amount per\nCertificate (N)\n1 Certificate = USD 1,000\nCurrency\nUSD Quanto\nIssue Price per\nCertificate\n100.00%\nListing\nNone\nTrade Date\nFebruary 15th, 2022\nStrike Date\nFebruary 15th, 2022\nIssue Date\nMarch 01st, 2022\nRedemption Valuation\nMay 15th, 2023\nDate\nRedemption Date\nMay 30th, 2023\nUnderlying Indices\ni\nName of Underlying\nBloomberg\nIndexi\nAdministrator\nRegister\nIndexi\nInitial\nCode\n1\nDAX®\nDAX\n15412.71\nSTOXX Ltd.\nIncluded\n2\nFTSE100\nUKX\n7608.92\nFTSE\nIncluded\nInternational\nLimited\n3\nIBEX 35®\nIBEX\n8718.00\nSOCIEDAD\nIncluded\nDE BOLSAS\nS.A.\n-\nFinal Redemption\nOn the Redemption Date, the Issuer shall redeem each Certificate at the following Cash\nSettlement Amount:\n1) If WO IndexFinal is greater than or equal to 120% x WO IndexInitial:\nN x 120%\n2) If a Knock-out Event has not occurred and WO IndexFinal is less than 120% x WO\nIndexInitial:\nN x max\n108.20%.\nWO IndexFinal\nWO IndexInitial\nEquity Derivatives Solutions / Structured Products - Funds Of Funds /\n1\nFamily Offices\[email protected]\n\nBNP PARIBAS\nCORPORATE & INSTITUTIONAL BANKING\nThe bank for a changing world\n3) If a Knock-out Event has occurred:\nN x\nWO IndexFinal\nWO Index Initial\nWhere\nWO Index is the Underlying Index with the worst performance from the Strike Date to the\nRedemption Valuation Date, defined as:\n\" IndexInitial.\n3\nIndex 'Final\nWO IndexInitial is the official closing level of WO Index on the Strike Date.\nWO IndexFinal is the official closing level of WO Index on the Redemption Valuation Date.\nIndexi\nInitial with i from 1 to 3 is the official closing level of the Indexi\non the Strike Date.\nIndexi\nFinal with i from 1 to 3 is the official closing level of the Indexi\non 
the Redemption\nValuation Date.\nKnock-out Level\nDAX® - 10,788.8970 (70% of Index1\nFTSE100 - 5,326.2440 (70% of Index2\nInitial)\nIBEX 35® - 6,102.60 (70% of Index3\nInitial)\nInitial)\nKnock-out\nDetermination Day\nThe Redemption Valuation Date.\nKnock-out Valuation\nTime\nSpecific Scheduled Closing Time of each Underlying Index on the Redemption Valuation Date.\nKnock-out Event\nA Knock-out Event shall be deemed to occur if, at the Knock-out Valuation Time on the Knock-\nout Determination Day, at least one Underlying Index closes at a level strictly less than its Knock-\nout Level.\n:\n \"ISIN\": \"XS2033997748\",\n \"Issuer\": \"BNP\",\n \"Currency\": \"USD\",\n \"Underlying(s)\": [\"DAX\", \"UKX\", \"IBEX\"],\n \"Strike\": [15412.71, 7608.92, 8718.00],\n \"Launch Date\": \"15.02.2022\",\n \"Final Valuation Day\": \"15.05.2023\",\n \"Maturity: \"30.05.2023\"\n \"Cap\": 120,\n \"Barrier\": 70\n \n Apply the above process, using the provided definitions to extract the key information, Ensure 'Underlying(s)' and 'Strike' are close. For Barrier, specify the percentage value. Text to extract is delimited by triple backtick:\n ```PLACEHOLDER```\n "
] |
2024-01-10 | LucaZoss/NLP_LLM-PDFParser | production~code~extraction.py | import os
import json
import time
from openai import OpenAI
from typing import Dict
import pandas as pd
from dotenv import load_dotenv
from pathlib import Path
env_path = Path(
'/Users/lucazosso/Desktop/IE_Course/Hackathon/production/ATT85165.env')
load_dotenv(dotenv_path=env_path)
# Load your OpenAI API key
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))
print("welcome to the matrix")
# COLUMNS for Pandas DF
COLUMNS = ['PDF ID', 'ISIN', 'Issuer', 'Currency',
'Underlying(s)', 'Strike', 'Launch Date', 'Final Valuation Day', 'Maturity', 'Cap', 'Barrier']
text_example = '''Final Terms and Conditions (our ref. CE4247RAI) as of February 16th, 2022
15M Capped Bonus Certificate Plus Worst-of on DAX®, FTSE100
and IBEX 35® in USD Quanto
Issuer
BNP Paribas Issuance B.V. (S&P's A+)
Guarantor
BNP Paribas (S&P's A+ / Moody's Aa3 / Fitch AA-)
Issue Type
Certificate
Issue Amount
USD 1,600,000
Number of Certificates
1,600
Notional Amount per
Certificate (N)
1 Certificate = USD 1,000
Currency
USD Quanto
Issue Price per
Certificate
100.00%
Listing
None
Trade Date
February 15th, 2022
Strike Date
February 15th, 2022
Issue Date
March 01st, 2022
Redemption Valuation
May 15th, 2023
Date
Redemption Date
May 30th, 2023
Underlying Indices
i
Name of Underlying
Bloomberg
Indexi
Administrator
Register
Indexi
Initial
Code
1
DAX®
DAX
15412.71
STOXX Ltd.
Included
2
FTSE100
UKX
7608.92
FTSE
Included
International
Limited
3
IBEX 35®
IBEX
8718.00
SOCIEDAD
Included
DE BOLSAS
S.A.
-
Final Redemption
On the Redemption Date, the Issuer shall redeem each Certificate at the following Cash
Settlement Amount:
1) If WO IndexFinal is greater than or equal to 120% x WO IndexInitial:
N x 120%
2) If a Knock-out Event has not occurred and WO IndexFinal is less than 120% x WO
IndexInitial:
N x max
108.20%.
WO IndexFinal
WO IndexInitial
Equity Derivatives Solutions / Structured Products - Funds Of Funds /
1
Family Offices
[email protected]
BNP PARIBAS
CORPORATE & INSTITUTIONAL BANKING
The bank for a changing world
3) If a Knock-out Event has occurred:
N x
WO IndexFinal
WO Index Initial
Where
WO Index is the Underlying Index with the worst performance from the Strike Date to the
Redemption Valuation Date, defined as:
" IndexInitial.
3
Index 'Final
WO IndexInitial is the official closing level of WO Index on the Strike Date.
WO IndexFinal is the official closing level of WO Index on the Redemption Valuation Date.
Indexi
Initial with i from 1 to 3 is the official closing level of the Indexi
on the Strike Date.
Indexi
Final with i from 1 to 3 is the official closing level of the Indexi
on the Redemption
Valuation Date.
Knock-out Level
DAX® - 10,788.8970 (70% of Index1
FTSE100 - 5,326.2440 (70% of Index2
Initial)
IBEX 35® - 6,102.60 (70% of Index3
Initial)
Initial)
Knock-out
Determination Day
The Redemption Valuation Date.
Knock-out Valuation
Time
Specific Scheduled Closing Time of each Underlying Index on the Redemption Valuation Date.
Knock-out Event
A Knock-out Event shall be deemed to occur if, at the Knock-out Valuation Time on the Knock-
out Determination Day, at least one Underlying Index closes at a level strictly less than its Knock-
out Level.
'''
# Main extraction function: prompts gpt-3.5-turbo-16k for a JSON record per term sheet and collects the parsed rows in a DataFrame
def extract_fields(pdf_texts: Dict[str, str]) -> pd.DataFrame:
fields_df = pd.DataFrame(columns=COLUMNS)
for pdf_id, text in pdf_texts.items():
prompt = f'''Act as an expert in financial analysis, specializing in interpreting and extracting key data from financial term sheets.\
Your task is to extract the following fields and their associated value(s), and return them in a proper JSON format witht the following keys:
ISIN, Issuer, Currency, Underlying(s), Strike, Launch Date, Final Valuation Day, Maturity, Cap, Barrier
In cases of missing informations for Cap and Barrier fields mark them as: NaN.\
Use the following constraints delimited by triple backticks to extract the needed informations:
```
- ISIN: Always 12 alphanumeric characters. If unclear, use any 12 alphanumeric characters in the document.
- Issuer: Must be a bank name.
- Currency: Must be a valid currency.
- Underlying(s): Extract Bloomberg codes/tickers; multiple entries separated by commas.
- Strike: Contains between two to six-digits and at least one decimals; find values close to 'Underlying(s)'.
- Launch Date/Trade Date/Strike Date: In date format, excluding the issue date.Ensure to use the precise value as found in the input text.
- Final Valuation Day/Redemption Valuation Date: In date format.
- Maturity/Redemption date: In date format.
- Cap: A number over 100; percentage close to an index.
- Barrier/Bonus Barrier/Knock-In Barrier/Knock-Out Barrier: Percentage less than 100.
```\
For clarity and accuracy, here is an example of the extracted fields and their associated values that you should produce from the the following {text_example},(Remember the output should be in JSON Format.):
"ISIN": "XS2033997748",
"Issuer": "BNP",
"Currency": "USD",
"Underlying(s)": ["DAX", "UKX", "IBEX"],
"Strike": [15412.71, 7608.92, 8718.00],
"Launch Date": "15.02.2022",
"Final Valuation Day": "15.05.2023",
"Maturity: "30.05.2023"
"Cap": 120,
"Barrier": 70
\
Apply the above process, using the provided definitions to extract the key information, Ensure 'Underlying(s)' and 'Strike' are close. For Barrier, specify the percentage value.\
Text to extract is delimited by triple backtick:
```{text}```
'''
# "gpt-3.5-turbo-16k"
try:
print(f"prompting {pdf_id}")
completion = client.chat.completions.create(
model="gpt-3.5-turbo-16k",
messages=[{"role": "user", "content": prompt}],
)
# Extract the response text
response = completion.choices[0].message.content
response_dict = json.loads(response)
response_dict['PDF ID'] = pdf_id
# Convert the dictionary to a DataFrame
response_df = pd.DataFrame([response_dict])
# Concatenate with the existing DataFrame
fields_df = pd.concat([fields_df, response_df], ignore_index=True)
# fields_df = fields_df.concat(response_dict, ignore_index=True)
time.sleep(1) # Adjust the delay as required
except Exception as e:
print(f"Error processing {pdf_id}: {e}")
return fields_df
# add loggings
# Be cautious with data types: use strings for text and dates, and use numbers (floats or integers) for numerical values.\
| [
"Act as an expert in financial analysis, specializing in interpreting and extracting key data from financial term sheets. Your task is to extract the following fields and their associated value(s), and return them in a proper JSON format witht the following keys:\n ISIN, Issuer, Currency, Underlying(s), Strike, Launch Date, Final Valuation Day, Maturity, Cap, Barrier\n In cases of missing informations for Cap and Barrier fields mark them as: NaN.\n Use the following constraints delimited by triple backticks to extract the needed informations:\n ```\n - ISIN: Always 12 alphanumeric characters. If unclear, use any 12 alphanumeric characters in the document.\n - Issuer: Must be a bank name.\n - Currency: Must be a valid currency.\n - Underlying(s): Extract Bloomberg codes/tickers; multiple entries separated by commas.\n - Strike: Contains between two to six-digits and at least one decimals; find values close to 'Underlying(s)'.\n - Launch Date/Trade Date/Strike Date: In date format, excluding the issue date.Ensure to use the precise value as found in the input text.\n - Final Valuation Day/Redemption Valuation Date: In date format.\n - Maturity/Redemption date: In date format.\n - Cap: A number over 100; percentage close to an index.\n - Barrier/Bonus Barrier/Knock-In Barrier/Knock-Out Barrier: Percentage less than 100.\n ``` For clarity and accuracy, here is an example of the extracted fields and their associated values that you should produce from the the following Final Terms and Conditions (our ref. CE4247RAI) as of February 16th, 2022\n15M Capped Bonus Certificate Plus Worst-of on DAX®, FTSE100\nand IBEX 35® in USD Quanto\nIssuer\nBNP Paribas Issuance B.V. (S&P's A+)\nGuarantor\nBNP Paribas (S&P's A+ / Moody's Aa3 / Fitch AA-)\nIssue Type\nCertificate\nIssue Amount\nUSD 1,600,000\nNumber of Certificates\n1,600\nNotional Amount per\nCertificate (N)\n1 Certificate = USD 1,000\nCurrency\nUSD Quanto\nIssue Price per\nCertificate\n100.00%\nListing\nNone\nTrade Date\nFebruary 15th, 2022\nStrike Date\nFebruary 15th, 2022\nIssue Date\nMarch 01st, 2022\nRedemption Valuation\nMay 15th, 2023\nDate\nRedemption Date\nMay 30th, 2023\nUnderlying Indices\ni\nName of Underlying\nBloomberg\nIndexi\nAdministrator\nRegister\nIndexi\nInitial\nCode\n1\nDAX®\nDAX\n15412.71\nSTOXX Ltd.\nIncluded\n2\nFTSE100\nUKX\n7608.92\nFTSE\nIncluded\nInternational\nLimited\n3\nIBEX 35®\nIBEX\n8718.00\nSOCIEDAD\nIncluded\nDE BOLSAS\nS.A.\n-\nFinal Redemption\nOn the Redemption Date, the Issuer shall redeem each Certificate at the following Cash\nSettlement Amount:\n1) If WO IndexFinal is greater than or equal to 120% x WO IndexInitial:\nN x 120%\n2) If a Knock-out Event has not occurred and WO IndexFinal is less than 120% x WO\nIndexInitial:\nN x max\n108.20%.\nWO IndexFinal\nWO IndexInitial\nEquity Derivatives Solutions / Structured Products - Funds Of Funds /\n1\nFamily Offices\[email protected]\n\nBNP PARIBAS\nCORPORATE & INSTITUTIONAL BANKING\nThe bank for a changing world\n3) If a Knock-out Event has occurred:\nN x\nWO IndexFinal\nWO Index Initial\nWhere\nWO Index is the Underlying Index with the worst performance from the Strike Date to the\nRedemption Valuation Date, defined as:\n\" IndexInitial.\n3\nIndex 'Final\nWO IndexInitial is the official closing level of WO Index on the Strike Date.\nWO IndexFinal is the official closing level of WO Index on the Redemption Valuation Date.\nIndexi\nInitial with i from 1 to 3 is the official closing level of the Indexi\non the Strike Date.\nIndexi\nFinal with i from 1 to 3 is 
the official closing level of the Indexi\non the Redemption\nValuation Date.\nKnock-out Level\nDAX® - 10,788.8970 (70% of Index1\nFTSE100 - 5,326.2440 (70% of Index2\nInitial)\nIBEX 35® - 6,102.60 (70% of Index3\nInitial)\nInitial)\nKnock-out\nDetermination Day\nThe Redemption Valuation Date.\nKnock-out Valuation\nTime\nSpecific Scheduled Closing Time of each Underlying Index on the Redemption Valuation Date.\nKnock-out Event\nA Knock-out Event shall be deemed to occur if, at the Knock-out Valuation Time on the Knock-\nout Determination Day, at least one Underlying Index closes at a level strictly less than its Knock-\nout Level.\n,(Remember the output should be in JSON Format.):\n\n \"ISIN\": \"XS2033997748\",\n \"Issuer\": \"BNP\",\n \"Currency\": \"USD\",\n \"Underlying(s)\": [\"DAX\", \"UKX\", \"IBEX\"],\n \"Strike\": [15412.71, 7608.92, 8718.00],\n \"Launch Date\": \"15.02.2022\",\n \"Final Valuation Day\": \"15.05.2023\",\n \"Maturity: \"30.05.2023\"\n \"Cap\": 120,\n \"Barrier\": 70\n \n Apply the above process, using the provided definitions to extract the key information, Ensure 'Underlying(s)' and 'Strike' are close. For Barrier, specify the percentage value. Text to extract is delimited by triple backtick:\n ```PLACEHOLDER```\n "
] |
2024-01-10 | LucaZoss/NLP_LLM-PDFParser | production~extras_toppings.py | # Sausage_999
from pathlib import Path
from dotenv import load_dotenv
import pandas as pd
from typing import Dict
from openai import OpenAI
import time
import json
import os
import spacy
from transformers import pipeline
import re
# Define your financial terms
terms = ['ISIN', 'Issuer', 'Currency', 'Underlying\(s\)', 'Strike', 'Launch Date',
'Final Valuation Day', 'Maturity', 'Cap', 'Barrier', 'Bloomberg Code', 'ETI']
# Create regex patterns (case-insensitive)
patterns = {term: re.compile(term, re.IGNORECASE) for term in terms}
def extract_lines(text):
lines = text.split('\n')
matching_indices = set()
for i, line in enumerate(lines):
for pattern in patterns.values():
if pattern.search(line):
matching_indices.update(
{max(0, i-5), i, min(i+5, len(lines)-1)})
break
# Extract lines based on matching indices
extracted_lines = [lines[i] for i in sorted(matching_indices)]
return extracted_lines
# Example text (replace this with your actual text)
text = text
# Extract lines
sausage_999 = []
matching_lines = extract_lines(text)
for line in matching_lines:
sausage_999.append(line)
sausage_999_string = str(" ".join(sausage_999))
print(type(sausage_999_string))
print(sausage_999_string)
# summarizers_transformers
# Assuming 'sausage_999_string' is your input string
input_text = sausage_999_string
# Split the text into chunks of approximately 1024 tokens
# This is a simplistic split and might need adjustment based on actual content
max_length = 1024
chunks = [input_text[i:i+max_length]
for i in range(0, len(input_text), max_length)]
# Initialize the summarization pipeline
pipe = pipeline("summarization",
model="nickmuchi/fb-bart-large-finetuned-trade-the-event-finance-summarizer")
# Summarize each chunk
summaries = [pipe(chunk)[0]['summary_text'] for chunk in chunks]
# Combine summaries (optional)
final_summary = ' '.join(summaries)
print(final_summary)
# Spacy Summarizer
# Load the Spacy model
nlp = spacy.load('en_core_web_sm')
# Process the text
doc = nlp(testing_sausage_1)
sausage_999 = []
# Extract entities
for ent in doc.ents:
var = ent.text
var_0 = ent.label_
sausage_999.append(ent.text)
# print(ent.text, ent.label_)
type(sausage_999)
print(sausage_999)
# Zero-Class-Classifier
classifier = pipeline("zero-shot-classification",
model="MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli", use_fast=False)
candidate_labels = ['ISIN', 'Issuer', 'Ccy',
'Underlying(s)', 'Strike', 'Launch Date', 'Final Valuation Day', 'Maturity', 'Cap', 'Barrier']
# PLACE HOLDER
| [] |
2024-01-10 | javidlt/EmbeddingsPlayground | pages~01Generator.py | import streamlit as st
from streamlit import session_state as ss
import pandas as pd
from sentence_transformers import SentenceTransformer, util
import torch
import operator
import cohere
import numpy as np
st.set_page_config(
page_title="Generador",
page_icon="👋",
)
st.write("# Generar embeddings")
def generate(mod, df, colText):
embedder = SentenceTransformer(mod)
tot = len(df)
dfW = df
dfW["Embedding"] = None
progress_text = "Generando embeddings"
my_bar = st.progress(0, text=progress_text)
for index, row in dfW.iterrows():
pro = int(((index+1)/tot)*100)
embedding = embedder.encode(str(row[colText])).tolist()
dfW.at[index, 'Embedding'] = embedding
my_bar.progress(pro, text=progress_text)
return dfW
def generateCohere(mod, df, colText, apiKey):
    co = cohere.Client(apiKey)
    doc_emb = co.embed(df[colText].astype(str).tolist(), input_type="search_document", model=mod).embeddings
    doc_emb = np.asarray(doc_emb)
    # Return a DataFrame (not the raw array) so the caller can use .to_dict()
    # on the result, exactly as with generate().
    dfW = df.copy()
    dfW["Embedding"] = doc_emb.tolist()
    return dfW
def convert_to_json(df):
jsonToRet = pd.DataFrame.from_dict(df)
return jsonToRet.to_json(index=False)
def convert_to_csv(df):
csvToRet = pd.DataFrame.from_dict(df)
return csvToRet.to_csv(index=False)
if 'listOfFilesNamesGenerate' not in st.session_state:
st.session_state.listOfFilesNamesGenerate = []
if 'listOfDictsGenerateEmbd' not in st.session_state:
st.session_state.listOfDictsGenerateEmbd = []
if 'indexOfDataset' not in st.session_state:
st.session_state.indexOfDataset = 0
if 'uploaded_file_count' not in st.session_state:
st.session_state.uploaded_file_count = 0
if 'dfWithGeneratedEmbeddings' not in st.session_state:
st.session_state.dfWithGeneratedEmbeddings = {}
if 'datasetToUseGen' not in st.session_state:
st.session_state.datasetToUseGen = ""
uploaded_fileCount = st.session_state.uploaded_file_count
datasetToUse = st.session_state.datasetToUseGen
uploaded_file = st.sidebar.file_uploader("Choose a file", type=["csv", "excel", "json"])
if uploaded_file is not None and (uploaded_file.name not in st.session_state.listOfFilesNamesGenerate):
if st.sidebar.button('usar archivo'):
uploaded_fileCount = uploaded_fileCount+1
if uploaded_file is not None and (uploaded_fileCount != st.session_state.uploaded_file_count):
# Can be used wherever a "file-like" object is accepted:
if uploaded_file.name.endswith('.csv'):
df = pd.read_csv(uploaded_file)
elif uploaded_file.name.endswith('.xlsx'):
df = pd.read_excel(uploaded_file)
elif uploaded_file.name.endswith('.json'):
df = pd.read_json(uploaded_file)
dictEmbd = df.to_dict()
st.session_state.listOfDictsGenerateEmbd.append(dictEmbd)
st.session_state.listOfFilesNamesGenerate.append(uploaded_file.name)
st.session_state.uploaded_file_count = st.session_state.uploaded_file_count+1
if st.session_state.listOfDictsGenerateEmbd != []:
st.session_state.datasetToUseGen = st.sidebar.radio("Dataset a usar", st.session_state.listOfFilesNamesGenerate)
st.session_state.indexOfDataset = st.session_state.listOfFilesNamesGenerate.index(st.session_state.datasetToUseGen)
dfEmbd = pd.DataFrame.from_dict(st.session_state.listOfDictsGenerateEmbd[st.session_state.indexOfDataset])
column_names = list(dfEmbd.columns.values)
st.session_state.columnGenWiText = st.selectbox('Nombre de columna con texto', column_names)
with st.container():
col1, col2 = st.columns(2)
with col1:
st.session_state.typeGen = st.radio("Modelo para embeddings",["**default**", "**Cualquier modelo huggingFace**"],)
with col2:
if st.session_state.typeGen == "**default**":
st.session_state.modelGen = st.selectbox(
'Modelo',
('ggrn/e5-small-v2', 'Cohere/Cohere-embed-english-v3.0', 'Cohere/Cohere-embed-multilingual-v3.0', 'intfloat/multilingual-e5-small', 'intfloat/e5-small-v2', 'sentence-transformers/all-MiniLM-L6-v2'))
else:
st.session_state.modelGen = st.text_input('Modelo')
if 'Cohere' in st.session_state.modelGen:
st.session_state.CohereAPIGenerate = st.text_input('API KEY')
dfFinal = pd.DataFrame()
if st.button('Generar embeddings', type="primary"):
if 'Cohere' in st.session_state.modelGen:
dfFinal = generateCohere(st.session_state.modelGen, dfEmbd,st.session_state.columnGenWiText, st.session_state.CohereAPIGenerate)
else:
print("si entra")
dfFinal = generate(st.session_state.modelGen, dfEmbd,st.session_state.columnGenWiText)
print(dfFinal)
st.session_state.dfWithGeneratedEmbeddings = dfFinal.to_dict()
if st.session_state.dfWithGeneratedEmbeddings != {}:
json = convert_to_json(st.session_state.dfWithGeneratedEmbeddings)
csv = convert_to_csv(st.session_state.dfWithGeneratedEmbeddings)
with st.container ():
col1, col2 = st.columns(2)
with col1:
st.download_button(
"Descargar json",
json,
f"{st.session_state.listOfFilesNamesGenerate[st.session_state.indexOfDataset]}_Embeddings.json",
"text/json",
key='download-json'
)
with col2:
st.download_button(
"Descargar csv",
csv,
f"{st.session_state.listOfFilesNamesGenerate[st.session_state.indexOfDataset]}_Embeddings.csv",
"text/csv",
key='download-csv'
)
dfToPrint = pd.DataFrame.from_dict(st.session_state.dfWithGeneratedEmbeddings)
if datasetToUse != st.session_state.datasetToUseGen:
st.markdown("**Se ha cambiado el dataset con el que estas trabajando, descarga el resultado o se borrará tu avance cuando des click a generar.**")
st.write(dfToPrint)
else:
st.markdown(
"""
### Pasos
- Subir json, csv o excel con la columna de texto con la que deseas generar los embeddings
- Escribir cuál es la columna del texto
- Seleccionar el modelo con el que se harán los embeddings
- Exportar json con tus embeddings para usarlo
"""
) | [] |
2024-01-10 | astooke/rlpyt | rlpyt~models~running_mean_std.py |
import torch
import torch.distributed as dist
from rlpyt.utils.tensor import infer_leading_dims
class RunningMeanStdModel(torch.nn.Module):
"""Adapted from OpenAI baselines. Maintains a running estimate of mean
and variance of data along each dimension, accessible in the `mean` and
`var` attributes. Supports multi-GPU training by all-reducing statistics
across GPUs."""
def __init__(self, shape):
super().__init__()
self.register_buffer("mean", torch.zeros(shape))
self.register_buffer("var", torch.ones(shape))
self.register_buffer("count", torch.zeros(()))
self.shape = shape
def update(self, x):
_, T, B, _ = infer_leading_dims(x, len(self.shape))
x = x.view(T * B, *self.shape)
batch_mean = x.mean(dim=0)
batch_var = x.var(dim=0, unbiased=False)
batch_count = T * B
if dist.is_initialized(): # Assume need all-reduce.
mean_var = torch.stack([batch_mean, batch_var])
dist.all_reduce(mean_var)
world_size = dist.get_world_size()
mean_var /= world_size
batch_count *= world_size
batch_mean, batch_var = mean_var[0], mean_var[1]
if self.count == 0:
self.mean[:] = batch_mean
self.var[:] = batch_var
else:
delta = batch_mean - self.mean
total = self.count + batch_count
self.mean[:] = self.mean + delta * batch_count / total
m_a = self.var * self.count
m_b = batch_var * batch_count
M2 = m_a + m_b + delta ** 2 * self.count * batch_count / total
self.var[:] = M2 / total
self.count += batch_count
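
# Minimal usage sketch (an illustrative addition, not part of rlpyt): feed a batch
# of observations with leading batch (or time, batch) dims and read back the
# running statistics maintained by the module's buffers.
if __name__ == "__main__":
    obs_stat = RunningMeanStdModel(shape=(4,))
    obs_stat.update(torch.randn(128, 4))  # 128 samples of a 4-dim observation
    print(obs_stat.mean, obs_stat.var, obs_stat.count)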
| [] |
2024-01-10 | linbeta/ChatGPT_Utilities | abstractor.py | # -*- coding: utf-8 -*-
import openai
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
PROMPT = """
你是一個精通於繁體中文的研究報告撰寫助手,協助我撰寫水位/濁度AI模型優化與驗證的報告。
關於我的研究計畫,有以下幾個要點:
1. 這是一個防汛相關的地理資訊系統(GIS),以下簡稱本系統,主要的客戶是臺北水源特定區管理分署,可以從圖台上看到雨量站、水位站及濁度站的觀測數值與AI預報數值。
2. 本系統除了水位濁度觀測與預測以外,還介接了許多外部API,可於本系統中便利的觀測氣象、降雨資訊。本系統也開發許多與防汛有關的工具,例如傳真通報發送工具、通訊錄、查看CCTV影像、彙整報表等等。
3. 本系統需要針對水位/濁度AI模型的整體工作框架進行優化,現有的AI模型是以之前3-5年的水位、濁度觀測數據來進行LSTM模型的訓練。去年計畫以驗證有不錯的成效。
4. 本案今年將導入機器學習生命週期的概念來優化整體AI預報模式。
5. 本案今年將導入MLFlow來做為未來AI模型優化的工具。
"""
filecontent = """
在機器學習生命週期的階段中,本年度聚焦在運行中的模型之成效監控,以實務運用的成效分析,驗證此模型是否達到本案業務目標(是否達到business goal),並以此監控分析來調整運行中的模型監控項目、模型開發(包含模型調校fine tunine)甚至未來的資料收集與資料預處理流程。
1. 請為我詳細介紹何謂機器機器學習生命週期(Machine Learning Lifecycle)?
包含那些階段?各階段如何設計?是否有標準化的生命週期架構?
2. 以本案水位/濁度預報的LSTM模型而言,實務上可以如何規劃這樣的機器學習生命週期?
這部份請幫我詳細介紹各個階段的具體內容與方法,主標題須包含中文及英文,共提供約1000至1500字篇幅之論述與報告書內容。
"""
def shorten_content(filecontent: str, model_name, filename: str):
messages = [
{"role": "system", "content": PROMPT},
{"role": "user", "content": f"{filecontent}"},
]
print("processing....")
resp = openai.ChatCompletion.create(
model = model_name,
messages = messages,
temperature = 0.8
)
output = resp["choices"][0]["message"]["content"]
print(output)
usage = resp["usage"]
print(usage)
with open(filename, "w", encoding="utf-8") as f:
f.write(f"{output} \n\n [system PROMPT]\n{PROMPT} \n\n [user prompt]\n{filecontent}\n\n [Token and usage]\n{usage}")
if __name__ == "__main__":
shorten_content(filecontent, "gpt-4", "output-1.txt")
| [
"\n在機器學習生命週期的階段中,本年度聚焦在運行中的模型之成效監控,以實務運用的成效分析,驗證此模型是否達到本案業務目標(是否達到business goal),並以此監控分析來調整運行中的模型監控項目、模型開發(包含模型調校fine tunine)甚至未來的資料收集與資料預處理流程。\n\n1. 請為我詳細介紹何謂機器機器學習生命週期(Machine Learning Lifecycle)?\n包含那些階段?各階段如何設計?是否有標準化的生命週期架構?\n\n2. 以本案水位/濁度預報的LSTM模型而言,實務上可以如何規劃這樣的機器學習生命週期?\n這部份請幫我詳細介紹各個階段的具體內容與方法,主標題須包含中文及英文,共提供約1000至1500字篇幅之論述與報告書內容。\n",
"\n你是一個精通於繁體中文的研究報告撰寫助手,協助我撰寫水位/濁度AI模型優化與驗證的報告。\n關於我的研究計畫,有以下幾個要點:\n1. 這是一個防汛相關的地理資訊系統(GIS),以下簡稱本系統,主要的客戶是臺北水源特定區管理分署,可以從圖台上看到雨量站、水位站及濁度站的觀測數值與AI預報數值。\n2. 本系統除了水位濁度觀測與預測以外,還介接了許多外部API,可於本系統中便利的觀測氣象、降雨資訊。本系統也開發許多與防汛有關的工具,例如傳真通報發送工具、通訊錄、查看CCTV影像、彙整報表等等。\n3. 本系統需要針對水位/濁度AI模型的整體工作框架進行優化,現有的AI模型是以之前3-5年的水位、濁度觀測數據來進行LSTM模型的訓練。去年計畫以驗證有不錯的成效。\n4. 本案今年將導入機器學習生命週期的概念來優化整體AI預報模式。\n5. 本案今年將導入MLFlow來做為未來AI模型優化的工具。\n"
] |
2024-01-10 | linbeta/ChatGPT_Utilities | reviewer.py | import argparse
from dotenv import load_dotenv
import openai
import os
PROMPT = """
You will receive a file's contents as text.
Generate a code review for the file. Indicate what changes should be made to improve its style, performance, readability and maintainability.
If there are any reputable libraries that could be introduced to improve the code, please suggest them.
Be kind and constructive.
For each suggested change, include line numbers to which you are referring.
"""
def code_review(file_path: str, model_name: str):
with open(file_path, 'r') as f:
content = f.read()
reviews = make_code_review_request(content, model_name)
print(reviews)
def make_code_review_request(filecontent: str, model_name):
print(model_name)
messages = [
{"role": "system", "content": PROMPT},
{"role": "user", "content": f"Code review the following file:\n{filecontent}"},
]
resp = openai.ChatCompletion.create(
model = model_name,
messages = messages,
)
return resp["choices"][0]["message"]["content"]
def main():
parser = argparse.ArgumentParser(description="Code review the file with the path provided, default using gpt-3.5-turbo")
parser.add_argument("file")
parser.add_argument("--model", default="gpt-3.5-turbo")
args = parser.parse_args()
code_review(args.file, args.model)
if __name__ == "__main__":
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
main()
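# Example invocation (illustrative; assumes OPENAI_API_KEY is set in .env):
#   python reviewer.py path/to/some_module.py
#   python reviewer.py path/to/some_module.py --model gpt-4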
| [
"\nYou will recieve a file's contents as text.\nGenerate a code review for the file. Indicate what changes should be made to improve its style, performance, readability and manintainability.\nIf there are any reputable libraries that could be introduced to improve the code, please suggest them.\nBe kind and constructive.\nFor each suggested change, include line numbers to which your are referring.\n",
"Code review the following file:\nPLACEHOLDER"
] |
2024-01-10 | wufengchun/tanuki.py | examples~web_scraper~cocktail.py | import openai
import os
from dotenv import load_dotenv
from pydantic import BaseModel
from typing import List, Optional
load_dotenv()
import tanuki
from utils import scrape_url
openai.api_key = os.getenv("OPENAI_API_KEY")
class Cocktail(BaseModel):
name: str
ingredients: List[str] = []
instructions: str
similar: List[str] = []
@tanuki.patch
def extract_cocktail(content: str) -> Optional[Cocktail]:
"""
Examine the content string and extract the cocktail details for the ingredients, instructions, and similar cocktails.
"""
@tanuki.align
def align_extract_cocktail() -> None:
print("Aligning...")
cocktail = """Black Rose | Kindred Cocktails\n\n\n\n\n\n Skip to main content\n \n\n\n\n\n\nKindred Cocktails\n\n\nToggle navigation\n\n\n\n\n\n\n\n\nMain navigation\n\n\nHome\n\n\nCocktails\n\n\nNew\n\n\nInfo \n\n\nStyle guidelines\n\n\nIngredients\n\n\n\n\n\nMeasurement units\n\n\nHistoric Cocktail Books\n\n\nRecommended Brands\n\n\nAmari & Friends\n\n\nArticles & Reviews\n\n\n\n\n\nAbout us\n\n\nLearn More\n\n\nFAQ\n\n\nTerms of Use\n\n\nContact us\n\n\n\n\nYou \n\n\nLog in\n\n\nSign Up\n\n\nReset your password\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nHome\n\n\nCocktails\n\n\n Black Rose\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nCopy\n\n\n\n\nBlack Rose\n \n\n\n\n\n\n\n\n\n\n2 oz Bourbon\n\n1 ds Grenadine\n\n2 ds Peychaud's Bitters\n\n1 Lemon peel (flamed, for garnish)\n\n\n\nInstructions\nFill an old-fashioned glass three-quarters full with ice. Add the bourbon, grenadine, and bitters, and stir. Garnish with the lemon peel.\n\n\n\n\n\n\nCocktail summary\n\n\n\nPosted by\nThe Boston Shaker\n on \n4/12/2011\n\n\n\n\nIs of\nunknown authenticity\n\n\nReference\nDale Degroff, The Essential Cocktail, p48\n\n\n\nCurator\nNot yet rated\n\n\nAverage\n3.5 stars (6 ratings)\n\n\n\nYieldsDrink\n\n\nScale\n\n\nBourbon, Peychaud's Bitters, Grenadine, Lemon peel\nPT5M\nPT0M\nCocktail\nCocktail\n1\ncraft, alcoholic\n3.66667\n6\n\n\n\n\n\n\n\n\n\n\nCocktail Book\n\nLog in or sign up to start building your Cocktail Book.\n\n\n\n\nFrom other usersWith a modest grenadine dash, this drink didn't do much for me, but adding a bit more won me over.\nSimilar cocktailsNew Orleans Cocktail — Bourbon, Peychaud's Bitters, Orange Curaçao, Lemon peelOld Fashioned — Bourbon, Bitters, Sugar, Lemon peelBattle of New Orleans — Bourbon, Peychaud's Bitters, Absinthe, Orange bitters, Simple syrupImproved Whiskey Cocktail — Bourbon, Bitters, Maraschino Liqueur, Absinthe, Simple syrup, Lemon peelDerby Cocktail — Bourbon, Bénédictine, BittersMother-In-Law — Bourbon, Orange Curaçao, Maraschino Liqueur, Peychaud's Bitters, Bitters, Torani Amer, Simple syrupMint Julep — Bourbon, Rich demerara syrup 2:1, MintThe Journey — Bourbon, Mezcal, Hazelnut liqueurBenton's Old Fashioned — Bourbon, Bitters, Grade B maple syrup, Orange peelFancy Mint Julep — Bourbon, Simple syrup, Mint, Fine sugar\n\nComments\n\n\n\n\n\nLog in or register to post comments\n\n\n\n\n\n\n\n\n© 2010-2023 Dan Chadwick. Kindred Cocktails™ is a trademark of Dan Chadwick."""
assert extract_cocktail(cocktail) == Cocktail(
name="Black Rose",
ingredients=["2 oz Bourbon", "1 ds Grenadine", "2 ds Peychaud's Bitters", "1 Lemon peel (flamed, for garnish)"],
instructions="Fill an old-fashioned glass three-quarters full with ice. Add the bourbon, grenadine, and bitters, and stir. Garnish with the lemon peel.",
similar=["New Orleans Cocktail", "Old Fashioned", "Battle of New Orleans", "Improved Whiskey Cocktail", "Derby Cocktail", "Mother-In-Law", "Mint Julep", "The Journey", "Benton's Old Fashioned", "Fancy Mint Julep"],
)
if __name__ == '__main__':
# Align the function
align_extract_cocktail()
# Web scrape the url and extract the cocktail information
url = "https://kindredcocktails.com/cocktail/old-fashioned"
# url = "https://kindredcocktails.com/cocktail/journey"
contents = scrape_url(url=url)
print(contents)
# Process the cocktail block using Tanuki
cocktail = extract_cocktail(contents[0])
print(cocktail)
| [] |
2024-01-10 | wufengchun/tanuki.py | examples~web_scraper~streeteasy.py | from numpy import square
import openai
import os
from dotenv import load_dotenv
from pydantic import BaseModel
from typing import List, Optional
load_dotenv()
import tanuki
from utils import scrape_url
openai.api_key = os.getenv("OPENAI_API_KEY")
class Property(BaseModel):
neighborhood: str
address: str
price: float
fee: bool
beds: float
bath: float
listed_by: str
@tanuki.patch
def extract_property(content: str) -> Optional[Property]:
"""
    Examine the content string and extract the rental property details for the neighborhood, address,
    price, whether a broker fee applies, number of beds, number of bathrooms, and the company that is listing the property.
"""
@tanuki.align
def align_extract_property() -> None:
print("Aligning...")
unit_one = "Rental Unit in Lincoln Square\n \n\n\n229 West 60th Street #7H\n\n\n\n$7,250\nNO FEE\n\n\n\n\n\n\n\n\n2 Beds\n\n\n\n\n2 Baths\n\n\n\n\n\n 1,386\n square feet\nsq_ft\n\n\n\n\n\n Listing by Algin Management"
assert extract_property(unit_one) == Property(
neighborhood="Lincoln Square",
address="229 West 60th Street #7H",
price=7250.0,
fee=False,
beds=2.0,
bath=2.0,
listed_by="Algin Management",
)
if __name__ == '__main__':
# Align the function
align_extract_property()
# Web scrape the url and extract the rental property details
url = "https://streeteasy.com/2-bedroom-apartments-for-rent/manhattan?page=2"
contents = scrape_url(url=url, class_name="listingCardBottom")
print(contents)
# Process the rental property block using Tanuki
units = []
for content in contents[1:3]:
units.append(extract_property(content))
print(units)
| [] |
2024-01-10 | dtch1997/gpt-text-gym | gpt_text_gym~examples~minigrid_tools.py | import openai
import time
import minigrid # noqa
import gymnasium as gym
import re
import dotenv
import sympy
from typing import Dict, List, Tuple, Optional, Any
from minigrid.core.constants import COLOR_NAMES
from minigrid.core.grid import Grid
from minigrid.core.mission import MissionSpace
from minigrid.core.world_object import Door, Goal, Key, Wall, Ball, Box
from minigrid.manual_control import ManualControl
from minigrid.minigrid_env import MiniGridEnv
from gpt_text_gym import ROOT_DIR
LLM_MODEL = "gpt-4"
OPENAI_TEMPERATURE = 0.0
openai.api_key = dotenv.get_key(ROOT_DIR / ".env", "API_KEY")
class PutNearEnv(MiniGridEnv):
"""
## Description
The agent is instructed through a textual string to pick up an object and
place it next to another object. This environment is easy to solve with two
objects, but difficult to solve with more, as it involves both textual
understanding and spatial reasoning involving multiple objects.
## Mission Space
"put the {move_color} {move_type} near the {target_color} {target_type}"
{move_color} and {target_color} can be "red", "green", "blue", "purple",
"yellow" or "grey".
{move_type} and {target_type} Can be "box", "ball" or "key".
## Action Space
| Num | Name | Action |
|-----|--------------|-------------------|
| 0 | left | Turn left |
| 1 | right | Turn right |
| 2 | forward | Move forward |
| 3 | pickup | Pick up an object |
| 4 | drop | Drop an object |
| 5 | toggle | Unused |
| 6 | done | Unused |
## Observation Encoding
- Each tile is encoded as a 3 dimensional tuple:
`(OBJECT_IDX, COLOR_IDX, STATE)`
- `OBJECT_TO_IDX` and `COLOR_TO_IDX` mapping can be found in
[minigrid/minigrid.py](minigrid/minigrid.py)
- `STATE` refers to the door state with 0=open, 1=closed and 2=locked
## Rewards
A reward of '1 - 0.9 * (step_count / max_steps)' is given for success, and '0' for failure.
## Termination
The episode ends if any one of the following conditions is met:
1. The agent picks up the wrong object.
2. The agent drop the correct object near the target.
3. Timeout (see `max_steps`).
## Registered Configurations
N: number of objects.
- `MiniGrid-PutNear-6x6-N2-v0`
- `MiniGrid-PutNear-8x8-N3-v0`
"""
def __init__(self, size=6, numObjs=2, max_steps: int | None = None, **kwargs):
        # Guard the removal so constructing the env more than once in the same
        # process does not raise ValueError (COLOR_NAMES is a module-level list).
        if "grey" in COLOR_NAMES:
            COLOR_NAMES.remove("grey")
self.size = size
self.numObjs = numObjs
self.obj_types = ["key", "ball", "box"]
mission_space = MissionSpace(
mission_func=self._gen_mission,
ordered_placeholders=[
COLOR_NAMES,
self.obj_types,
COLOR_NAMES,
self.obj_types,
],
)
if max_steps is None:
max_steps = 5 * size
super().__init__(
mission_space=mission_space,
width=size,
height=size,
# Set this to True for maximum speed
see_through_walls=True,
max_steps=max_steps,
**kwargs,
)
@staticmethod
def _gen_mission(
move_color: str, move_type: str, target_color: str, target_type: str
):
return f"put the {move_color} {move_type} near the {target_color} {target_type}"
def _gen_grid(self, width, height):
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.horz_wall(0, 0)
self.grid.horz_wall(0, height - 1)
self.grid.vert_wall(0, 0)
self.grid.vert_wall(width - 1, 0)
# Types and colors of objects we can generate
types = ["key", "ball", "box"]
objs = []
objPos = []
def near_obj(env, p1):
for p2 in objPos:
dx = p1[0] - p2[0]
dy = p1[1] - p2[1]
if abs(dx) <= 1 and abs(dy) <= 1:
return True
return False
# Until we have generated all the objects
while len(objs) < self.numObjs:
objType = self._rand_elem(types)
objColor = self._rand_elem(COLOR_NAMES)
# If this object already exists, try again
if (objType, objColor) in objs:
continue
if objType == "key":
obj = Key(objColor)
elif objType == "ball":
obj = Ball(objColor)
elif objType == "box":
obj = Box(objColor)
else:
raise ValueError(
"{} object type given. Object type can only be of values key, ball and box.".format(
objType
)
)
pos = self.place_obj(obj, reject_fn=near_obj)
objs.append((objType, objColor))
objPos.append(pos)
# Randomize the agent start position and orientation
self.place_agent()
# Choose a random object to be moved
objIdx = self._rand_int(0, len(objs))
self.move_type, self.moveColor = objs[objIdx]
self.move_pos = objPos[objIdx]
# Choose a target object (to put the first object next to)
while True:
targetIdx = self._rand_int(0, len(objs))
if targetIdx != objIdx:
break
self.target_type, self.target_color = objs[targetIdx]
self.target_pos = objPos[targetIdx]
self.mission = "put the {} {} near the {} {}".format(
self.moveColor,
self.move_type,
self.target_color,
self.target_type,
)
def step(self, action):
preCarrying = self.carrying
obs, reward, terminated, truncated, info = super().step(action)
u, v = self.dir_vec
ox, oy = (self.agent_pos[0] + u, self.agent_pos[1] + v)
tx, ty = self.target_pos
# If we picked up the wrong object, terminate the episode
if action == self.actions.pickup and self.carrying:
if (
self.carrying.type != self.move_type
or self.carrying.color != self.moveColor
):
terminated = True
# If successfully dropping an object near the target
if action == self.actions.drop and preCarrying:
if self.grid.get(ox, oy) is preCarrying:
if abs(ox - tx) <= 1 and abs(oy - ty) <= 1:
reward = self._reward()
terminated = True
return obs, reward, terminated, truncated, info
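
# Illustrative helper (an addition, not part of the original file): shows the
# (OBJECT_IDX, COLOR_IDX, STATE) tile encoding described in the PutNearEnv
# docstring. The exact index values come from minigrid.core.constants and may
# differ between MiniGrid versions.
def _demo_tile_encoding():
    from minigrid.core.constants import COLOR_TO_IDX, OBJECT_TO_IDX

    # e.g. a green key tile: object index for "key", color index for "green",
    # and state 0 (state only matters for doors: 0=open, 1=closed, 2=locked).
    return (OBJECT_TO_IDX["key"], COLOR_TO_IDX["green"], 0)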
def openai_call(
prompt: str,
model: str = LLM_MODEL,
temperature: float = OPENAI_TEMPERATURE,
max_tokens: int = 100,
):
while True:
try:
trimmed_prompt = prompt
# TODO: Enable trimmed prompt.
# Use 4000 instead of the real limit (4097) to give a bit of wiggle room for the encoding of roles.
# TODO: different limits for different models.
# trimmed_prompt = limit_tokens_from_string(prompt, model, 4000 - max_tokens)
# Use chat completion API
messages = [{"role": "system", "content": trimmed_prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
n=1,
stop=None,
)
return response.choices[0].message.content.strip()
except openai.error.RateLimitError:
print(
" *** The OpenAI API rate limit has been exceeded. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.Timeout:
print(
" *** OpenAI API timeout occurred. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.APIError:
print(
" *** OpenAI API error occurred. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.APIConnectionError:
print(
" *** OpenAI API connection error occurred. Check your network settings, proxy configuration, SSL certificates, or firewall rules. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.InvalidRequestError:
print(
" *** OpenAI API invalid request. Check the documentation for the specific API method you are calling and make sure you are sending valid and complete parameters. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.ServiceUnavailableError:
print(
" *** OpenAI API service unavailable. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
else:
break
def get_objects(env: gym.Env) -> List[str]:
env_str = str(env.unwrapped)
objects = []
OBJECT_TO_STR = {
"wall": "W",
"floor": "F",
"door": "D",
"key": "K",
"ball": "A",
"box": "B",
"goal": "G",
"lava": "V",
}
STR_TO_OBJECT = {v: k for k, v in OBJECT_TO_STR.items()}
# Map agent's direction to short string
AGENT_DIR_TO_STR = {0: ">", 1: "V", 2: "<", 3: "^"}
STR_TO_AGENT_DIR = {v: k for k, v in AGENT_DIR_TO_STR.items()}
# Map of colors to short string
COLOR_TO_STR = {
"red": "R",
"green": "G",
"blue": "B",
"purple": "P",
"yellow": "Y",
}
STR_TO_COLOR = {v: k for k, v in COLOR_TO_STR.items()}
rows = env_str.split("\n")
n_rows = 6
n_cols = 6
for row in range(n_rows):
for col in range(n_cols):
cell = rows[row][2 * col : 2 * col + 2]
if cell == " ":
# empty cell
continue
elif cell[0] in STR_TO_AGENT_DIR and cell[0] == cell[1]:
# agent
continue
elif cell[0] in ("W", "F", "V"):
# wall, floor, or lava
# Skip for now
continue
# object_name = STR_TO_OBJECT[cell[0]]
else:
# interactable object
object_type = STR_TO_OBJECT[cell[0]]
object_color = STR_TO_COLOR[cell[1]]
object_name = f"{object_color} {object_type}"
objects.append(object_name)
return objects
def get_objects_in_view(obs: Dict) -> List[str]:
"""
Get objects in the agent's field of view.
"""
pass
def get_inventory(env: gym.Env) -> str:
object = env.unwrapped.carrying
if object is None:
return "nothing"
else:
return f"{object.color} {object.type}"
def describe_environment(env: gym.Env, obs: Dict) -> str:
objects = get_objects(env)
inventory = get_inventory(env)
# TODO: Only get visible objects
env_description = f"""
You are in a room.
You see: {', '.join(objects)}.
You are facing: {obs["direction"]}.
You are currently holding: {inventory}.
"""
return env_description
def planning_agent(env, obs, previous_goal: str) -> str:
prompt = f"""
You are controlling a simulated agent to complete tasks.
The overall goal is: {obs["mission"]}.
The previous goal was: {previous_goal}.
{describe_environment(env, obs)}
Describe the next goal in one sentence. Be concise.
"""
print(f"\n****PLANNING AGENT PROMPT****\n{prompt}\n")
response = openai_call(prompt)
print(f"\n****PLANNING AGENT RESPONSE****\n{response}\n")
return response.strip().lower()
def evaluation_agent(env, obs, current_goal: str, additional_context: str = ""):
prompt = (
"""
Answer the question.
Rules:
1. If you know the answer, answer yes or no.
---
Follow the following format.
Question: ${the question to be answered}
Tools: ${descriptions of available tools}
Context: ${information relevant to the question}
Answer: ${yes / no / need more information}
---
"""
+ f"""
Question: Has the current goal been achieved?
Tools: get_coordinate(object_name: str) -> Tuple[int, int], is_next_to(coord1: Tuple[int, int], coord2: Tuple[int, int]) -> bool
Context: {describe_environment(env, obs)} {additional_context}. The overall goal is: {obs["mission"]}. The current goal is: {current_goal}
Answer:
"""
)
print(f"\n****EVALUATION AGENT PROMPT****\n{prompt}\n")
response = openai_call(prompt)
print(f"\n****EVALUATION AGENT RESPONSE****\n{response}\n")
# Define tools which require local variables
def get_coordinate(object_name: str) -> Tuple[int, int]:
return get_coordinate_of_object(obs, object_name)
if response.lower() == "need more information":
tool_choice_response = tool_choice_agent(
env, obs, current_goal, additional_context
)
tool_choice = re.search(r"next tool to use: (.*)", tool_choice_response).group(
1
)
tool_result = eval(tool_choice)
additional_context += f"The result of {tool_choice} is {tool_result}."
return evaluation_agent(env, obs, current_goal, additional_context)
elif response.lower() in ("yes", "no"):
return response.strip().lower()
else:
raise ValueError(f"Invalid response: {response}")
# Tools
def get_coordinate_of_object(obs: Dict[str, Any], object_name: str) -> Tuple[int, int]:
"""Get the coordinate of an object"""
return (0, 0)
def is_next_to(coord1: Tuple[int, int], coord2: Tuple[int, int]) -> bool:
"""Check if two coordinates are next to each other"""
x1, y1 = coord1
x2, y2 = coord2
return abs(x1 - x2) <= 1 and abs(y1 - y2) <= 1
def tool_choice_agent(env, obs, current_goal: str, additional_context: str) -> str:
prompt = (
"""
Identify the appropriate tool that will help answer a complex question.
Rules:
1. Choose exactly one tool to use.
---
Example of writing 'next tool to use'.
Next tool to use: get_coordinate(obs, "green key")
Next tool to use: is_next_to((0,2), (3,4))
---
Follow the following format.
Question: ${the question to be answered}
Tools: ${descriptions of available tools}
Context: ${information relevant to the question}
Rationale: Let's think step by step. To answer this question, we first need to find out ${the missing information}
Next tool to use: ${the name and invocation arguments of the tool}
---
"""
+ f"""
Question: Has the current goal been achieved?
Tools: get_coordinate(object_name: str) -> Tuple[int, int], is_next_to(coord1: Tuple[int, int], coord2: Tuple[int, int]) -> bool,
Context: {describe_environment(env, obs)} {additional_context}. The overall goal is: {obs["mission"]}. The current goal is: {current_goal}.
Rationale: Let's think step by step. To answer this question, we first need to find out
"""
)
print(f"\n****TOOL CHOICE AGENT PROMPT****\n{prompt}\n")
response = openai_call(prompt)
print(f"\n****TOOL CHOICE AGENT RESPONSE****\n{response}\n")
return response.strip().lower()
from minigrid.core.actions import Actions
def key_handler(event, env) -> Optional[Actions]:
key: str = event.key
print("pressed", key)
if key == "escape":
env.close()
return
if key == "backspace":
env.reset()
return
key_to_action = {
"left": Actions.left,
"right": Actions.right,
"up": Actions.forward,
"space": Actions.toggle,
"pageup": Actions.pickup,
"pagedown": Actions.drop,
"tab": Actions.pickup,
"left shift": Actions.drop,
"enter": Actions.done,
}
return key_to_action.get(key)
def manual_control():
import pygame
env = PutNearEnv(size=6, numObjs=2, max_steps=50, render_mode="human")
obs, _ = env.reset()
env.render()
previous_goal = ""
current_goal = planning_agent(env, obs, previous_goal)
while True:
# Step the agent
# TODO: Implement CLI for manual control
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
event.key = pygame.key.name(int(event.key))
action = key_handler(event, env)
if action is None:
continue
obs, _, terminated, truncated, _ = env.step(action)
env.render()
# Evaluate the agent
evaluation = evaluation_agent(env, obs, current_goal)
if evaluation == "yes":
previous_goal = current_goal
current_goal = planning_agent(env, obs, previous_goal)
elif evaluation == "no":
pass
else:
raise ValueError(f"Invalid evaluation: {evaluation}")
def main():
env = PutNearEnv(size=6, numObjs=2, max_steps=50, render_mode="human")
obs, _ = env.reset()
env.render()
previous_goal = ""
current_goal = planning_agent(env, obs, previous_goal)
while True:
# Step the agent
action = env.action_space.sample()
obs, _, terminated, truncated, _ = env.step(action)
env.render()
# Evaluate the agent
evaluation = evaluation_agent(env, obs, current_goal)
if evaluation == "yes":
previous_goal = current_goal
current_goal = planning_agent(env, obs, previous_goal)
elif evaluation == "no":
pass
else:
raise ValueError(f"Invalid evaluation: {evaluation}")
if __name__ == "__main__":
manual_control()
| [
"\nAnswer the question. \n\nRules:\n1. If you know the answer, answer yes or no.\n---\n\nFollow the following format. \n\nQuestion: ${the question to be answered}\nTools: ${descriptions of available tools}\nContext: ${information relevant to the question}\nAnswer: ${yes / no / need more information}\n---\n",
"mission",
"\nIdentify the appropriate tool that will help answer a complex question. \n\nRules: \n1. Choose exactly one tool to use.\n---\nExample of writing 'next tool to use'. \n\nNext tool to use: get_coordinate(obs, \"green key\")\nNext tool to use: is_next_to((0,2), (3,4))\n---\n\nFollow the following format. \n\nQuestion: ${the question to be answered}\nTools: ${descriptions of available tools}\nContext: ${information relevant to the question}\nRationale: Let's think step by step. To answer this question, we first need to find out ${the missing information}\nNext tool to use: ${the name and invocation arguments of the tool}\n---\n"
] |
2024-01-10 | dtch1997/gpt-text-gym | gpt_text_gym~examples~minigrid_proof_of_concept.py | import openai
import time
import minigrid # noqa
import gymnasium as gym
import re
import dotenv
from typing import Dict, List, Tuple, Optional
from minigrid.core.constants import COLOR_NAMES
from minigrid.core.grid import Grid
from minigrid.core.mission import MissionSpace
from minigrid.core.world_object import Door, Goal, Key, Wall, Ball, Box
from minigrid.manual_control import ManualControl
from minigrid.minigrid_env import MiniGridEnv
from gpt_text_gym import ROOT_DIR
LLM_MODEL = "gpt-4"
OPENAI_TEMPERATURE = 0.0
openai.api_key = dotenv.get_key(ROOT_DIR / ".env", "API_KEY")
class PutNearEnv(MiniGridEnv):
"""
## Description
The agent is instructed through a textual string to pick up an object and
place it next to another object. This environment is easy to solve with two
objects, but difficult to solve with more, as it involves both textual
understanding and spatial reasoning involving multiple objects.
## Mission Space
"put the {move_color} {move_type} near the {target_color} {target_type}"
{move_color} and {target_color} can be "red", "green", "blue", "purple",
"yellow" or "grey".
{move_type} and {target_type} Can be "box", "ball" or "key".
## Action Space
| Num | Name | Action |
|-----|--------------|-------------------|
| 0 | left | Turn left |
| 1 | right | Turn right |
| 2 | forward | Move forward |
| 3 | pickup | Pick up an object |
| 4 | drop | Drop an object |
| 5 | toggle | Unused |
| 6 | done | Unused |
## Observation Encoding
- Each tile is encoded as a 3 dimensional tuple:
`(OBJECT_IDX, COLOR_IDX, STATE)`
- `OBJECT_TO_IDX` and `COLOR_TO_IDX` mapping can be found in
[minigrid/minigrid.py](minigrid/minigrid.py)
- `STATE` refers to the door state with 0=open, 1=closed and 2=locked
## Rewards
A reward of '1 - 0.9 * (step_count / max_steps)' is given for success, and '0' for failure.
## Termination
The episode ends if any one of the following conditions is met:
1. The agent picks up the wrong object.
2. The agent drop the correct object near the target.
3. Timeout (see `max_steps`).
## Registered Configurations
N: number of objects.
- `MiniGrid-PutNear-6x6-N2-v0`
- `MiniGrid-PutNear-8x8-N3-v0`
"""
def __init__(self, size=6, numObjs=2, max_steps: int | None = None, **kwargs):
        # Guard the removal so constructing the env more than once in the same
        # process does not raise ValueError (COLOR_NAMES is a module-level list).
        if "grey" in COLOR_NAMES:
            COLOR_NAMES.remove("grey")
self.size = size
self.numObjs = numObjs
self.obj_types = ["key", "ball", "box"]
mission_space = MissionSpace(
mission_func=self._gen_mission,
ordered_placeholders=[
COLOR_NAMES,
self.obj_types,
COLOR_NAMES,
self.obj_types,
],
)
if max_steps is None:
max_steps = 5 * size
super().__init__(
mission_space=mission_space,
width=size,
height=size,
# Set this to True for maximum speed
see_through_walls=True,
max_steps=max_steps,
**kwargs,
)
@staticmethod
def _gen_mission(
move_color: str, move_type: str, target_color: str, target_type: str
):
return f"put the {move_color} {move_type} near the {target_color} {target_type}"
def _gen_grid(self, width, height):
self.grid = Grid(width, height)
# Generate the surrounding walls
self.grid.horz_wall(0, 0)
self.grid.horz_wall(0, height - 1)
self.grid.vert_wall(0, 0)
self.grid.vert_wall(width - 1, 0)
# Types and colors of objects we can generate
types = ["key", "ball", "box"]
objs = []
objPos = []
def near_obj(env, p1):
for p2 in objPos:
dx = p1[0] - p2[0]
dy = p1[1] - p2[1]
if abs(dx) <= 1 and abs(dy) <= 1:
return True
return False
# Until we have generated all the objects
while len(objs) < self.numObjs:
objType = self._rand_elem(types)
objColor = self._rand_elem(COLOR_NAMES)
# If this object already exists, try again
if (objType, objColor) in objs:
continue
if objType == "key":
obj = Key(objColor)
elif objType == "ball":
obj = Ball(objColor)
elif objType == "box":
obj = Box(objColor)
else:
raise ValueError(
"{} object type given. Object type can only be of values key, ball and box.".format(
objType
)
)
pos = self.place_obj(obj, reject_fn=near_obj)
objs.append((objType, objColor))
objPos.append(pos)
# Randomize the agent start position and orientation
self.place_agent()
# Choose a random object to be moved
objIdx = self._rand_int(0, len(objs))
self.move_type, self.moveColor = objs[objIdx]
self.move_pos = objPos[objIdx]
# Choose a target object (to put the first object next to)
while True:
targetIdx = self._rand_int(0, len(objs))
if targetIdx != objIdx:
break
self.target_type, self.target_color = objs[targetIdx]
self.target_pos = objPos[targetIdx]
self.mission = "put the {} {} near the {} {}".format(
self.moveColor,
self.move_type,
self.target_color,
self.target_type,
)
def step(self, action):
preCarrying = self.carrying
obs, reward, terminated, truncated, info = super().step(action)
u, v = self.dir_vec
ox, oy = (self.agent_pos[0] + u, self.agent_pos[1] + v)
tx, ty = self.target_pos
# If we picked up the wrong object, terminate the episode
if action == self.actions.pickup and self.carrying:
if (
self.carrying.type != self.move_type
or self.carrying.color != self.moveColor
):
terminated = True
# If successfully dropping an object near the target
if action == self.actions.drop and preCarrying:
if self.grid.get(ox, oy) is preCarrying:
if abs(ox - tx) <= 1 and abs(oy - ty) <= 1:
reward = self._reward()
terminated = True
return obs, reward, terminated, truncated, info
def limit_tokens_from_string(string: str, model: str, limit: int) -> str:
"""Limits the string to a number of tokens (estimated)."""
import tiktoken
try:
encoding = tiktoken.encoding_for_model(model)
except:
encoding = tiktoken.encoding_for_model("gpt2") # Fallback for others.
encoded = encoding.encode(string)
return encoding.decode(encoded[:limit])
def openai_call(
prompt: str,
model: str = LLM_MODEL,
temperature: float = OPENAI_TEMPERATURE,
max_tokens: int = 100,
):
while True:
try:
trimmed_prompt = prompt
# TODO: Enable trimmed prompt.
# Use 4000 instead of the real limit (4097) to give a bit of wiggle room for the encoding of roles.
# TODO: different limits for different models.
# trimmed_prompt = limit_tokens_from_string(prompt, model, 4000 - max_tokens)
# Use chat completion API
messages = [{"role": "system", "content": trimmed_prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
n=1,
stop=None,
)
return response.choices[0].message.content.strip()
except openai.error.RateLimitError:
print(
" *** The OpenAI API rate limit has been exceeded. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.Timeout:
print(
" *** OpenAI API timeout occurred. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.APIError:
print(
" *** OpenAI API error occurred. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.APIConnectionError:
print(
" *** OpenAI API connection error occurred. Check your network settings, proxy configuration, SSL certificates, or firewall rules. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.InvalidRequestError:
print(
" *** OpenAI API invalid request. Check the documentation for the specific API method you are calling and make sure you are sending valid and complete parameters. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
except openai.error.ServiceUnavailableError:
print(
" *** OpenAI API service unavailable. Waiting 10 seconds and trying again. ***"
)
time.sleep(10) # Wait 10 seconds and try again
else:
break
def get_objects(env: gym.Env) -> List[str]:
env_str = str(env.unwrapped)
objects = []
OBJECT_TO_STR = {
"wall": "W",
"floor": "F",
"door": "D",
"key": "K",
"ball": "A",
"box": "B",
"goal": "G",
"lava": "V",
}
STR_TO_OBJECT = {v: k for k, v in OBJECT_TO_STR.items()}
# Map agent's direction to short string
AGENT_DIR_TO_STR = {0: ">", 1: "V", 2: "<", 3: "^"}
STR_TO_AGENT_DIR = {v: k for k, v in AGENT_DIR_TO_STR.items()}
# Map of colors to short string
COLOR_TO_STR = {
"red": "R",
"green": "G",
"blue": "B",
"purple": "P",
"yellow": "Y",
}
STR_TO_COLOR = {v: k for k, v in COLOR_TO_STR.items()}
rows = env_str.split("\n")
n_rows = 6
n_cols = 6
for row in range(n_rows):
for col in range(n_cols):
cell = rows[row][2 * col : 2 * col + 2]
if cell == " ":
# empty cell
continue
elif cell[0] in STR_TO_AGENT_DIR and cell[0] == cell[1]:
# agent
continue
elif cell[0] in ("W", "F", "V"):
# wall, floor, or lava
# Skip for now
continue
# object_name = STR_TO_OBJECT[cell[0]]
else:
# interactable object
object_type = STR_TO_OBJECT[cell[0]]
object_color = STR_TO_COLOR[cell[1]]
object_name = f"{object_color} {object_type}"
objects.append(object_name)
return objects
def get_objects_in_view(obs: Dict) -> List[str]:
"""
Get objects in the agent's field of view.
"""
pass
def get_inventory(env: gym.Env) -> str:
object = env.unwrapped.carrying
if object is None:
return "nothing"
else:
return f"{object.color} {object.type}"
def describe_environment(env: gym.Env, obs: Dict) -> str:
objects = get_objects(env)
inventory = get_inventory(env)
# TODO: Only get visible objects
env_description = f"""
You are in a room.
You see: {', '.join(objects)}.
You are facing: {obs["direction"]}.
You are currently holding: {inventory}.
"""
return env_description
def planning_agent(env, obs, previous_goal: str) -> str:
prompt = f"""
You are controlling a simulated agent to complete tasks.
The overall goal is: {obs["mission"]}.
The previous goal was: {previous_goal}.
{describe_environment(env, obs)}
Describe the next goal in one sentence. Be concise.
"""
print(f"\n****PLANNING AGENT PROMPT****\n{prompt}\n")
response = openai_call(prompt)
print(f"\n****PLANNING AGENT RESPONSE****\n{response}\n")
return response.strip().lower()
def evaluation_agent(env, obs, current_goal: str):
prompt = f"""
You are controlling a simulated agent to complete tasks.
The overall goal is: {obs["mission"]}.
The current goal is: {current_goal}.
{describe_environment(env, obs)}
Has the current goal been reached? Answer yes or no.
"""
print(f"\n****EVALUATION AGENT PROMPT****\n{prompt}\n")
response = openai_call(prompt)
print(f"\n****EVALUATION AGENT RESPONSE****\n{response}\n")
return response.strip().lower()
from minigrid.core.actions import Actions
def key_handler(event, env) -> Optional[Actions]:
key: str = event.key
print("pressed", key)
if key == "escape":
env.close()
return
if key == "backspace":
env.reset()
return
key_to_action = {
"left": Actions.left,
"right": Actions.right,
"up": Actions.forward,
"space": Actions.toggle,
"pageup": Actions.pickup,
"pagedown": Actions.drop,
"tab": Actions.pickup,
"left shift": Actions.drop,
"enter": Actions.done,
}
return key_to_action.get(key)
def manual_control():
import pygame
env = PutNearEnv(size=6, numObjs=2, max_steps=50, render_mode="human")
obs, _ = env.reset()
env.render()
previous_goal = ""
current_goal = planning_agent(env, obs, previous_goal)
while True:
# Step the agent
# TODO: Implement CLI for manual control
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
event.key = pygame.key.name(int(event.key))
action = key_handler(event, env)
if action is None:
continue
obs, _, terminated, truncated, _ = env.step(action)
env.render()
# Evaluate the agent
evaluation = evaluation_agent(env, obs, current_goal)
if evaluation == "yes":
previous_goal = current_goal
current_goal = planning_agent(env, obs, previous_goal)
elif evaluation == "no":
pass
else:
raise ValueError(f"Invalid evaluation: {evaluation}")
def main():
env = PutNearEnv(size=6, numObjs=2, max_steps=50, render_mode="human")
obs, _ = env.reset()
env.render()
previous_goal = ""
current_goal = planning_agent(env, obs, previous_goal)
while True:
# Step the agent
action = env.action_space.sample()
obs, _, terminated, truncated, _ = env.step(action)
env.render()
# Evaluate the agent
evaluation = evaluation_agent(env, obs, current_goal)
if evaluation == "yes":
previous_goal = current_goal
current_goal = planning_agent(env, obs, previous_goal)
elif evaluation == "no":
pass
else:
raise ValueError(f"Invalid evaluation: {evaluation}")
if __name__ == "__main__":
manual_control()
| [
"mission"
] |
2024-01-10 | dtch1997/gpt-text-gym | gpt_text_gym~examples~dsp_example.py | import dsp
import openai
import dotenv
from gpt_text_gym import ROOT_DIR
LLM_MODEL = "text-davinci-002"
colbert_server = (
"http://ec2-44-228-128-229.us-west-2.compute.amazonaws.com:8893/api/search"
)
OPENAI_TEMPERATURE = 0.0
OPENAI_API_KEY = dotenv.get_key(ROOT_DIR / ".env", "API_KEY")
train = [
(
'Who produced the album that included a re-recording of "Lithium"?',
["Butch Vig"],
),
(
"Who was the director of the 2009 movie featuring Peter Outerbridge as William Easton?",
["Kevin Greutert"],
),
(
"The heir to the Du Pont family fortune sponsored what wrestling team?",
["Foxcatcher", "Team Foxcatcher", "Foxcatcher Team"],
),
("In what year was the star of To Hell and Back born?", ["1925"]),
(
"Which award did the first book of Gary Zukav receive?",
["U.S. National Book Award", "National Book Award"],
),
(
"What city was the victim of Joseph Druces working in?",
["Boston, Massachusetts", "Boston"],
),
]
train = [dsp.Example(question=question, answer=answer) for question, answer in train]
Question = dsp.Type(prefix="Question:", desc="${the question to be answered}")
Answer = dsp.Type(
prefix="Answer:",
desc="${a short factoid answer, often between 1 and 5 words}",
format=dsp.format_answers,
)
qa_template = dsp.Template(
instructions="Answer questions with short factoid answers.",
question=Question(),
answer=Answer(),
)
def vanilla_QA_LM(question: str) -> str:
demos = dsp.sample(train, k=7)
example = dsp.Example(question=question, demos=demos)
example, completions = dsp.generate(qa_template)(example, stage="qa")
return completions[0].answer
Context = dsp.Type(
prefix="Context:\n",
desc="${sources that may contain relevant content}",
format=dsp.passages2text,
)
qa_template_with_passages = dsp.Template(
instructions=qa_template.instructions,
context=Context(),
question=Question(),
answer=Answer(),
)
def retrieve_then_read_QA(question: str) -> str:
demos = dsp.sample(train, k=7)
passages = dsp.retrieve(question, k=1)
example = dsp.Example(question=question, context=passages, demos=demos)
example, completions = dsp.generate(qa_template_with_passages)(example, stage="qa")
return completions.answer
if __name__ == "__main__":
# Set up dsp
lm = dsp.GPT3(LLM_MODEL, OPENAI_API_KEY)
rm = dsp.ColBERTv2(url=colbert_server)
dsp.settings.configure(lm=lm, rm=rm)
question = "What is the capital of the United States?"
answer = vanilla_QA_LM(question)
print("Vanilla QA LM answer:")
lm.inspect_history(n=1)
# Doesn't work, because the retrieval model is no longer online
# answer = retrieve_then_read_QA(question)
# print("QA LM with retrieval answer:")
# lm.inspect_history(n=1)
| [
"Answer questions with short factoid answers."
] |
2024-01-10 | dtch1997/gpt-text-gym | gpt_text_gym~gpt~chat_completer.py | """ Interface to GPT model."""
import openai
import dotenv
from ml_collections import config_dict
from dataclasses import dataclass
from gpt_text_gym import ROOT_DIR
from typing import List, NewType, Dict, Optional
from gpt_text_gym.gpt.message import Message, RawMessage, default_system_message
from gpt_text_gym.gpt.utils import remove_leading_whitespace
def get_chatgpt_system_message():
content = """
You are ChatGPT, a large language model trained by OpenAI, based on the GPT-4 architecture.
Knowledge cutoff: 2021-09
Current date: 2023-06-26
"""
return Message(role="system", content=remove_leading_whitespace(content))
def openai_chat_completion_create(
model: str,
messages: List[RawMessage],
n: int,
temperature: float,
max_tokens: Optional[int],
**kwargs,
):
"""Wrapper around OpenAI's ChatCompletion.create method."""
return openai.ChatCompletion.create(
model=model,
messages=messages,
n=n,
temperature=temperature,
max_tokens=max_tokens,
**kwargs,
)
class GPTChatCompleter:
def __init__(
self,
model: str = "gpt-4",
temperature: float = 0.0,
max_tokens: Optional[int] = None,
n: int = 1,
):
openai.api_key = dotenv.get_key(ROOT_DIR / ".env", "API_KEY")
self.chat_history: List[Message] = []
self.model = model
self.temperature = temperature
self.max_tokens = max_tokens
self.n = n
def clear(self):
self.chat_history = []
def generate_chat_completion(self, **kwargs):
messages = [message.to_dict() for message in self.chat_history]
response = openai_chat_completion_create(
model=self.model,
messages=messages,
n=self.n,
temperature=self.temperature,
max_tokens=self.max_tokens,
**kwargs,
)
choice = response["choices"][0]
msg: Message = Message.from_dict(choice["message"])
return msg
def add_message(self, message: Message):
self.chat_history.append(message)
if __name__ == "__main__":
chatbot = GPTChatCompleter()
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{
"role": "user",
"content": "Translate the following English text to French: 'Hello, how are you?'",
},
]
messages = [Message.from_dict(message) for message in messages]
for message in messages:
print(message)
chatbot.add_message(message)
    reply = chatbot.generate_chat_completion()
print(reply)
| [
"Translate the following English text to French: 'Hello, how are you?'",
"You are a helpful assistant."
] |
2024-01-10 | bjollans/stable-dreamfusion | main.py | import torch
import argparse
import pandas as pd
import sys
from nerf.provider import NeRFDataset
from nerf.utils import *
# torch.autograd.set_detect_anomaly(True)
if __name__ == '__main__':
# See https://stackoverflow.com/questions/27433316/how-to-get-argparse-to-read-arguments-from-a-file-with-an-option-rather-than-pre
class LoadFromFile (argparse.Action):
def __call__ (self, parser, namespace, values, option_string = None):
with values as f:
# parse arguments in the file and store them in the target namespace
parser.parse_args(f.read().split(), namespace)
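    # Example of the --file option (illustrative): put the usual CLI flags in a
    # text file, e.g. a hypothetical "car.args" containing
    #   --text "a DSLR photo of a red sports car" -O --workspace trial_car
    # and then run:  python main.py --file car.args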
parser = argparse.ArgumentParser()
parser.add_argument('--file', type=open, action=LoadFromFile, help="specify a file filled with more arguments")
parser.add_argument('--text', default=None, help="text prompt")
parser.add_argument('--negative', default='', type=str, help="negative text prompt")
parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray")
parser.add_argument('-O2', action='store_true', help="equals --backbone vanilla")
parser.add_argument('--test', action='store_true', help="test mode")
parser.add_argument('--six_views', action='store_true', help="six_views mode: save the images of the six views")
parser.add_argument('--eval_interval', type=int, default=1, help="evaluate on the valid set every interval epochs")
parser.add_argument('--test_interval', type=int, default=100, help="test on the test set every interval epochs")
parser.add_argument('--workspace', type=str, default='workspace')
parser.add_argument('--seed', default=None)
parser.add_argument('--image', default=None, help="image prompt")
parser.add_argument('--image_rgb_loss', default=None, help="image prompt for zero123 inference")
parser.add_argument('--image_config', default=None, help="image config csv")
parser.add_argument('--known_view_interval', type=int, default=4, help="train default view with RGB loss every & iters, only valid if --image is not None.")
parser.add_argument('--IF', action='store_true', help="experimental: use DeepFloyd IF as the guidance model for nerf stage")
parser.add_argument('--guidance', type=str, nargs='*', default=['SD'], help='guidance model')
parser.add_argument('--guidance_scale', type=float, default=100, help="diffusion model classifier-free guidance scale")
parser.add_argument('--save_mesh', action='store_true', help="export an obj mesh with texture")
parser.add_argument('--mcubes_resolution', type=int, default=256, help="mcubes resolution for extracting mesh")
parser.add_argument('--decimate_target', type=int, default=5e4, help="target face number for mesh decimation")
parser.add_argument('--dmtet', action='store_true', help="use dmtet finetuning")
parser.add_argument('--tet_grid_size', type=int, default=128, help="tet grid size")
parser.add_argument('--init_with', type=str, default='', help="ckpt to init dmtet")
parser.add_argument('--lock_geo', action='store_true', help="disable dmtet to learn geometry")
### training options
parser.add_argument('--iters', type=int, default=10000, help="training iters")
parser.add_argument('--lr', type=float, default=1e-3, help="max learning rate")
parser.add_argument('--ckpt', type=str, default='latest', help="possible options are ['latest', 'scratch', 'best', 'latest_model']")
parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch")
parser.add_argument('--taichi_ray', action='store_true', help="use taichi raymarching")
parser.add_argument('--max_steps', type=int, default=1024, help="max num steps sampled per ray (only valid when using --cuda_ray)")
parser.add_argument('--num_steps', type=int, default=64, help="num steps sampled per ray (only valid when not using --cuda_ray)")
parser.add_argument('--upsample_steps', type=int, default=32, help="num steps up-sampled per ray (only valid when not using --cuda_ray)")
parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)")
parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when not using --cuda_ray)")
parser.add_argument('--latent_iter_ratio', type=float, default=0.2, help="training iters that only use albedo shading")
parser.add_argument('--albedo_iter_ratio', type=float, default=0, help="training iters that only use albedo shading")
parser.add_argument('--min_ambient_ratio', type=float, default=0.1, help="minimum ambient ratio to use in lambertian shading")
parser.add_argument('--textureless_ratio', type=float, default=0.2, help="ratio of textureless shading")
parser.add_argument('--jitter_pose', action='store_true', help="add jitters to the randomly sampled camera poses")
parser.add_argument('--jitter_center', type=float, default=0.2, help="amount of jitter to add to sampled camera pose's center (camera location)")
parser.add_argument('--jitter_target', type=float, default=0.2, help="amount of jitter to add to sampled camera pose's target (i.e. 'look-at')")
parser.add_argument('--jitter_up', type=float, default=0.02, help="amount of jitter to add to sampled camera pose's up-axis (i.e. 'camera roll')")
parser.add_argument('--uniform_sphere_rate', type=float, default=0, help="likelihood of sampling camera location uniformly on the sphere surface area")
parser.add_argument('--grad_clip', type=float, default=-1, help="clip grad of all grad to this limit, negative value disables it")
parser.add_argument('--grad_clip_rgb', type=float, default=-1, help="clip grad of rgb space grad to this limit, negative value disables it")
# model options
parser.add_argument('--bg_radius', type=float, default=1.4, help="if positive, use a background model at sphere(bg_radius)")
parser.add_argument('--density_activation', type=str, default='exp', choices=['softplus', 'exp'], help="density activation function")
parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied")
parser.add_argument('--blob_density', type=float, default=5, help="max (center) density for the density blob")
parser.add_argument('--blob_radius', type=float, default=0.2, help="control the radius for the density blob")
# network backbone
parser.add_argument('--backbone', type=str, default='grid', choices=['grid_tcnn', 'grid', 'vanilla', 'grid_taichi'], help="nerf backbone")
parser.add_argument('--optim', type=str, default='adan', choices=['adan', 'adam'], help="optimizer")
parser.add_argument('--sd_version', type=str, default='2.1', choices=['1.5', '2.0', '2.1'], help="stable diffusion version")
parser.add_argument('--hf_key', type=str, default=None, help="hugging face Stable diffusion model key")
# try this if CUDA OOM
parser.add_argument('--fp16', action='store_true', help="use float16 for training")
parser.add_argument('--vram_O', action='store_true', help="optimization for low VRAM usage")
# rendering resolution in training, increase these for better quality / decrease these if CUDA OOM even if --vram_O enabled.
parser.add_argument('--w', type=int, default=64, help="render width for NeRF in training")
parser.add_argument('--h', type=int, default=64, help="render height for NeRF in training")
parser.add_argument('--known_view_scale', type=float, default=1.5, help="multiply --h/w by this for known view rendering")
parser.add_argument('--known_view_noise_scale', type=float, default=2e-3, help="random camera noise added to rays_o and rays_d")
parser.add_argument('--dmtet_reso_scale', type=float, default=8, help="multiply --h/w by this for dmtet finetuning")
parser.add_argument('--batch_size', type=int, default=1, help="images to render per batch using NeRF")
### dataset options
parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box(-bound, bound)")
parser.add_argument('--dt_gamma', type=float, default=0, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)")
parser.add_argument('--min_near', type=float, default=0.01, help="minimum near distance for camera")
parser.add_argument('--radius_range', type=float, nargs='*', default=[3.0, 3.5], help="training camera radius range")
parser.add_argument('--theta_range', type=float, nargs='*', default=[45, 105], help="training camera range along the polar angles (i.e. up and down). See advanced.md for details.")
parser.add_argument('--phi_range', type=float, nargs='*', default=[-180, 180], help="training camera range along the azimuth angles (i.e. left and right). See advanced.md for details.")
parser.add_argument('--fovy_range', type=float, nargs='*', default=[10, 30], help="training camera fovy range")
parser.add_argument('--default_radius', type=float, default=3.2, help="radius for the default view")
parser.add_argument('--default_polar', type=float, default=90, help="polar for the default view")
parser.add_argument('--default_azimuth', type=float, default=0, help="azimuth for the default view")
parser.add_argument('--default_azimuth_rgb_loss', type=float, default=None, help="azimuth for the default view for z123 rendering")
parser.add_argument('--default_fovy', type=float, default=20, help="fovy for the default view")
parser.add_argument('--progressive_view', action='store_true', help="progressively expand view sampling range from default to full")
parser.add_argument('--progressive_view_init_ratio', type=float, default=0.2, help="initial ratio of final range, used for progressive_view")
parser.add_argument('--progressive_level', action='store_true', help="progressively increase gridencoder's max_level")
parser.add_argument('--angle_overhead', type=float, default=30, help="[0, angle_overhead] is the overhead region")
parser.add_argument('--angle_front', type=float, default=60, help="[0, angle_front] is the front region, [180, 180+angle_front] the back region, otherwise the side region.")
parser.add_argument('--t_range', type=float, nargs='*', default=[0.02, 0.98], help="stable diffusion time steps range")
parser.add_argument('--dont_override_stuff',action='store_true', help="Don't override t_range, etc.")
### regularizations
parser.add_argument('--lambda_entropy', type=float, default=1e-3, help="loss scale for alpha entropy")
parser.add_argument('--lambda_opacity', type=float, default=0, help="loss scale for alpha value")
parser.add_argument('--lambda_orient', type=float, default=1e-2, help="loss scale for orientation")
parser.add_argument('--lambda_tv', type=float, default=0, help="loss scale for total variation")
parser.add_argument('--lambda_wd', type=float, default=0, help="loss scale")
parser.add_argument('--lambda_mesh_normal', type=float, default=0.5, help="loss scale for mesh normal smoothness")
parser.add_argument('--lambda_mesh_laplacian', type=float, default=0.5, help="loss scale for mesh laplacian")
parser.add_argument('--lambda_guidance', type=float, default=1, help="loss scale for SDS")
parser.add_argument('--lambda_rgb', type=float, default=1000, help="loss scale for RGB")
parser.add_argument('--lambda_mask', type=float, default=500, help="loss scale for mask (alpha)")
parser.add_argument('--lambda_normal', type=float, default=0, help="loss scale for normal map")
parser.add_argument('--lambda_depth', type=float, default=10, help="loss scale for relative depth")
parser.add_argument('--lambda_2d_normal_smooth', type=float, default=0, help="loss scale for 2D normal image smoothness")
parser.add_argument('--lambda_3d_normal_smooth', type=float, default=0, help="loss scale for 3D normal image smoothness")
### debugging options
parser.add_argument('--save_guidance', action='store_true', help="save images of the per-iteration NeRF renders, added noise, denoised (i.e. guidance), fully-denoised. Useful for debugging, but VERY SLOW and takes lots of memory!")
parser.add_argument('--save_guidance_interval', type=int, default=10, help="save guidance every X step")
### GUI options
parser.add_argument('--gui', action='store_true', help="start a GUI")
parser.add_argument('--W', type=int, default=800, help="GUI width")
parser.add_argument('--H', type=int, default=800, help="GUI height")
parser.add_argument('--radius', type=float, default=5, help="default GUI camera radius from center")
parser.add_argument('--fovy', type=float, default=20, help="default GUI camera fovy")
parser.add_argument('--light_theta', type=float, default=60, help="default GUI light direction in [0, 180], corresponding to elevation [90, -90]")
parser.add_argument('--light_phi', type=float, default=0, help="default GUI light direction in [0, 360), azimuth")
parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel")
parser.add_argument('--zero123_config', type=str, default='./pretrained/zero123/sd-objaverse-finetune-c_concat-256.yaml', help="config file for zero123")
parser.add_argument('--zero123_ckpt', type=str, default='./pretrained/zero123/105000.ckpt', help="ckpt for zero123")
parser.add_argument('--zero123_grad_scale', type=str, default='angle', help="whether to scale the gradients based on 'angle' or 'None'")
parser.add_argument('--dataset_size_train', type=int, default=100, help="Length of train dataset i.e. # of iterations per epoch")
parser.add_argument('--dataset_size_valid', type=int, default=8, help="# of frames to render in the turntable video in validation")
parser.add_argument('--dataset_size_test', type=int, default=100, help="# of frames to render in the turntable video at test time")
parser.add_argument('--exp_start_iter', type=int, default=None, help="start iter # for experiment, to calculate progressive_view and progressive_level")
parser.add_argument('--exp_end_iter', type=int, default=None, help="end iter # for experiment, to calculate progressive_view and progressive_level")
opt = parser.parse_args()
if opt.image_rgb_loss is None:
opt.image_rgb_loss = opt.image
if opt.default_azimuth_rgb_loss is None:
opt.default_azimuth_rgb_loss = opt.default_azimuth
if opt.O:
opt.fp16 = True
opt.cuda_ray = True
elif opt.O2:
opt.fp16 = True
opt.backbone = 'vanilla'
opt.progressive_level = True
if opt.IF:
if 'SD' in opt.guidance:
opt.guidance.remove('SD')
opt.guidance.append('IF')
opt.latent_iter_ratio = 0 # must not do as_latent
opt.images, opt.images_rgb_loss, opt.ref_radii, opt.ref_polars, opt.ref_azimuths, opt.ref_azimuths_rgb_loss, opt.zero123_ws = [], [], [], [], [], [], []
opt.default_zero123_w = 1
opt.exp_start_iter = opt.exp_start_iter or 0
opt.exp_end_iter = opt.exp_end_iter or opt.iters
# parameters for image-conditioned generation
if opt.image is not None or opt.image_config is not None:
if opt.text is None:
# use zero123 guidance model when only providing image
opt.guidance = ['zero123']
if not opt.dont_override_stuff:
opt.fovy_range = [opt.default_fovy, opt.default_fovy] # fix fov as zero123 doesn't support changing fov
opt.guidance_scale = 5
opt.lambda_3d_normal_smooth = 10
else:
# use stable-diffusion when providing both text and image
opt.guidance = ['SD', 'clip']
if not opt.dont_override_stuff:
opt.guidance_scale = 10
opt.t_range = [0.2, 0.6]
opt.known_view_interval = 2
opt.lambda_3d_normal_smooth = 20
opt.bg_radius = -1
# smoothness
opt.lambda_entropy = 1
opt.lambda_orient = 1
# latent warmup is not needed
opt.latent_iter_ratio = 0
if not opt.dont_override_stuff:
opt.albedo_iter_ratio = 0
# make shape init more stable
opt.progressive_view = True
opt.progressive_level = True
if opt.image is not None:
opt.images += [opt.image]
opt.images_rgb_loss += [opt.image_rgb_loss]
opt.ref_radii += [opt.default_radius]
opt.ref_polars += [opt.default_polar]
opt.ref_azimuths += [opt.default_azimuth]
opt.ref_azimuths_rgb_loss += [opt.default_azimuth_rgb_loss]
opt.zero123_ws += [opt.default_zero123_w]
if opt.image_config is not None:
# for multiview (zero123)
conf = pd.read_csv(opt.image_config, skipinitialspace=True)
opt.images += list(conf.image)
opt.ref_radii += list(conf.radius)
opt.ref_polars += list(conf.polar)
opt.ref_azimuths += list(conf.azimuth)
opt.zero123_ws += list(conf.zero123_weight)
if opt.image is None:
opt.default_radius = opt.ref_radii[0]
opt.default_polar = opt.ref_polars[0]
opt.default_azimuth = opt.ref_azimuths[0]
opt.default_azimuth_rgb_loss = opt.ref_azimuths_rgb_loss[0]
opt.default_zero123_w = opt.zero123_ws[0]
# reset to None
if len(opt.images) == 0:
opt.images = None
# default parameters for finetuning
if opt.dmtet:
opt.h = int(opt.h * opt.dmtet_reso_scale)
opt.w = int(opt.w * opt.dmtet_reso_scale)
opt.known_view_scale = 1
if not opt.dont_override_stuff:
opt.t_range = [0.02, 0.50] # ref: magic3D
if opt.images is not None:
opt.lambda_normal = 0
opt.lambda_depth = 0
if opt.text is not None and not opt.dont_override_stuff:
opt.t_range = [0.20, 0.50]
# assume finetuning
opt.latent_iter_ratio = 0
opt.albedo_iter_ratio = 0
opt.progressive_view = False
# opt.progressive_level = False
# record full range for progressive view expansion
if opt.progressive_view:
if not opt.dont_override_stuff:
# disable as they disturb progressive view
opt.jitter_pose = False
opt.uniform_sphere_rate = 0
# back up full range
opt.full_radius_range = opt.radius_range
opt.full_theta_range = opt.theta_range
opt.full_phi_range = opt.phi_range
opt.full_fovy_range = opt.fovy_range
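    # (descriptive note) training later expands the sampling ranges from the
    # default view toward these backed-up full ranges as progressive_view proceeds.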
if opt.backbone == 'vanilla':
from nerf.network import NeRFNetwork
elif opt.backbone == 'grid':
from nerf.network_grid import NeRFNetwork
elif opt.backbone == 'grid_tcnn':
from nerf.network_grid_tcnn import NeRFNetwork
elif opt.backbone == 'grid_taichi':
opt.cuda_ray = False
opt.taichi_ray = True
import taichi as ti
from nerf.network_grid_taichi import NeRFNetwork
taichi_half2_opt = True
taichi_init_args = {"arch": ti.cuda, "device_memory_GB": 4.0}
if taichi_half2_opt:
taichi_init_args["half2_vectorization"] = True
ti.init(**taichi_init_args)
else:
raise NotImplementedError(f'--backbone {opt.backbone} is not implemented!')
print(opt)
if opt.seed is not None:
seed_everything(int(opt.seed))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeRFNetwork(opt).to(device)
if opt.dmtet and opt.init_with != '':
if opt.init_with.endswith('.pth'):
# load pretrained weights to init dmtet
state_dict = torch.load(opt.init_with, map_location=device)
model.load_state_dict(state_dict['model'], strict=False)
if opt.cuda_ray:
model.mean_density = state_dict['mean_density']
model.init_tet()
else:
# assume a mesh to init dmtet (experimental, not working well now!)
import trimesh
mesh = trimesh.load(opt.init_with, force='mesh', skip_material=True, process=False)
model.init_tet(mesh=mesh)
print(model)
if opt.six_views:
guidance = None # no need to load guidance model at test
trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, fp16=opt.fp16, use_checkpoint=opt.ckpt)
test_loader = NeRFDataset(opt, device=device, type='six_views', H=opt.H, W=opt.W, size=6).dataloader(batch_size=1)
trainer.test(test_loader, write_video=False)
if opt.save_mesh:
trainer.save_mesh()
elif opt.test:
guidance = None # no need to load guidance model at test
trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, fp16=opt.fp16, use_checkpoint=opt.ckpt)
if opt.gui:
from nerf.gui import NeRFGUI
gui = NeRFGUI(opt, trainer)
gui.render()
else:
test_loader = NeRFDataset(opt, device=device, type='test', H=opt.H, W=opt.W, size=opt.dataset_size_test).dataloader(batch_size=1)
trainer.test(test_loader)
if opt.save_mesh:
trainer.save_mesh()
else:
train_loader = NeRFDataset(opt, device=device, type='train', H=opt.h, W=opt.w, size=opt.dataset_size_train * opt.batch_size).dataloader()
if opt.optim == 'adan':
from optimizer import Adan
# Adan usually requires a larger LR
optimizer = lambda model: Adan(model.get_params(5 * opt.lr), eps=1e-8, weight_decay=2e-5, max_grad_norm=5.0, foreach=False)
else: # adam
optimizer = lambda model: torch.optim.Adam(model.get_params(opt.lr), betas=(0.9, 0.99), eps=1e-15)
if opt.backbone == 'vanilla':
scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 0.1 ** min(iter / opt.iters, 1))
else:
scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 1) # fixed
# scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 0.1 ** min(iter / opt.iters, 1))
guidance = nn.ModuleDict()
if 'SD' in opt.guidance:
from guidance.sd_utils import StableDiffusion
guidance['SD'] = StableDiffusion(device, opt.fp16, opt.vram_O, opt.sd_version, opt.hf_key, opt.t_range)
if 'IF' in opt.guidance:
from guidance.if_utils import IF
guidance['IF'] = IF(device, opt.vram_O, opt.t_range)
if 'zero123' in opt.guidance:
from guidance.zero123_utils import Zero123
guidance['zero123'] = Zero123(device=device, fp16=opt.fp16, config=opt.zero123_config, ckpt=opt.zero123_ckpt, vram_O=opt.vram_O, t_range=opt.t_range, opt=opt)
if 'clip' in opt.guidance:
from guidance.clip_utils import CLIP
guidance['clip'] = CLIP(device)
trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, optimizer=optimizer, ema_decay=0.95, fp16=opt.fp16, lr_scheduler=scheduler, use_checkpoint=opt.ckpt, scheduler_update_every_step=True)
trainer.default_view_data = train_loader._data.get_default_view_data()
if opt.gui:
from nerf.gui import NeRFGUI
gui = NeRFGUI(opt, trainer, train_loader)
gui.render()
else:
valid_loader = NeRFDataset(opt, device=device, type='val', H=opt.H, W=opt.W, size=opt.dataset_size_valid).dataloader(batch_size=1)
test_loader = NeRFDataset(opt, device=device, type='test', H=opt.H, W=opt.W, size=opt.dataset_size_test).dataloader(batch_size=1)
max_epoch = np.ceil(opt.iters / len(train_loader)).astype(np.int32)
trainer.train(train_loader, valid_loader, test_loader, max_epoch)
if opt.save_mesh:
trainer.save_mesh()
| [] |
2024-01-10 | ronaldosc/instruct-pix2pix | dataset_creation~generate_txt_dataset.py | from __future__ import annotations
import json
import time
from argparse import ArgumentParser
from pathlib import Path
from typing import Optional
import datasets
import numpy as np
import openai
from tqdm.auto import tqdm
DELIMITER_0 = "\n##\n"
DELIMITER_1 = "\n%%\n"
STOP = "\nEND"
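# Illustrative completion layout (inferred from the delimiters above, not part
# of the original file): the prompt sent to the model is "<caption>\n##\n" and
# the fine-tuned model is expected to return "<instruction>\n%%\n<edited caption>"
# followed by "\nEND"; generate() splits the text on DELIMITER_1 and stops at STOP.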
def generate(
openai_model: str,
caption: str,
num_retries: int = 3,
max_tokens: int = 256,
temperature: float = 0.7,
top_p: float = 1.0,
frequency_penalty: float = 0.1,
presence_penalty: float = 0.0,
sleep_on_error: float = 1.0,
) -> Optional[tuple[str, str]]:
for _ in range(1 + num_retries):
try:
response = openai.Completion.create(
model=openai_model,
prompt=caption + DELIMITER_0,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
stop=[STOP],
)
except Exception as e:
print(e)
time.sleep(sleep_on_error)
continue
output = response["choices"][0]["text"].split(DELIMITER_1)
if len(output) == 2:
instruction, edited_caption = output
results = openai.Moderation.create([instruction, edited_caption])["results"]
if results[0]["flagged"] or results[1]["flagged"]:
continue
if caption.strip().strip(".!?").lower() != edited_caption.strip().strip(".!?").lower():
return instruction, edited_caption
def main(openai_model: str, num_samples: int, num_partitions: int, partition: int, seed: int):
dataset = datasets.load_dataset("ChristophSchuhmann/improved_aesthetics_6.5plus", split="train")
# Other datasets we considered that may be worth trying:
# dataset = datasets.load_dataset("ChristophSchuhmann/MS_COCO_2017_URL_TEXT", split="train")
# dataset = datasets.load_dataset("laion/laion-coco", split="train")
np.random.seed(seed)
permutation = np.array_split(np.random.permutation(len(dataset)), num_partitions)[partition]
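    # (descriptive note) e.g. with num_partitions=4 the shuffled indices are
    # split into 4 disjoint chunks and this worker keeps only chunk `partition`,
    # so separate partitions can be generated in parallel without overlap.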
dataset = dataset[permutation]
captions = dataset["TEXT"]
urls = dataset["URL"]
output_path = f"data/dataset=laion-aesthetics-6.5_model={openai_model}_samples={num_samples}_partition={partition}.jsonl" # fmt: skip
print(f"Prompt file path: {output_path}")
count = 0
caption_set = set()
url_set = set()
if Path(output_path).exists():
with open(output_path, "r") as f:
for line in tqdm(f, desc="Resuming from existing prompts"):
prompt = json.loads(line)
if prompt["caption"] not in caption_set and prompt["url"] not in url_set:
caption_set.add(prompt["caption"])
url_set.add(prompt["url"])
count += 1
with open(output_path, "a") as fp:
with tqdm(total=num_samples - count, desc="Generating instructions and edited captions") as progress_bar:
for caption, url in zip(captions, urls):
if caption in caption_set or url in url_set:
continue
if openai.Moderation.create(caption)["results"][0]["flagged"]:
continue
edit_output = generate(openai_model, caption)
if edit_output is not None:
edit, output = edit_output
fp.write(f"{json.dumps(dict(caption=caption, edit=edit, output=output, url=url))}\n")
count += 1
progress_bar.update()
caption_set.add(caption)
url_set.add(url)
if count == num_samples:
break
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--openai-api-key", required=True, type=str)
parser.add_argument("--openai-model", required=True, type=str)
parser.add_argument("--num-samples", default=10000, type=int)
parser.add_argument("--num-partitions", default=1, type=int)
parser.add_argument("--partition", default=0, type=int)
parser.add_argument("--seed", default=0, type=int)
args = parser.parse_args()
openai.api_key = args.openai_api_key
main(args.openai_model, args.num_samples, args.num_partitions, args.partition, args.seed)
| [
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | aarizat/packcircles4bims | circlespacking.py | # -*- coding: utf-8 -*-
"""
Module to define circular particles tangent to each other in a closed polygon
in :math:`\\mathbb{R}^2`.
"""
# %%
class pckCirclesInPolygon:
'''Creates an instance of an object that defines circular particles tangent
in a fractal way inside of a closed polygon in :math:`\\mathbb{R}^2`.
Attributes:
coordinates ((n, 2) `numpy.ndarray`): Coordinates of vertices of the\
polygon.
depth (`int`): Depth fractal for each triangle that compose the\
triangular mesh. If this number is not given, then,\
the fractal generation of circles is done up to a circle\
reachs a radius to lower than the five percent of the\
incircle radius. Large values of `depth` might produce internal\
variables that tend to infinte, then a\
``ValueError`` is produced with a warning message\
``array must not contain infs or NaNs``.
Note:
The class ``pckCirclesInPolygon`` requires\
`NumPy <http://www.numpy.org/>`_,\
`Matplotlib <https://matplotlib.org/>`_ and\
`Triangle <http://dzhelil.info/triangle/>`_
Examples:
>>> from numpy import array
>>> from circlespacking import pckCirclesInPolygon
>>> coords = array([[1, 1], [2, 5], [4.5, 6], [8, 3], [7, 1], [4, 0]])
>>> pckCircles = pckCirclesInPolygon(coords)
>>> pckCircles.__dict__.keys()
dict_keys(['coordinates', 'depth', 'CDT', 'listCircles'])
'''
def __init__(self, coordinates, depth=None):
'''Method for initializing the attributes of the class.'''
self.coordinates = coordinates
self.depth = depth
# initializing methods
self.trianglesMesh()
self.generator()
def trianglesMesh(self):
'''Method to generate a triangles mesh in a polygon by using
`Constrained Delaunay triangulation\
<https://en.wikipedia.org/wiki/Constrained_Delaunay_triangulation>`_.
Return:
verts ((n, 3, 2) `numpy.ndarray`): Vertices of each triangle that\
compose the triangular mesh. n means the number of triangles;\
(3, 2) means the index vertices and the coordinates (x, y)\
respectively.
Examples:
>>> from numpy import array
>>> from basegeometry import Polygon
>>> from circlespacking import pckCirclesInPolygon
>>> coordinates = array([[1, 1], [2, 5], [4.5, 6], [6, 4], [8, 3],
[7, 1], [4.5, 1], [4, 0]])
>>> polygon = Polygon(coordinates)
>>> boundCoords = polygon.boundCoords
>>> pckCircles = pckCirclesInPolygon(boundCoords)
>>> verts = pckCircles.trianglesMesh()
>>> from numpy import array
>>> from basegeometry import Polygon
>>> from circlespacking import pckCirclesInPolygon
>>> coordinates = array([[2, 2], [2, 6], [8, 6], [8, 2]])
>>> polygon = Polygon(coordinates)
>>> boundCoords= polygon.boundCoords
>>> pckCircles = pckCirclesInPolygon(boundCoords)
>>> verts = pckCircles.trianglesMesh()
'''
import numpy as np
from triangle import triangulate
# polygon area by applying the gauss equation
area = 0.5*abs(sum(self.coordinates[:-1, 0] * self.coordinates[1:, 1] -
self.coordinates[:-1, 1] * self.coordinates[1:, 0]))
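        # Worked example of the shoelace (Gauss) sum above (illustrative,
        # assuming the polygon is closed with the first vertex repeated at the
        # end): for the unit square [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]]
        # the terms are 0 + 1 + 1 + 0 = 2, so area = 0.5 * 2 = 1.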
index = np.arange(len(self.coordinates[:-1]))
indexSegmts = np.column_stack((index, np.hstack((index[1:], [0]))))
# Max area of the triangles in the Constrained Delaunay triangulation
maxArea = np.random.uniform(0.25 * area)
steinerPts = np.random.uniform(5, 50)
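        # (descriptive note, assuming Shewchuk's Triangle switches) in the
        # opts string below: 'p' triangulates a planar straight line graph,
        # 'q20' enforces a minimum angle of 20 degrees, 'a<maxArea>' caps the
        # triangle area, and 'S<steinerPts>' limits the added Steiner points.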
# constrained Delaunay triangulation
self.CDT = triangulate(tri={'vertices': self.coordinates[:-1],
'segments': indexSegmts},
opts='pq20a'+str(maxArea)+'S'+str(steinerPts))
vertsIndex = self.CDT['vertices']
trianglesIndex = self.CDT['triangles']
verts = vertsIndex[trianglesIndex]
return verts
def generator(self):
'''Method to generate circular particles in each triangle of the
triangular mesh.
Returns:
listCircles (`list` of Circle objects): `list` that contain all\
the circles object packed in the polygon.
Examples:
>>> from numpy import array
>>> from circlespacking import pckCirclesInPolygon
>>> coords = array([[2, 2], [2, 6], [8, 6], [8, 2]])
>>> pckCircles = pckCirclesInPolygon(coords)
>>> lstCircles = pckCircles.generator() # list of circles
'''
from basegeometry import Triangle
vertsTriangles = self.trianglesMesh() # Triangles mesh in polygon
self.listCircles = list()
for vert in vertsTriangles:
self.listCircles += Triangle(vert).packCircles(depth=self.depth,
want2plot=False)
return self.listCircles
def plot(self, plotTriMesh=False):
'''Method for show a graphic of the circles generated within of the
polyhon.
Parameters:
plotTriMesh (`bool`): Variable to check if it also want to show\
the graph of the triangles mesh. The default value is ``False``
Examples:
.. plot::
from numpy import array
from basegeometry import Polygon
from circlespacking import pckCirclesInPolygon
coordinates = array([[1, 1], [2, 5], [4.5, 6], [8, 3], [7, 1],
[4, 0]])
polygon = Polygon(coordinates)
boundCoords = polygon.boundCoords
pckCircles = pckCirclesInPolygon(boundCoords, depth=5)
pckCircles.plot(plotTriMesh=True)
>>> from numpy import array
>>> from basegeometry import Polygon
>>> from circlespacking import pckCirclesInPolygon
>>> coordinates = array([[1, 1], [2, 5], [4.5, 6], [6, 4], [8, 3],
[7, 1], [4.5, 1], [4, 0]])
>>> polygon = Polygon(coordinates)
>>> boundCoords = polygon.boundCoords
>>> pckCircles = pckCirclesInPolygon(boundCoords)
>>> pckCircles.plot()
>>> from slopegeometry import AnthropicSlope
>>> from circlespacking import pckCirclesInPolygon
>>> slopeGeometry = AnthropicSlope(12, [1, 1.5], 10, 10)
>>> boundCoords = slopeGeometry.boundCoords
>>> pckCircles = pckCirclesInPolygon(boundCoords)
>>> pckCircles.plot(plotTriMesh=True)
.. plot::
from numpy import array
from slopegeometry import NaturalSlope
from circlespacking import pckCirclesInPolygon
surfaceCoords = array([[-2.4900, 18.1614],
[0.1022, 17.8824],
[1.6975, 17.2845],
[3.8909, 15.7301],
[5.8963, 14.3090],
[8.1183, 13.5779],
[9.8663, 13.0027],
[13.2865, 3.6058],
[20.2865, 3.6058],
[21.4347, 3.3231],
[22.2823, 2.7114],
[23.4751, 2.2252],
[24.6522, 1.2056],
[25.1701, 0.2488]])
slopeGeometry = NaturalSlope(surfaceCoords)
boundCoords = slopeGeometry.boundCoords
pckCircles = pckCirclesInPolygon(boundCoords)
pckCircles.plot(plotTriMesh=True)
'''
import numpy as np
import matplotlib.pyplot as plt
from triangle import plot as tplot
# plotting
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(np.hstack((self.coordinates[:, 0], self.coordinates[0, 0])),
np.hstack((self.coordinates[:, 1], self.coordinates[0, 1])),
'-k', lw=1.5, label='Polygon')
ax.axis('equal')
ax.set_xlabel('$x$ distance')
ax.set_ylabel('$y$ distance')
ax.grid(ls='--', lw=0.5)
for circle in self.listCircles:
ax.add_patch(plt.Circle(circle.center, circle.radius, fill=False,
lw=1, ec='black'))
# plotting triangular mesh
if plotTriMesh:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.grid(ls='--', lw=0.5)
tplot.plot(ax, **self.CDT)
ax.axis('equal')
return
def frecuencyHist(self):
'''Method to show the histogram of the diameters of the circular
particles packed in a closed polygon in :math:`\\mathbb{R}^2`.
Examples:
.. plot::
from numpy import array
from basegeometry import Polygon
from circlespacking import pckCirclesInPolygon
coordinates = array([[1, 1], [2, 5], [4.5, 6], [6, 4], [8, 3],
[7, 1], [4.5, 1], [4, 0]])
polygon = Polygon(coordinates)
boundCoords = polygon.boundCoords
pckCircles = pckCirclesInPolygon(boundCoords, 10)
pckCircles.frecuencyHist()
'''
import numpy as np
import math
import matplotlib.pyplot as plt
# Obtaining diameters histogram
        n = len(self.listCircles) # sample size
# Number of bins according to Sturges equation
numBins = math.floor(1 + math.log(n, 2))
diams = [circle.diameter for circle in self.listCircles]
bins = np.linspace(min(diams), max(diams), numBins)
# plotting
plt.style.use('seaborn-white')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(diams, bins)
ax.grid(ls='--', lw=0.5)
ax.set_xlabel('Diameters')
        ax.set_ylabel('Frequency')
return
def loglogDiagram(self):
'''Method to show the log-log graph of the diameters and quantities
of circular particles packed in a closed polygon in
:math:`\\mathbb{R}^2`.
Examples:
.. plot::
from numpy import array
from basegeometry import Polygon
from circlespacking import pckCirclesInPolygon
coordinates = array([[1, 1], [2, 5], [4.5, 6], [6, 4], [8, 3],
[7, 1], [4.5, 1], [4, 0]])
polygon = Polygon(coordinates)
boundCoords = polygon.boundCoords
pckCircles = pckCirclesInPolygon(boundCoords, 10)
pckCircles.loglogDiagram()
'''
import matplotlib.pyplot as plt
import numpy as np
import math
# Obtaining diameters histogram
        n = len(self.listCircles) # sample size
# Number of bins according to Sturges equation
numBins = math.floor(1 + math.log(n, 2))
diams = [circle.diameter for circle in self.listCircles]
bins = np.linspace(min(diams), max(diams), numBins)
hist, binEdges = np.histogram(diams, bins)
nonZeroIndx = [i for i, k in enumerate(hist) if k != 0]
histRed = hist[nonZeroIndx]
histRedRel = [float(k)/n * 100 for k in histRed]
nonZeroIndx4Bins = [k+1 for k in nonZeroIndx]
binEdgesRed = binEdges[nonZeroIndx4Bins]
d = binEdgesRed
nD = histRedRel
# plotting
fig = plt.figure()
ax = fig.add_subplot(111)
ax.loglog(d, nD, 'ko', basex=2)
ax.grid(ls='--', lw=0.5)
return
# %%
'''
BSD 2 license.
Copyright (c) 2018, Universidad Nacional de Colombia, Andres Ariza-Triana
and Ludger O. Suarez-Burgoa.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
| [] |
2024-01-10 | aarizat/packcircles4bims | docs~slopegeometry-1.py | from slopegeometry import AnthropicSlope
slopeGeometry = AnthropicSlope(12, [1, 1.5], 10, 10)
slopeGeometry.plotSlope() | [] |
2024-01-10 | lukerowen/Digital_Prophecies | Python%20Server~Oracle_Server.py | from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import unquote
import os
import openai
HOST = ''
PORT = 8080
# api_key = "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"
# org_id = "org-EdKL0QhYTM5c8KGxQ5XwtwjS"
Oracle_Prompt = "You are to act as the Oracle of Delphi from Greek mythology. " \
"You are not a digital assistant. " \
"You are only the Oracle of Delphi" \
"You should structure all of your responses as she would. " \
"The answer's you give should sound like something she either has said or would say. " \
"Use the mythology as the basis for determining what she would say. " \
"You are allowed to use all information available to you when creating responses. " \
"Answer all questions in a similar matter as she would. " \
"Do not provide an explanation to your answer. " \
"When you are ready, greet me."
model = "gpt-3.5-turbo"
openai.api_key = os.getenv("OPENAI_API_KEY")
gpt = openai.Model.retrieve(model)
messages = [{"role": "system", "content": Oracle_Prompt}]
def getCompletion(user_input):
if user_input == "AdminResetChatLog":
global messages
messages = [{"role": "system", "content": Oracle_Prompt}]
else:
messages.append({"role": "user", "content": user_input})
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
oracle_response = completion["choices"][0]["message"]["content"]
messages.append({"role": "assistant", "content": oracle_response})
return oracle_response
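# Illustrative call (assumed, not part of the original server): each call such
# as getCompletion("What does my future hold?") appends the user turn, sends
# the full running history to the chat API, appends the assistant reply, and
# returns it; sending the magic string "AdminResetChatLog" resets the history
# to just the system prompt.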
class OracleServer(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes("<html><body><h1>ORACLE SERVER IS RUNNING</h1></body></html>", "utf-8"))
def do_POST(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
content_length = int(self.headers.get('content-length', 0))
# Read in the input text, parse it out, convert it to normal words, remove leading '='
user_input = unquote(self.rfile.read(content_length).decode())[1:]
print(user_input)
oracle_response = getCompletion(user_input)
# self.wfile.write(oracle_response)
self.wfile.write(bytes(oracle_response, "utf-8"))
print("Server now running...")
HTTPServer((HOST, PORT), OracleServer).serve_forever()
print("Server Stopped")
# Thank you to : https://www.youtube.com/watch?v=DeFST8tvtuI | [
"You are to act as the Oracle of Delphi from Greek mythology. You are not a digital assistant. You are only the Oracle of DelphiYou should structure all of your responses as she would. The answer's you give should sound like something she either has said or would say. Use the mythology as the basis for determining what she would say. You are allowed to use all information available to you when creating responses. Answer all questions in a similar matter as she would. Do not provide an explanation to your answer. When you are ready, greet me."
] |
2024-01-10 | emmahuang03/trash-identifier | src~gptimpactandsuggestions.py | import openai
from keys import *
from chatgpt import categorizeObject
def giveImpactAndSuggestionsOpenai():
openai.api_key = api_key_openai
openai.api_base = api_base_openai
openai.api_type = 'azure'
openai.api_version = '2023-05-15' # this may change in the future
deployment_name='openai-gpt35'
# set the role of AI assistant and give examples
role = {"role": "system", "content": "You are an AI assistant that helps people understand the impact of their choices when recycling, trashing items, and composting them. You also sometimes give information on what they could do to make better choices."}
q1 = {"role": "user", "content": "give me specific stats for recycling a water bottle in 5 bullet points"}
a1 = {"role": "assistant", "content": "1. Recycling a water bottle conserves approximately 0.15 kg (0.33 lbs) of plastic material.\n2. Recycling a single water bottle saves about 16-24 MJ of energy.\n3. Recycling a water bottle helps avoid emitting approximately 0.3-0.45 kg (0.66-1 lb) of carbon dioxide equivalent (CO2e).\n4. Recycling a water bottle reduces landfill waste by approximately 0.15 kg (0.33 lbs).\n5. Recycling a water bottle contributes to the conservation of natural resources, such as petroleum, which is used to make plastic."}
q2 = {"role": "user", "content": "give me specific stats for composting an apple in 5 bullet points"}
a2 = {"role": "assistant", "content": "1. Composting a single apple diverts approximately 0.15 kg (0.33 lbs) of organic waste from landfills.\n2. The decomposition of an apple through composting produces nutrient-rich compost, typically around 0.01 kg (0.02 lbs) in weight.\n3. Composting an apple helps reduce greenhouse gas emissions, as organic waste in landfills produces methane, a potent greenhouse gas.\n4. The compost generated from an apple enriches the soil with essential nutrients, such as potassium, phosphorus, and nitrogen.\n5. Incorporating compost into soil enhances its water-holding capacity, promoting better moisture retention and reducing the need for excessive irrigation."}
q3 = {"role": "user", "content": "give me specific stats on the problem with throwing clothes in the trash in 3 bullet points and give me ways to avoid throwing clothes in the trash in 3 bullet points"}
a3 = {"role": "assistant", "content": "Stats on the problem with throwing clothes in the trash:\n1. Textile Waste Volume: Globally, approximately 92 million tons of textile waste is generated each year, with the equivalent of one garbage truck of textiles being landfilled or burned every second. (Source: Ellen MacArthur Foundation)\n2. Low Recycling Rates: Only about 13% of the total fiber input used for clothing is recycled globally, meaning the majority of discarded clothes end up in landfills or incinerators. (Source: Pulse of the Fashion Industry Report 2020)\n3. Environmental Impact: The fashion industry accounts for about 10% of global greenhouse gas emissions, more than international flights and maritime shipping combined. Additionally, clothing production uses an estimated 1.5 trillion liters of water annually. (Source: World Resources Institute)\nWays to avoid throwing clothes in the trash:\n1.Donate: Instead of throwing clothes away, consider donating them to local charities, thrift stores, or clothing banks. Many people can benefit from gently used clothing.\n2.Repair and repurpose: Extend the lifespan of your clothes by repairing them when they get damaged or giving them a new purpose through DIY projects, such as turning old t-shirts into rags or creating new items from old ones.\n3.Sell or swap: Consider hosting a clothing swap with friends or using online platforms to sell or exchange unwanted clothing items. This way, you can give your clothes a new home while reducing waste."}
# Send a completion call to generate an answer
image_detection_res = categorizeObject()
method_of_disposal = image_detection_res[0]
user_input = image_detection_res[1]
if method_of_disposal == "recycling":
user_q = {"role": "user", "content": "give me specific stats for recycling a {} in 5 bullet points".format(user_input)}
elif method_of_disposal == "compost":
user_q = {"role": "user", "content": "give me specific stats for composting a {} in 5 bullet points".format(user_input)}
elif method_of_disposal == "landfill trash":
user_q = {"role": "user", "content": "give me specific stats on the problem with throwing {0} in the trash in 3 bullet points and give me ways to avoid throwing {1} in the trash in 3 bullet points".format(user_input, user_input)}
else:
user_q = {"role": "user", "content": "why special processing facility?"}
response = openai.ChatCompletion.create(
engine=deployment_name,
messages=[
role,
q1, a1,
q2, a2,
q3, a3,
user_q
]
)
answer = response['choices'][0]['message']['content']
print(answer)
return answer
def giveImpactAndSuggestions():
return giveImpactAndSuggestionsOpenai()
| [
"You are an AI assistant that helps people understand the impact of their choices when recycling, trashing items, and composting them. You also sometimes give information on what they could do to make better choices.",
"1. Composting a single apple diverts approximately 0.15 kg (0.33 lbs) of organic waste from landfills.\n2. The decomposition of an apple through composting produces nutrient-rich compost, typically around 0.01 kg (0.02 lbs) in weight.\n3. Composting an apple helps reduce greenhouse gas emissions, as organic waste in landfills produces methane, a potent greenhouse gas.\n4. The compost generated from an apple enriches the soil with essential nutrients, such as potassium, phosphorus, and nitrogen.\n5. Incorporating compost into soil enhances its water-holding capacity, promoting better moisture retention and reducing the need for excessive irrigation.",
"give me specific stats on the problem with throwing PLACEHOLDER in the trash in 3 bullet points and give me ways to avoid throwing PLACEHOLDER in the trash in 3 bullet points",
"give me specific stats for recycling a PLACEHOLDER in 5 bullet points",
"1. Recycling a water bottle conserves approximately 0.15 kg (0.33 lbs) of plastic material.\n2. Recycling a single water bottle saves about 16-24 MJ of energy.\n3. Recycling a water bottle helps avoid emitting approximately 0.3-0.45 kg (0.66-1 lb) of carbon dioxide equivalent (CO2e).\n4. Recycling a water bottle reduces landfill waste by approximately 0.15 kg (0.33 lbs).\n5. Recycling a water bottle contributes to the conservation of natural resources, such as petroleum, which is used to make plastic.",
"give me specific stats for recycling a water bottle in 5 bullet points",
"[PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, PLACEHOLDER]",
"give me specific stats for composting a PLACEHOLDER in 5 bullet points",
"Stats on the problem with throwing clothes in the trash:\n1. Textile Waste Volume: Globally, approximately 92 million tons of textile waste is generated each year, with the equivalent of one garbage truck of textiles being landfilled or burned every second. (Source: Ellen MacArthur Foundation)\n2. Low Recycling Rates: Only about 13% of the total fiber input used for clothing is recycled globally, meaning the majority of discarded clothes end up in landfills or incinerators. (Source: Pulse of the Fashion Industry Report 2020)\n3. Environmental Impact: The fashion industry accounts for about 10% of global greenhouse gas emissions, more than international flights and maritime shipping combined. Additionally, clothing production uses an estimated 1.5 trillion liters of water annually. (Source: World Resources Institute)\nWays to avoid throwing clothes in the trash:\n1.Donate: Instead of throwing clothes away, consider donating them to local charities, thrift stores, or clothing banks. Many people can benefit from gently used clothing.\n2.Repair and repurpose: Extend the lifespan of your clothes by repairing them when they get damaged or giving them a new purpose through DIY projects, such as turning old t-shirts into rags or creating new items from old ones.\n3.Sell or swap: Consider hosting a clothing swap with friends or using online platforms to sell or exchange unwanted clothing items. This way, you can give your clothes a new home while reducing waste.",
"give me specific stats on the problem with throwing clothes in the trash in 3 bullet points and give me ways to avoid throwing clothes in the trash in 3 bullet points",
"why special processing facility?",
"give me specific stats for composting an apple in 5 bullet points"
] |
2024-01-10 | GeneralKugelBlitz/StoryGPT_agent | llms.py | from langchain import Clarifai
llm=Clarifai(pat='YOUR_API_KEY',user_id="meta",app_id="Llama-2", model_id="llama2-70b-chat") | [] |
2024-01-10 | wjkaufman/rl_pulse | rl_pulse~legacy~rl_pulse_base.py | """
Actor and critic classes adapted from
https://pemami4911.github.io/blog/2016/08/21/ddpg-rl.html
"""
import numpy as np
import scipy.linalg as spla
import random
from collections import deque
import tensorflow as tf
import tensorflow.keras as keras
import tensorflow.keras.layers as layers
import spin_simulation as ss
class Action:
def __init__(self, action, type='discrete'):
"""
Arguments:
action: If it's discrete, then the action encoding should be an
array of size 1*numActions. If it's continuous, then action
should be a tuple (phi, rot, time).
type: Either 'discrete' or 'continuous'.
"""
self.action = action
self.type = type
def __repr__(self):
        return f'phi: {self.getPhi()/np.pi:.02f}pi, ' + \
            f'rot: {self.getRot()/np.pi:.02f}pi, ' + \
            f'dt: {self.getTime()/1e-6:.02f} microsec'
def getPhi(self):
"""Get the angle phi that specifies the axis of rotation in the
xy-plane. Should be a value in [0,2*pi].
"""
if self.type == 'discrete':
ind = np.nonzero(self.action)[0]
if ind.size > 0:
ind = ind[0]
else: # the action is null
return 0.
if ind in [0,1]: # X, Xbar
return 0.
elif ind in [2,3]: # Y, Ybar
return np.pi/2
elif ind == 4: # nothing
return 0.
elif self.type == 'continuous':
return np.mod(self.action[0] * np.pi/2, 2*np.pi)
def getRot(self):
"""Get the rotation angle from the action. Can be positive or negative.
"""
if self.type == 'discrete':
ind = np.nonzero(self.action)[0]
if ind.size > 0:
ind = ind[0]
else: # the action is null
return 0.
if ind in [0,2]:
return np.pi/2
elif ind in [1,3]:
return -np.pi/2
elif ind == 4:
return 0.
elif self.type == 'continuous':
return self.action[1] * 2*np.pi
def getTime(self):
"""Get the time (in seconds) from the action encoding.
        Ideally want action-time mappings of -1 -> 0 and 1 -> ~5e-6 s
        (for the continuous encoding below).
"""
if self.type == 'discrete':
ind = np.nonzero(self.action)[0]
if ind.size > 0:
ind = ind[0]
else: # the action is null
return 0.
if ind in [0,1,2,3]:
return 0.
elif ind == 4:
return 5e-6
elif self.type == 'continuous':
# return 10.0**((a[..., 2])*1.70757 - 7) -1e-7
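            # Worked values for the expression below (illustrative):
            # action[2] = -1 -> 10**(-7) - 1e-7 = 0 s
            # action[2] = +1 -> 10**(2*0.853785 - 7) - 1e-7 ~ 5e-6 s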
return 10.0**((self.action[2]+1)*0.853785 - 7) -1e-7
def format(self):
if self.type == 'discrete':
ind = np.nonzero(self.action)[0]
if ind.size > 0:
ind = ind[0]
else:
return ''
return ['X', 'Xbar', 'Y', 'Ybar', 'delay'][ind]
else:
if self.getRot() != 0:
# non-zero rotation
return f"phi={self.getPhi()/np.pi:.02f}pi, " + \
f" rot={self.getRot()/np.pi:.02f}pi, " + \
f"t={self.getTime()*1e6:.02f} microsec"
else:
# no rotation -> delay
if self.getTime() != 0:
return f'delay, t={self.getTime()*1e6:.02f} microsec'
else:
# no rotation, no time
return ''
def clip(self):
"""Clip the action to give physically meaningful information.
"""
if self.type == 'continuous':
self.action = np.array([np.clip(self.action[0], -1, 1), \
np.clip(self.action[1], -1, 1), \
np.clip(self.action[2], -1, 1)])
def print(self):
print(self.format())
def get_propagator(self, N, dim, H, discretePropagators=None):
"""Convert an action a into the RF Hamiltonian H.
TODO: change the action encoding to (phi, strength, t) to more easily
constrain relevant parameters (minimum time, maximum strength)
Arguments:
a: Action performed on the system. The action is a 1x3 array
containing the relevant information for a rotation over some
time.
H: Time-independent Hamiltonian.
Returns:
The propagator U corresponding to the time-independent Hamiltonian and
the RF pulse
"""
# TODO make getting propagator easier for discrete actions
if self.type == 'discrete':
ind = np.nonzero(self.action)[0][0]
return discretePropagators[ind]
elif self.type == 'continuous':
J = ss.getAngMom(np.pi/2, self.getPhi(), N, dim)
rot = self.getRot()
time = self.getTime()
return spla.expm(-1j*(H*time + J*rot))
def formatActions(actions, type='discrete'):
"""Format a list of actions nicely
"""
str = ''
i=0
for a in actions:
strA = Action(a, type=type).format()
if strA != '':
str += f'{i}: ' + strA + '\n'
i += 1
return str
class Environment(object):
def __init__(self, N, dim, coupling, delta, sDim, Htarget, X, Y,\
type='discrete', delay=5e-6, delayAfter=False):
"""Initialize a new Environment object
Arguments:
delayAfter: Should there be a delay after every pulse/delay?
"""
self.N = N
self.dim = dim
self.coupling = coupling
self.delta = delta
self.Htarget = Htarget
self.X = X
self.Y = Y
self.sDim = sDim
self.type = type
self.delay = delay
self.delayAfter = delayAfter
self.reset()
def makeDiscretePropagators(self):
"""Make a discrete number of propagators so that I'm not re-calculating
the propagators over and over again.
To simplify calculations, define each action as a pulse (or no pulse)
followed by a delay
"""
Udelay = spla.expm(-1j*(self.Hint*self.delay))
Ux = spla.expm(-1j*(self.X*np.pi/2))
Uxbar = spla.expm(-1j*(self.X*-np.pi/2))
Uy = spla.expm(-1j*(self.Y*np.pi/2))
Uybar = spla.expm(-1j*(self.Y*-np.pi/2))
if self.delayAfter:
Ux = Udelay @ Ux
Uxbar = Udelay @ Uxbar
Uy = Udelay @ Uy
Uybar = Udelay @ Uybar
self.discretePropagators = [Ux, Uxbar, Uy, Uybar, Udelay]
def reset(self, randomize=True):
"""Resets the environment by setting all propagators to the identity
and setting t=0
"""
# randomize dipolar couplings and get Hint
if randomize:
_, self.Hint = ss.get_H(self.N, self.dim, \
self.coupling, self.delta)
# initialize propagators to delay
if self.delayAfter:
self.Uexp = ss.get_propagator(self.Hint, self.delay)
self.Utarget = ss.get_propagator(self.Htarget, self.delay)
else:
self.Uexp = np.eye(self.dim, dtype="complex128")
self.Utarget = np.copy(self.Uexp)
# initialize time
self.t = 0
if self.delayAfter:
self.t += self.delay
# for network training, define the "state" (sequence of actions)
self.state = np.zeros((32, self.sDim), dtype="float32")
# depending on time encoding, need to set this so that t=0
if self.type == 'continuous':
self.state[:,2] = -1
self.tInd = 0 # keep track of time index in state
# and recalculate propagators if discrete
if self.type == 'discrete':
self.makeDiscretePropagators()
def copy(self):
"""Return a copy of the environment
"""
return Environment(self.N, self.dim, self.coupling, self.delta, \
self.sDim, self.Htarget, self.X, self.Y, type=self.type, \
delay=self.delay, delayAfter=self.delayAfter)
def getState(self):
return np.copy(self.state)
def act(self, action):
"""Evolve the environment corresponding to an action and the
time-independent Hamiltonian
Arguments:
action: An instance of Action class.
"""
# TODO change below when doing finite pulse widths/errors
if self.delayAfter:
dt = self.delay
else:
dt = action.getTime()
if self.tInd < np.size(self.state, 0):
if self.type == 'discrete':
self.Uexp = action.get_propagator(self.N, self.dim, self.Hint, \
self.discretePropagators) @ self.Uexp
else:
self.Uexp = action.get_propagator(self.N, self.dim, self.Hint) \
@ self.Uexp
if dt > 0:
self.Utarget = ss.get_propagator(self.Htarget, dt) @ \
self.Utarget
self.t += dt
self.state[self.tInd,:] = action.action
self.tInd += 1
else:
print('ran out of room in state array, not evolving state')
print(self.state)
print(f'tInd: {self.tInd}, t: {self.t}')
def reward(self):
return -1.0 * (self.t > 1e-6) * \
np.log10((1-ss.fidelity(self.Utarget,self.Uexp))+1e-100)
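        # Illustrative scale of the reward above: fidelity 0.999 gives
        # -log10(1e-3) = 3 and fidelity 0.999999 gives 6, so the reward grows
        # roughly with the number of "nines" of fidelity once t > 1 microsec.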
# isTimeGood = 1/(1 + np.exp((15e-6-self.t)/2e-6))
# return -1.0 * isTimeGood * np.log10((1 - \
# np.power(ss.fidelity(self.Utarget, self.Uexp), 20e-6/self.t)) + \
# 1e-100)
#
# isTimeGood = self.t >= 15e-6
# isDelay = 1-self.state[self.tInd-1,1] # if there's no rotation
# return -1.0 * isTimeGood * isDelay * np.log10((1 - \
# np.power(ss.fidelity(self.Utarget, self.Uexp), 20e-6/self.t)) + \
# 1e-100)
def isDone(self):
"""Returns true if the environment has reached a certain time point
or once the number of state variable has been filled
TODO modify this when I move on from constrained (4-pulse) sequences
"""
return self.tInd >= np.size(self.state, 0)
class NoiseProcess(object):
"""A noise process that can have temporal autocorrelation
Scale should be a number between 0 and 1.
TODO need to add more sophisticated noise here...
"""
def __init__(self, scale):
self.scale = scale
def copy(self):
"""Copy noise process
"""
return NoiseProcess(self.scale)
def getNoise(self):
return np.array( \
[np.random.normal(loc=0, scale=.05*self.scale) + \
np.random.choice([-1,-.5,.5,1,0], \
p=[self.scale/4,self.scale/4,self.scale/4,self.scale/4,\
1-self.scale]), \
np.random.normal(loc=0, scale=.05*self.scale) + \
np.random.choice([-1,-.5,.5,1,0], \
p=[self.scale/4,self.scale/4,self.scale/4,self.scale/4,\
1-self.scale]), \
np.random.normal(loc=0, scale=.05*self.scale) + \
np.random.choice([-.5,.5,0], \
p=[self.scale/2,self.scale/2,1-self.scale])])
class ReplayBuffer(object):
"""Define a ReplayBuffer object to store experiences for training
"""
def __init__(self, bufferSize):
self.bufferSize = bufferSize
self.size = 0
self.buffer = deque()
def add(self, s, a, r, s1, d):
"""Add an experience to the buffer.
If the buffer is full, pop an old experience and
add the new experience.
Arguments:
s: Old environment state on which action was performed
a: Action performed
r: Reward from state-action pair
s1: New environment state from state-action pair
d: Is s1 terminal (if so, end the episode)
"""
exp = (s,a,r,s1,d)
if self.size < self.bufferSize:
self.buffer.append(exp)
self.size += 1
else:
self.buffer.popleft()
self.buffer.append(exp)
def getSampleBatch(self, batchSize, powerOfTwo=True):
"""Get a sample batch from the replayBuffer
Arguments:
batchSize: Size of the sample batch to return. If the replay buffer
doesn't have batchSize elements, return the entire buffer
powerOfTwo: Boolean, whether to return a sample batch of size 2^n.
Returns:
A tuple of arrays (states, actions, rewards, new states, and d)
"""
batch = []
size = np.minimum(self.size, batchSize)
if powerOfTwo:
size = int(2**(np.floor(np.log2(size))))
batch = random.sample(self.buffer, size)
sBatch = np.array([_[0] for _ in batch])
aBatch = np.array([_[1] for _ in batch])
rBatch = np.array([_[2] for _ in batch])
s1Batch = np.array([_[3] for _ in batch])
dBatch = np.array([_[4] for _ in batch])
return sBatch, aBatch, rBatch, s1Batch, dBatch
def clear(self):
self.buffer.clear()
self.size = 0
def mutateMat(mat, mutateStrength=1, mutateFrac=.1, \
superMutateProb=.01, resetProb=.01):
"""Method to perform mutations on a given nd-array
Arguments:
mat: Matrix on which to perform mutations.
        mutateStrength: Strength of mutation. Corresponds to the standard
            deviation of the random number that the weight is multiplied by.
superMutateProb: Probability of a "super-mutation." This is the
probability _given_ that a mutation occurs.
resetProb: Probability of resetting the weight. This is the
probability _given_ that a mutation occurs.
"""
# choose which elements to mutate
mutateInd = np.random.choice(mat.size, \
int(mat.size * mutateFrac), replace=False)
superMutateInd = mutateInd[0:int(mat.size*mutateFrac*superMutateProb)]
resetInd = mutateInd[int(mat.size*mutateFrac*superMutateProb):\
int(mat.size*mutateFrac*(superMutateProb + resetProb))]
mutateInd = mutateInd[int(mat.size*mutateFrac*(superMutateProb+resetProb)):]
# perform mutations on mat
mat[np.unravel_index(superMutateInd, mat.shape)] *= \
np.random.normal(scale=100*mutateStrength, size=superMutateInd.size)
mat[np.unravel_index(resetInd, mat.shape)] = \
np.random.normal(size=resetInd.size)
mat[np.unravel_index(mutateInd, mat.shape)] *= \
np.random.normal(scale=mutateStrength, size=mutateInd.size)
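# Illustrative usage of mutateMat (assumed, not part of the original module):
#   w = np.random.normal(size=(64, 64))
#   mutateMat(w, mutateStrength=0.1, mutateFrac=0.1)  # mutates w in place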
class Actor(object):
"""Define an Actor object that learns the deterministic policy function
pi(s): state space -> action space
"""
def __init__(self, sDim=3, aDim=3, learningRate=1e-3, type='discrete'):
"""Initialize a new Actor object
Arguments:
sDim: Dimension of state space.
aDim: Dimension of action space. If discrete, it's the number of
actions that can be performed. If continuous, it's the degrees
of freedom for an action.
learningRate: Learning rate for optimizer.
type: The type of actor, either 'discrete' or 'continuous'. If
'discrete', then the actor learns a stochastic policy which
gives the propensity of performing a discrete number of
actions. If 'continuous', then the actor learns a deterministic
policy.
"""
self.sDim = sDim
self.aDim = aDim
self.learningRate = learningRate
self.type = type
self.model = None
self.optimizer = keras.optimizers.SGD(learning_rate=learningRate)
def createNetwork(self, lstmLayers, denseLayers, lstmUnits, denseUnits,\
normalizationType='layer'):
"""Create the network
Arguments:
lstmLayers: The number of LSTM layers to process state input
denseLayers: The number of fully connected layers
normalizationType: 'layer' uses layer normalization, 'batch' uses
batch normalization.
"""
self.model = keras.Sequential()
# add LSTM layers
if lstmLayers == 1:
self.model.add(layers.LSTM(lstmUnits,\
input_shape=(None,self.sDim,), \
# bias_initializer=tf.random_normal_initializer(stddev=.05), \
# unit_forget_bias=True, \
))
elif lstmLayers == 2:
self.model.add(layers.LSTM(lstmUnits, \
input_shape=(None,self.sDim,), \
# bias_initializer=tf.random_normal_initializer(stddev=.05), \
# unit_forget_bias=True, \
return_sequences=True, \
))
self.model.add(layers.LSTM(lstmUnits))
elif lstmLayers > 2:
self.model.add(layers.LSTM(lstmUnits, \
input_shape=(None, self.sDim,), \
# bias_initializer=tf.random_normal_initializer(stddev=.05), \
# unit_forget_bias=True, \
return_sequences=True, \
))
for i in range(lstmLayers-2):
self.model.add(layers.LSTM(lstmUnits, \
# bias_initializer=tf.random_normal_initializer(stddev=.05), \
# unit_forget_bias=True, \
return_sequences=True))
self.model.add(layers.LSTM(lstmUnits, \
# bias_initializer=tf.random_normal_initializer(stddev=.05), \
# unit_forget_bias=True,\
))
else:
raise("Problem making the network...")
# add dense layers
for i in range(denseLayers):
if normalizationType == 'layer':
self.model.add(layers.LayerNormalization())
elif normalizationType == 'batch':
self.model.add(layers.BatchNormalization())
else:
                raise ValueError('Problem adding normalization layer')
self.model.add(layers.Dense(denseUnits, activation="elu"))
# add output layer
# depends on whether the actor is discrete or continuous
if self.type == 'discrete':
self.model.add(layers.Dense(self.aDim, activation='softmax'))
elif self.type == 'continuous':
self.model.add(layers.Dense(self.aDim, activation="elu", \
kernel_initializer=\
tf.random_uniform_initializer(minval=-1e-3,maxval=1e-3), \
bias_initializer=\
tf.random_uniform_initializer(minval=-1e-3,maxval=1e-3), \
))
else:
            raise ValueError('problem creating output layer for actor')
def predict(self, states, training=False):
"""
Predict policy values from given states
Arguments:
states: A batchSize*timesteps*sDim array.
"""
if len(np.shape(states)) == 3:
# predicting on a batch of states
return self.model(states, training=training)
elif len(np.shape(states)) == 2:
# predicting on a single state
return self.model(np.expand_dims(states,0), training=training)[0]
#@tf.function
def trainStep(self, batch, critic):
"""Trains the actor's policy network one step
using the gradient specified by the DDPG algorithm (if continuous)
or using REINFORCE with baseline (if discrete)
Arguments:
batch: A batch of experiences from the replayBuffer. `batch` is
a tuple: (state, action, reward, new state, is terminal?).
critic: A critic to estimate the Q-function
"""
batchSize = len(batch[0])
# calculate gradient
if self.type == 'continuous':
with tf.GradientTape() as g:
Qsum = tf.math.reduce_sum( \
critic.predict(batch[0], \
self.predict(batch[0], training=True)))
# scale gradient by batch size and negate to do gradient ascent
Qsum = tf.math.multiply(Qsum, -1.0 / batchSize)
gradients = g.gradient(Qsum, self.model.trainable_variables)
self.optimizer.apply_gradients( \
zip(gradients, self.model.trainable_variables))
elif self.type == 'discrete':
# perform gradient ascent for actor-critic
with tf.GradientTape() as g:
# TODO include gamma factors? Ignoring for now...
# N*1 tensor of delta values
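                # (descriptive note) delta is the one-step TD error
                # r + V(s') - V(s), with V(s') masked to 0 at terminal states;
                # it serves as the baseline-corrected return for the policy
                # gradient computed below.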
delta = batch[2] + tf.math.multiply(1-batch[4],\
critic.predict(batch[3])) - critic.predict(batch[0])
# N*1 tensor of policy values
policies = tf.math.multiply(self.predict(batch[0]), batch[1])
policies = tf.math.reduce_sum(policies, axis=1)
loss = tf.math.multiply(-1.0/batchSize, tf.math.reduce_sum( \
tf.math.multiply(delta, tf.math.log(policies))
))
gradients = g.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients( \
zip(gradients, self.model.trainable_variables))
def save_weights(self, filepath):
"""Save model weights in ckpt format
"""
self.model.save_weights(filepath)
def load_weights(self, filepath):
self.model.load_weights(filepath)
def getParams(self):
return self.model.get_weights()
def setParams(self, params):
return self.model.set_weights(params)
def copyParams(self, actor, polyak=0):
"""Update the network parameters from another actor, using
polyak averaging, so that
theta_self = (1-polyak) * theta_self + polyak * theta_a
Arguments:
polyak: Polyak averaging parameter between 0 and 1
"""
params = self.getParams()
aParams = actor.getParams()
copyParams = [params[i] * (1-polyak) + aParams[i] * polyak \
for i in range(len(params))]
self.setParams(copyParams)
def copy(self):
"""Copy the actor and return a new actor with same model
and model parameters.
"""
copy = Actor(self.sDim, self.aDim, self.learningRate, type=self.type)
copy.model = keras.models.clone_model(self.model)
copy.setParams(self.getParams())
return copy
def paramDiff(self, a):
"""Calculate the Frobenius norm for network parameters between network
and another network.
"""
diff = [np.mean((_[0] - _[1])**2) for _ in \
zip(self.getParams(), a.getParams())]
# diff = np.linalg.norm(diff)
return diff
def getAction(self, state, noiseProcess=None):
"""Get action from policy.
"""
a = self.predict(state)
if self.type == 'continuous':
if noiseProcess is not None:
a += noiseProcess.getNoise()
a = Action(a, type=self.type)
a.clip()
if self.type == 'discrete':
# pick an action according to probability distribution
p = np.array(a, dtype='float32')
p = p/p.sum(0)
ind = np.random.choice(self.aDim, p=p)
a = np.zeros((self.aDim), dtype='float32')
a[ind] = 1
a = Action(a, type=self.type)
return a
def evaluate(self, env, replayBuffer=None, noiseProcess=None, numEval=1,\
candidatesFile=None):
"""Perform a complete play-through of an episode, and
return the total rewards from the episode.
"""
fTot = 0.
# delay = Action(np.array([0,0,0,0,1]), type='discrete')
for i in range(numEval):
f=0.
env.reset()
# env.act(delay) # start with delay
s = env.getState()
done = False
            ind = 0
            fInd = 0  # index of the highest-reward step; initialized so it is defined even if rewards never improve
while not done:
a = self.getAction(s, noiseProcess)
env.act(a)
# env.act(delay) # add delay
r = env.reward()
s1 = env.getState()
done = env.isDone()
if replayBuffer is not None:
replayBuffer.add(s,a.action,r,s1, done)
s = s1
f = np.maximum(f, r)
if f == r:
fInd = ind
ind += 1
if f > 5 and candidatesFile is not None:
candidatesFile.write('Candidate pulse sequence identified:\n'+\
formatActions(s, type=self.type) + '\n\n')
candidatesFile.write(f'Fitness:\t{f:.02f} (fInd: {fInd})\n')
fTot += f
return fTot/numEval, fInd
def test(self, env, critic=None):
"""Test the actor's ability without noise. Return the actions it
performs and the rewards it gets through the episode
Arguments:
env: Environment instance
critic: Critic. If it's passed, then evaluate the state-action or
state values as predicted by the critic. Return as the third
element of the tuple.
"""
rMat = []
criticMat = []
env.reset()
# delay = Action(np.array([0,0,0,0,1]), type='discrete')
# env.act(delay) # add delay
s = env.getState()
done = False
while not done:
if critic is not None:
criticMat.append(critic.predict(s))
a = self.getAction(s)
env.act(a)
# env.act(delay) # add delay
rMat.append(env.reward())
s = env.getState()
done = env.isDone()
if critic is None:
return s, rMat
else:
return s, rMat, criticMat
def crossover(self, p1, p2, weight=0.5):
"""Perform evolutionary crossover with two parent actors. Using
both parents' parameters, copies their "genes" to this actor.
Many choices for crossover methods exist. This implements the simplest
uniform crossover, which picks "genes" from either parent with
        probabilities weighted by the weight argument.
Arguments:
p1, p2: Actors whose parameters are crossed, then copied to self.
weight: Probability of selecting p1's genes to pass to child.
Should probably be dependent on the relative fitness of the
two parents.
"""
childParams = self.getParams()
p1Params = p1.getParams()
p2Params = p2.getParams()
for i in range(len(p1Params)):
if np.random.rand() < weight:
childParams[i] = p1Params[i]
else:
childParams[i] = p2Params[i]
self.setParams(childParams)
def mutate(self, mutateStrength=1, mutateFrac=.1, \
superMutateProb=.01, resetProb=.01):
"""Mutate the parameters for the neural network.
Arguments:
mutateFrac: Fraction of weights that will be mutated.
superMutateProb: Probability that a "super mutation" occurs
(i.e. the weight is multiplied by a higher-variance random
number).
resetProb: Probability that the weight is reset to a random value.
"""
params = self.getParams()
for i in range(len(params)):
mutateMat(params[i], mutateStrength, mutateFrac, superMutateProb, \
resetProb)
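# Illustrative sketch (not part of the original module): constructing and querying an Actor.
# The environment is assumed to expose getState() returning a timesteps*sDim array, and the
# hyperparameters below are placeholders.
def _example_actor_usage(env, sDim=3, aDim=5):
    actor = Actor(sDim, aDim, 1e-3, type='discrete')
    actor.createNetwork(1, 4, 64, 32, 'layer')  # lstmLayers, denseLayers, lstmUnits, denseUnits, normalization
    state = env.getState()
    action = actor.getAction(state)  # one-hot Action sampled from the softmax policy
    return action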
class Critic(object):
"""Define a Critic that learns the Q-function or value function for
associated policy.
Q: state space * action space -> R
which gives the total expected return by performing action a in state s
then following policy
V: state space -> total expected rewards
"""
def __init__(self, sDim=3, aDim=3, gamma=.99, learningRate=1e-3, type='V'):
"""Initialize a new Actor object
Arguments:
sDim: Dimension of state space
aDim: Dimension of action space
gamma: discount rate for future rewards
learningRate: Learning rate for optimizer.
type: Q function ('Q') or value function ('V').
"""
self.sDim = sDim
self.aDim = aDim
self.gamma = gamma
self.learningRate = learningRate
self.type = type
self.model = None
self.optimizer = keras.optimizers.SGD(learning_rate=learningRate)
self.loss = keras.losses.MeanSquaredError()
def createNetwork(self, lstmLayers, denseLayers, lstmUnits, denseUnits,\
normalizationType='layer'):
"""Create the network, either a Q-network or value network depending
on the critic type.
Arguments:
lstmLayers: The number of LSTM layers to process state input
denseLayers: The number of fully connected layers
normalizationType: Same as for actor... TODO update
"""
stateInput = layers.Input(shape=(None, self.sDim,), name="stateInput")
if self.type == 'Q':
actionInput = layers.Input(shape=(self.aDim,), name="actionInput")
# add LSTM layers
if lstmLayers == 1:
stateLSTM = layers.LSTM(lstmUnits, \
# bias_initializer=tf.random_normal_initializer(stddev=.05), \
# unit_forget_bias=True,
)(stateInput)
elif lstmLayers == 2:
stateLSTM = layers.LSTM(lstmUnits, \
# bias_initializer=tf.random_normal_initializer(stddev=.05), \
# unit_forget_bias=True, \
return_sequences=True)(stateInput)
stateLSTM = layers.LSTM(lstmUnits)(stateLSTM)
elif lstmLayers > 2:
stateLSTM = layers.LSTM(lstmUnits, \
# bias_initializer=tf.random_normal_initializer(stddev=.05), \
# unit_forget_bias=True, \
return_sequences=True)(stateInput)
for i in range(lstmLayers-2):
stateLSTM=layers.LSTM(lstmUnits, \
# bias_initializer=tf.random_normal_initializer(stddev=.05), \
# unit_forget_bias=True, \
return_sequences=True)(stateLSTM)
stateLSTM = layers.LSTM(lstmUnits, \
# bias_initializer=tf.random_normal_initializer(stddev=.05), \
# unit_forget_bias=True, \
)(stateLSTM)
else:
print("Problem making the network...")
raise
if self.type == 'Q':
# stateHidden = layers.Dense(int(denseUnits/2))(stateLSTM)
stateHidden = stateLSTM
# actionHidden = layers.Dense(int(denseUnits/2))(actionInput)
actionHidden = layers.Dense(denseUnits)(actionInput)
# concatenate state, action inputs
x = layers.concatenate([stateHidden, actionHidden])
else:
# creating value function, state input only
# x = layers.Dense(denseUnits)(stateLSTM)
x = stateLSTM
# add fully connected layers
for i in range(denseLayers):
if normalizationType == 'layer':
x = layers.LayerNormalization()(x)
elif normalizationType == 'batch':
x = layers.BatchNormalization()(x)
else:
                raise ValueError('Problem adding normalization layer')
x = layers.Dense(denseUnits, activation="elu")(x)
output = layers.Dense(1, name="output", \
kernel_initializer=\
tf.random_uniform_initializer(minval=-1e-3,maxval=1e-3), \
bias_initializer=\
tf.random_uniform_initializer(minval=-1e-3,maxval=1e-3), \
)(x)
if self.type == 'Q':
self.model = keras.Model(inputs=[stateInput, actionInput], \
outputs=[output])
elif self.type == 'V':
self.model = keras.Model(inputs=[stateInput], outputs=[output])
else:
            raise ValueError('Whoops, problem making critic network')
def predict(self, states, actions=None, training=False):
"""
Predict Q-values or state values for given inputs
"""
if len(np.shape(states)) == 3:
# predicting on a batch of states/actions
if self.type == 'Q':
return self.model({"stateInput": states,\
"actionInput": actions}, \
training=training)
else:
return self.model({"stateInput": states}, training=training)
elif len(np.shape(states)) == 2:
# predicting on a single state/action
if self.type == 'Q':
return self.model({"stateInput": np.expand_dims(states,0), \
"actionInput": np.expand_dims(actions,0)}, \
training=training)[0][0]
else:
return self.model({"stateInput": np.expand_dims(states,0)}, \
training=training)[0][0]
#@tf.function
def trainStep(self, batch, actorTarget=None, criticTarget=None):
"""Trains the critic's Q/value function one step
using the gradient specified by the DDPG algorithm
Arguments:
batch: A batch of experiences from the replayBuffer
actorTarget: Target actor
criticTarget: Target critic
"""
batchSize = len(batch[0])
if self.type == 'Q':
# learn Q function, based on DDPG
targets = batch[2] + self.gamma * (1-batch[4]) * \
criticTarget.predict(batch[3], actorTarget.predict(batch[3]))
# calculate gradient according to DDPG algorithm
with tf.GradientTape() as g:
predictions = self.predict(batch[0], batch[1], training=True)
predLoss = self.loss(predictions, targets)
predLoss = tf.math.multiply(predLoss, 1.0 / batchSize)
gradients = g.gradient(predLoss, self.model.trainable_variables)
self.optimizer.apply_gradients( \
zip(gradients, self.model.trainable_variables))
else:
# learn value function
# implementation from Barto and Sutton
# delta = batch[2] + \
# self.gamma * tf.math.multiply(1-batch[4], \
# self.predict(batch[3])) -\
# self.predict(batch[0])
# with tf.GradientTape() as g:
# values = self.predict(batch[0], training=True)
# loss = tf.math.multiply(-1.0/batchSize, \
# tf.math.reduce_sum(tf.multiply(delta, values)))
# implementation from OpenAI and others
# calculate target values using reward and discounted future value
targets = batch[2] + \
self.gamma * tf.math.multiply(1-batch[4], \
self.predict(batch[3]))
with tf.GradientTape() as g:
values = self.predict(batch[0], training=True)
loss = self.loss(values, targets)
gradients = g.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients( \
zip(gradients, self.model.trainable_variables))
def save_weights(self, filepath):
"""Save model weights in ckpt format
"""
self.model.save_weights(filepath)
def load_weights(self, filepath):
self.model.load_weights(filepath)
def getParams(self):
return self.model.get_weights()
def setParams(self, params):
return self.model.set_weights(params)
def copyParams(self, a, polyak=0):
"""Update the network parameters from another network, using
polyak averaging, so that
theta_self = (1-polyak) * theta_self + polyak * theta_a
Arguments:
polyak: Polyak averaging parameter between 0 and 1
"""
params = self.getParams()
aParams = a.getParams()
copyParams = [params[i] * (1-polyak) + aParams[i] * polyak \
for i in range(len(params))]
self.setParams(copyParams)
def copy(self):
"""Copy the critic and return a new critic with same model
and model parameters.
"""
copy = Critic(self.sDim, self.aDim, self.gamma, self.learningRate,\
type=self.type)
copy.model = keras.models.clone_model(self.model)
copy.setParams(self.getParams())
return copy
def paramDiff(self, c):
"""Calculate the Frobenius norm for network parameters between network
and another network.
Returns:
A list of scalars, where each element is the norm for a particular
layer in the neural network.
"""
diff = [np.mean((_[0] - _[1])**2) for _ in \
zip(self.getParams(), c.getParams())]
# diff = np.linalg.norm(diff)
return diff
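# Illustrative sketch (not part of the original module): one DDPG-style update step using the
# Actor/Critic API above. replayBuffer.sample() is an assumed interface returning a
# (state, action, reward, new state, is terminal?) batch like the one trainStep expects.
def _example_ddpg_update(actor, critic, actorTarget, criticTarget, replayBuffer,
                         batchSize=32, polyak=0.01):
    batch = replayBuffer.sample(batchSize)
    critic.trainStep(batch, actorTarget, criticTarget)  # regress Q toward bootstrapped targets
    actor.trainStep(batch, critic)                      # gradient ascent on the critic's Q estimate
    actorTarget.copyParams(actor, polyak)               # polyak-average the target networks
    criticTarget.copyParams(critic, polyak)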
class Population(object):
"""Population of actors, for evolutionary reinforcement learning.
"""
def __init__(self, size=10):
self.size = size
self.fitnesses = np.full((self.size,), -1e100, dtype=float)
self.fitnessInds = np.zeros((self.size), dtype=int)
self.pop = np.full((self.size,), None, dtype=object)
# when actor was synced to population from gradient actor
self.synced = np.zeros((self.size), dtype=int)
# when actor was last mutated
self.mutated = np.zeros((self.size), dtype=int)
def startPopulation(self, sDim, aDim, learningRate, type='discrete', \
lstmLayers=1, denseLayers=4, lstmUnits=64, denseUnits=32,\
normalizationType='layer'):
for i in range(self.size):
self.pop[i] = Actor(sDim, aDim, learningRate, type=type)
self.pop[i].createNetwork(lstmLayers,denseLayers,\
lstmUnits, denseUnits, normalizationType)
def evaluate(self, env, replayBuffer, noiseProcess=None, numEval=1,\
candidatesFile=None):
"""Evaluate the fitnesses of each member of the population.
Arguments:
candidatesFile: If not none, pass to each individual when it's
evaluated and save high-reward pulse sequences.
"""
for i in range(self.size):
print(f'evaluating individual {i+1}/{self.size},\t', end='')
self.fitnesses[i], self.fitnessInds[i] = self.pop[i].evaluate(env,\
replayBuffer, noiseProcess, numEval,\
candidatesFile=candidatesFile)
print(f'fitness is {self.fitnesses[i]:.02f}')
def iterate(self, eliteFrac=0.1, tourneyFrac=.2, crossoverProb=.25, \
mutateProb = .25, mutateStrength=1, mutateFrac=.1, \
superMutateProb=.01, resetProb=.01, generation=0):
"""Iterate the population to create the next generation
of members.
Arguments:
eliteFrac: Fraction of total population that will be marked as
"elites".
tourneyFrac: Fraction of population to include in each tournament
("tourney").
"""
# TODO maybe put a bunch of prints in here
# to make sure it's working as expected
# sort population by fitness
indSorted = np.argsort(self.fitnesses)
numElite = int(np.ceil(self.size * eliteFrac))
indElite = indSorted[-numElite:]
indOther = indSorted[:-numElite]
elites = self.pop[indElite]
eliteFitness = self.fitnesses[indElite]
print('selected elites')
# perform tournament selection to get rest of population
selected = np.full((self.size-numElite), None, dtype=object)
selectedSynced = np.zeros((self.size-numElite), dtype=int)
selectedMutated = np.copy(selectedSynced)
tourneySize = int(np.ceil(self.size * tourneyFrac))
for i in range(self.size-numElite):
# pick random subset of population for tourney
ind = np.random.choice(self.size, tourneySize, replace=False)
# pick the winner according to highest fitness
indWinner = ind[np.argmax(self.fitnesses[ind])]
winner = self.pop[indWinner]
selectedSynced[i] = self.synced[indWinner]
selectedMutated[i] = self.mutated[indWinner]
if winner not in selected and winner not in elites:
selected[i] = winner
else:
selected[i] = winner.copy()
print('selected rest of population')
# do crossover/mutations with individuals in selected
for sInd, s in enumerate(selected):
if np.random.rand() < crossoverProb:
selectedMutated[sInd] = generation
eInd = np.random.choice(numElite)
e = elites[eInd]
s.crossover(e, s)
if np.random.rand() < mutateProb:
selectedMutated[sInd] = generation
s.mutate(mutateStrength, mutateFrac, \
superMutateProb, resetProb)
print('mutated non-elite individuals')
# then reassign them to the population
self.pop[indElite] = elites
self.pop[indOther] = selected
self.synced[indOther] = selectedSynced
self.mutated[indOther] = selectedMutated
# reset fitnesses
self.fitnesses = np.full((self.size,), -1e100, dtype=float)
def sync(self, newMember, generation=0):
"""Replace the weakest (lowest-fitness) member with a new member.
"""
ind = np.argmin(self.fitnesses)
self.pop[ind] = newMember.copy()
self.synced[ind] = generation
self.mutated[ind] = generation
self.fitnesses[ind] = -1e100
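# Illustrative sketch (not part of the original module): a minimal evolutionary loop over the
# Population API above; env and replayBuffer are assumed to follow the interfaces used in
# Actor.evaluate().
def _example_evolution_loop(env, replayBuffer, generations=10):
    pop = Population(size=10)
    pop.startPopulation(sDim=3, aDim=5, learningRate=1e-3, type='discrete')
    for gen in range(generations):
        pop.evaluate(env, replayBuffer, numEval=1)
        pop.iterate(generation=gen)
    return pop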
| [] |
2024-01-10 | Jerry022002/MG-CS50 | chord~chord_ver2~test_musenet.py | import openai
# Set up your OpenAI API key
openai.api_key = "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"
# Define input parameters
chords = "Cmaj7, Dm7, G7, Cmaj7" # Example chord progression
tempo = "120" # BPM
time_signature = "4/4" # Time signature
instrument = "piano" # Instrument type
# Generate music using MuseNet
prompt = f"Generate music in {time_signature} time signature, at {tempo} BPM, using {instrument}, based on the chord progression: {chords}"
response = openai.Completion.create(
engine="text-davinci-003", # Choose the appropriate engine
prompt=prompt,
max_tokens=200 # Adjust the length of the generated music
)
# Extract the generated music from the response
generated_music = response.choices[0].text.strip()
# Save the generated music to a MIDI file or use it as needed
with open("generated_music.mid", "w") as f:
f.write(generated_music)
| [
"Generate music in time_signatured2c87cc6-790c-4b96-97e9-4d5975923912 time signature, at tempod2c87cc6-790c-4b96-97e9-4d5975923912 BPM, using piano, based on the chord progression: chordsd2c87cc6-790c-4b96-97e9-4d5975923912",
"Generate music in 4/4 time signature, at 120 BPM, using piano, based on the chord progression: Cmaj7, Dm7, G7, Cmaj7"
] |
2024-01-10 | pgasawa/origin | backend~ChatBot~ingest_data.py | from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
from langchain.document_loaders import UnstructuredFileLoader
from langchain.vectorstores.faiss import FAISS
from langchain.embeddings import OpenAIEmbeddings
import pickle
import nltk
import os
import openai
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
openai.api_key = os.environ["OPENAI_API_KEY"]
def ingestion(id):
# Load Data
loader = UnstructuredFileLoader(f"{id}_output.txt")
raw_documents = loader.load()
# Split text
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(raw_documents)
# Load Data to vectorstore
embeddings = OpenAIEmbeddings()
vectorstore = FAISS.from_documents(documents, embeddings)
# Save vectorstore
with open(f"vectorstore{id}.pkl", "wb") as f:
pickle.dump(vectorstore, f) | [] |
2024-01-10 | pgasawa/origin | backend~MindMap~display_knowledge_graph.py | from gpt_index import SimpleDirectoryReader, LLMPredictor
from gpt_index.indices.knowledge_graph.base import GPTKnowledgeGraphIndex
from langchain import OpenAI
from IPython.display import Markdown, display
from pyvis.network import Network
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-002"))
new_index = GPTKnowledgeGraphIndex.load_from_disk('index_kg.json', llm_predictor=llm_predictor)
g = new_index.get_networkx_graph()
net = Network(notebook=False, cdn_resources="in_line", directed=True)
net.from_nx(g)
net.show("example_2048_8.html") | [] |
2024-01-10 | pgasawa/origin | backend~embeddings.py | import requests
from sentence_transformers import SentenceTransformer
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import openai
import os
openai.api_key = os.environ["OPENAI_API_KEY"]
HF_TOKEN = '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'
embedder = SentenceTransformer('paraphrase-MiniLM-L6-v2', cache_folder='embedder')
# texts = [
# "How do I get a replacement Medicare card?",
# "What is the monthly premium for Medicare Part B?",
# "How do I terminate my Medicare Part B (medical insurance)?",
# "How do I sign up for Medicare?",
# "Can I sign up for Medicare Part B if I am working and have health insurance through an employer?",
# "How do I sign up for Medicare Part B if I already have Part A?",
# "What are Medicare late enrollment penalties?",
# "What is Medicare and who can get it?",
# "How can I get help with my Medicare Part A and Part B premiums?",
# "What are the different parts of Medicare?",
# "Will my Medicare premiums be higher because of my higher income?",
# "What is TRICARE ?",
# "Should I sign up for Medicare Part B if I have Veterans' Benefits?",
# "Strawberry Fields Forever: A Guide to Growing Your Own Berries",
# "Strawberry Recipes for Every Occasion: From Smoothies to Shortcake",
# "The Health Benefits of Strawberries: Why You Should Be Eating More",
# "Strawberry Farming Techniques: Tips and Tricks from the Pros",
# "Strawberry Season: Where to Pick Your Own Berries Near You",
# "All About Strawberries: From Seed to Harvest",
# "Strawberry Nutrition: Facts and Figures You Need to Know",
# "The Best Strawberry Varieties for Your Garden: A Comprehensive Guide",
# "Cooking with Strawberries: Sweet and Savory Recipes to Try at Home",
# "Strawberry Shortcake: A Classic Dessert Recipe with a Modern Twist"
# ]
def get_high_dim_embeddings(texts):
embeddings = embedder.encode(texts)
embeddings = np.array(embeddings)
return embeddings
def run_kmeans_2(texts, num_clusters=2):
# Define number of clusters and fit k-means model
vec_embeddings = get_high_dim_embeddings(texts)
kmeans = KMeans(n_clusters=num_clusters, random_state=0).fit(vec_embeddings)
# Get the labels and indices of vectors in each cluster
labels = kmeans.labels_
cluster_indices = {}
for i, label in enumerate(labels):
if label not in cluster_indices:
cluster_indices[label] = [i]
else:
cluster_indices[label].append(i)
# # Print indices of vectors in each cluster
for cluster, indices in cluster_indices.items():
print(f"Cluster {cluster} has {len(indices)} vectors with indices: {indices}")
cluster_titles = {}
for i, label in enumerate(labels):
if label not in cluster_titles:
cluster_titles[label] = [texts[i]]
else:
cluster_titles[label].append(texts[i])
titles = []
# print(cluster_titles)
for cluster in cluster_titles:
prompt = "What noun describes the following texts? Please output only the noun and no other text. If you are unsure, please only output a noun that is most similar to the texts and nothing more: \n\n \"" + ' '.join(cluster_titles[cluster]).replace("\n","") + "\""
print(prompt)
# prompt = prompt[:14000]
response = openai.Completion.create(
engine='text-curie-001',
prompt=prompt,
max_tokens=50,
n=1,
stop=None,
temperature=0,
)
print(response.choices)
titles.append(response.choices[0].text.strip())
return kmeans, vec_embeddings, cluster_indices, titles
def run_kmeans(texts, num_clusters=2):
# Define number of clusters and fit k-means model
vec_embeddings = get_high_dim_embeddings(texts)
kmeans = KMeans(n_clusters=num_clusters, random_state=0).fit(vec_embeddings)
# return kmeans
# # Get the labels and indices of vectors in each cluster
labels = kmeans.labels_
cluster_titles = {}
for i, label in enumerate(labels):
if label not in cluster_titles:
cluster_titles[label] = [texts[i]]
else:
cluster_titles[label].append(texts[i])
titles = []
# print(cluster_titles)
for cluster in cluster_titles:
prompt = "What noun describes the following texts? Please output only the noun and no other text. If you are unsure, please only output a noun that is most similar to the texts and nothing more: \"" + ' '.join(cluster_titles[cluster]) + "\""
print(prompt)
# prompt = prompt[:14000]
response = openai.Completion.create(
engine='text-davinci-003',
prompt=prompt,
max_tokens=50,
n=1,
stop=None,
temperature=0,
)
titles.append(response.choices[0].text.strip())
return kmeans, titles
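# Illustrative usage sketch (not part of the original module): cluster a handful of titles and
# label each cluster with a GPT-generated noun (this calls the OpenAI API when executed).
def _example_clustering():
    titles = ["Medicare enrollment deadlines", "Strawberry shortcake recipe ideas"]
    kmeans, cluster_names = run_kmeans(titles, num_clusters=2)
    return cluster_names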
# # Print indices of vectors in each cluster
# for cluster, indices in cluster_indices.items():
# print(f"Cluster {cluster} has {len(indices)} vectors with indices: {indices}")
# # read in test_titles.txt as a list of sentences per line
# with open('ChatBot/test_titles.txt', 'r') as f:
# test_titles = f.readlines()
# print(run_kmeans(test_titles, num_clusters=10)) | [
"\n",
"What noun describes the following texts? Please output only the noun and no other text. If you are unsure, please only output a noun that is most similar to the texts and nothing more: \"",
"\"",
"What noun describes the following texts? Please output only the noun and no other text. If you are unsure, please only output a noun that is most similar to the texts and nothing more: \n\n \"",
" "
] |
2024-01-10 | Dhravya/snapshop-backend | api_calls.py | import httpx
from openai import OpenAI
from funcs import functions, other_func
import asyncify
from helpers.redis_helpers import get_user, create_generation
import asyncio
import json
from helpers.upload_image import upload_image
import requests
client = OpenAI()
async def ask_shopwise(item_name: str):
print("Asking shopwise for " + item_name)
response = requests.get(
f"https://dropit2-production.up.railway.app/googleSearch?itemName={item_name}",
)
try:
if response.status_code == 200:
return response.json()
else:
print("ShopWise API failed")
return None
except Exception as e:
print(e)
return None
@asyncify
def get_fashion_image(base64_image: str, user_gender: str):
image = upload_image(base64_image)
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "user",
"content": [
{
"type": "text",
"text": "What’s in this image? Describe it in as detail as possible. You are a fashion stylist. Be as descriptive about the fashion items as possible.",
},
{
"type": "image_url",
"image_url": {
"url": f"{image}",
},
},
],
}
],
max_tokens=250,
)
function_response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{
"role": "user",
"content": [
{
"type": "text",
"text": f"What’s in this image? Describe it in as detail as possible. You are a fashion stylist. Be as descriptive about the fashion items as possible. Here's the description of the image: {response.choices[0]}. The gender of the user is {user_gender}",
},
],
}
],
max_tokens=500,
functions=functions,
function_call="auto",
)
function_response = json.loads(
function_response.choices[0].message.function_call.arguments
)
return function_response
@asyncify
def get_fashion_recommendation(user_context: str, user_gender: str):
function_response = client.chat.completions.create(
model="gpt-3.5-turbo-1106",
messages=[
{
"role": "user",
"content": [
{
"type": "text",
"text": f"Create a fashion recommendation for the user. Here's the user's context: {user_context}. Keep in mind the gender of the user which is {user_gender}",
},
],
}
],
max_tokens=500,
functions=other_func,
function_call="auto",
)
function_response = json.loads(
function_response.choices[0].message.function_call.arguments
)
return function_response
async def get_fashion_and_user_image(original_image: str, user_email: str):
user = get_user(user_email)
if user is None:
raise Exception("User not found")
output_json = await get_fashion_image(original_image, user.email)
print(output_json)
shopping_links = output_json["fashion_items_as_keywords"]
print(shopping_links)
shopping_links = await asyncio.gather(
*[ask_shopwise(keyword) for keyword in shopping_links]
)
output_json["fashion_items_as_keywords"] = [link for link in shopping_links if link]
output_json["original_image"] = upload_image(original_image)
if not isinstance(output_json, dict):
output_json = json.loads(output_json)
print(output_json)
try:
create_generation(generated_json_output_as_dict=output_json)
except Exception as e:
print(e)
return output_json
async def get_fashion_recommendation_with_shopping_links(
user_context: str, user_email: str
):
user = get_user(user_email)
if user is None:
raise Exception("User not found")
output_json = await get_fashion_recommendation(user_context, user.email)
print(output_json)
shopping_links = output_json["fashion_items_as_keywords"]
print(shopping_links)
shopping_links = await asyncio.gather(
*[ask_shopwise(keyword) for keyword in shopping_links]
)
output_json["fashion_items_as_keywords"] = [link for link in shopping_links if link]
output_json["original_image"] = user_context
if not isinstance(output_json, dict):
output_json = json.loads(output_json)
try:
create_generation(generated_json_output_as_dict=output_json)
except Exception as e:
print(e)
return output_json
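# Illustrative usage sketch (not part of the original module): both entry points are coroutines,
# so they are typically driven with asyncio; the email below is a placeholder and must belong to
# an existing user in Redis.
async def _example_recommendation():
    return await get_fashion_recommendation_with_shopping_links(
        "casual summer outfit for a beach trip", "user@example.com")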
| [
"[{'type': 'text', 'text': 'What’s in this image? Describe it in as detail as possible. You are a fashion stylist. Be as descriptive about the fashion items as possible.'}, {'type': 'image_url', 'image_url': {'url': 'PLACEHOLDER'}}]",
"[{'type': 'text', 'text': \"Create a fashion recommendation for the user. Here's the user's context: PLACEHOLDER. Keep in mind the gender of the user which is PLACEHOLDER\"}]"
] |
2024-01-10 | coinse/libro | scripts~llm_api.py | import os
import json
import requests
import random
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
HF_KEY = os.getenv("HF_API_KEY")
AVAILABLE_MODEL_INFO = {
'OpenAI/text-curie-001': {
'query_type': 'openai',
'uses_chat': False,
},
'OpenAI/text-davinci-002': {
'query_type': 'openai',
'uses_chat': False,
},
'OpenAI/text-davinci-003': {
'query_type': 'openai',
'uses_chat': False,
},
'OpenAI/gpt-3.5-turbo': {
'query_type': 'openai',
'uses_chat': True,
},
'OpenAI/gpt-4': {
'query_type': 'openai',
'uses_chat': True,
},
'bigscience/bloom': {
'query_type': 'hf_hosted',
'uses_chat': False,
},
'bigscience/bloom-7b1': {
'query_type': 'huggingface',
'uses_chat': False,
},
'bigscience/bloom-3b': {
'query_type': 'huggingface',
'uses_chat': False,
},
'bigscience/bloomz': {
'query_type': 'hf_hosted',
'uses_chat': False,
},
'bigscience/bloomz-7b1': {
'query_type': 'huggingface',
'uses_chat': False,
},
'bigscience/bloomz-3b': {
'query_type': 'huggingface',
'uses_chat': False,
},
'facebook/incoder-6B': {
'query_type': 'huggingface',
'uses_chat': False,
},
'facebook/incoder-1B': {
'query_type': 'huggingface',
'uses_chat': False,
},
'Salesforce/codegen-16B-multi': {
'query_type': 'huggingface',
'uses_chat': False,
},
'Salesforce/codegen-6B-multi': {
'query_type': 'huggingface',
'uses_chat': False,
},
'Salesforce/codegen-2B-multi': {
'query_type': 'huggingface',
'uses_chat': False,
},
'Salesforce/codegen-350M-multi': {
'query_type': 'huggingface',
'uses_chat': False,
},
'EleutherAI/gpt-neox-20b': {
'query_type': 'hf_hosted',
'uses_chat': False,
},
'EleutherAI/gpt-neo-2.7b': {
'query_type': 'huggingface',
'uses_chat': False,
},
'EleutherAI/gpt-neo-1.3b': {
'query_type': 'huggingface',
'uses_chat': False,
},
'EleutherAI/gpt-neo-125m': {
'query_type': 'huggingface',
'uses_chat': False,
},
'Databricks/dolly-v2-12b': {
'query_type': 'unknown',
'uses_chat': False,
},
'BlinkDL/rwkv-4-pile-14b': {
'query_type': 'unknown',
'uses_chat': False,
},
'BlinkDL/rwkv-4-pile-7b': {
'query_type': 'unknown',
'uses_chat': False,
},
'BlinkDL/rwkv-4-raven': {
'query_type': 'unknown',
'uses_chat': True,
},
'Salesforce/codegen2-16B': {
'query_type': 'self_hosted',
'uses_chat': False,
}
}
AVAILABLE_MODELS = AVAILABLE_MODEL_INFO.keys() # just for clean code
TEMP = 0.7
# Helper functions
def model_is_chat(model):
return AVAILABLE_MODEL_INFO[model]['uses_chat']
def tiny_noise(scale=1/1000):
return scale*random.random()-0.5*scale
# Query functions
def query_hf_hosted_llm(prompt, model, stop_tokens, use_cache=False, end_len=1000*4):
def single_query(wip_prompt):
use_temp = TEMP if use_cache else (TEMP + tiny_noise())
data = json.dumps({'inputs': wip_prompt, 'parameters': {'temperature': use_temp}})
API_URL = f"https://api-inference.huggingface.co/models/{model}"
headers = {"Authorization": f"Bearer {HF_KEY}", "Content-Type": "application/json"}
response = requests.request("POST", API_URL, headers=headers, data=data)
assert response.status_code == 200, f'Response status was non-normal ({response.status_code}): {response.content}'
return json.loads(response.content.decode("utf-8"))[0]['generated_text']
new_content = ''
while (all(t not in new_content for t in stop_tokens) and
len(new_content) < end_len):
full_gen_text = single_query(prompt + new_content)
if full_gen_text == prompt+new_content: # predicting end token
return new_content
new_content = full_gen_text[len(prompt):]
    earliest_stop_loc = min([len(new_content)] +
                            [new_content.index(t) for t in stop_tokens if t in new_content])
return new_content[:earliest_stop_loc]
def query_self_hosted_llm(prompt, stop_tokens, max_tokens=256):
payload = {
'text': prompt,
'max_new_tokens': max_tokens,
}
json_payload = json.dumps(payload)
headers = {'Content-Type': 'application/json'}
response = requests.post('http://localhost:23623', data=json_payload, headers=headers)
gen_content = response.json()['result']
earliest_stop_loc = min([len(gen_content)] +
[gen_content.index(t) for t in stop_tokens if t in gen_content])
return gen_content[:earliest_stop_loc]
def query_chat_llm(prompt, model, stop_tokens):
assert model in AVAILABLE_MODEL_INFO, f'Unknown model {model}'
model_info = AVAILABLE_MODEL_INFO[model]
assert type(prompt) == list
assert model_info['uses_chat']
if model_info['query_type'] == 'openai':
base_model_name = model.split('/')[-1]
response = openai.ChatCompletion.create(
model=base_model_name,
messages=prompt, # chat-style prompt
n=1,
frequency_penalty=0,
presence_penalty=0,
stop=stop_tokens
)
gen_result = response["choices"][0]["message"]["content"]
if "```" in gen_result:
gen_result = gen_result.split("```")[1]
gen_result = gen_result.removeprefix('java')
elif model_info['query_type'] == 'hf_hosted':
raise NotImplementedError
else:
raise NotImplementedError(f'Unknown query type {model_info["query_type"]}')
return gen_result
def query_string_llm(prompt, model, stop_tokens):
assert model in AVAILABLE_MODEL_INFO, f'Unknown model {model}'
model_info = AVAILABLE_MODEL_INFO[model]
assert type(prompt) == str
assert not model_info['uses_chat']
if model_info['query_type'] == 'openai':
base_model_name = model.split('/')[-1]
response = openai.Completion.create(
engine=base_model_name,
prompt=prompt,
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=stop_tokens
)
gen_result = response['choices'][0]['text']
elif model_info['query_type'] == 'hf_hosted':
gen_result = query_hf_hosted_llm(prompt, model, stop_tokens)
elif model_info['query_type'] == 'self_hosted':
gen_result = query_self_hosted_llm(prompt, stop_tokens, max_tokens=256)
else:
raise NotImplementedError(f'Unknown query type {model_info["query_type"]}')
return gen_result
def query_llm(prompt, model, stop_tokens):
# sanity checks
assert model in AVAILABLE_MODELS, f'Unknown model {model}'
model_info = AVAILABLE_MODEL_INFO[model]
if model_info['uses_chat']:
assert type(prompt) == list
else:
assert type(prompt) == str
# actual execution
query_func = query_chat_llm if model_info['uses_chat'] else query_string_llm
return query_func(prompt, model, stop_tokens)
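# Illustrative usage sketch (not part of the original module): chat models take a list of
# role/content messages, while completion models take a plain string prompt.
def _example_queries():
    chat_answer = query_llm(
        [{"role": "user", "content": "Write a unit test for this method."}],
        "OpenAI/gpt-3.5-turbo",
        ["```"],
    )
    text_answer = query_llm(
        "// TEST METHOD\npublic void test",
        "OpenAI/text-davinci-003",
        ["\n}"],
    )
    return chat_answer, text_answer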
| [] |
2024-01-10 | qinb/Langchain-Chatchat | startup.py | import asyncio
import multiprocessing as mp
import os
import subprocess
import sys
from multiprocessing import Process, Queue
from pprint import pprint
# Set the maximum number of numexpr threads; defaults to the number of CPU cores
try:
import numexpr
n_cores = numexpr.utils.detect_number_of_cores()
os.environ["NUMEXPR_MAX_THREADS"] = str(n_cores)
except:
pass
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from configs.model_config import EMBEDDING_MODEL, llm_model_dict, LLM_MODEL, LOG_PATH, \
logger
from configs.server_config import (WEBUI_SERVER, API_SERVER, FSCHAT_CONTROLLER,
FSCHAT_OPENAI_API, )
from server.utils import (fschat_controller_address, fschat_model_worker_address,
fschat_openai_api_address, set_httpx_timeout,
get_model_worker_config, get_all_model_worker_configs,
MakeFastAPIOffline, FastAPI, llm_device, embedding_device)
import argparse
from typing import Tuple, List, Dict
from configs import VERSION
def create_controller_app(
dispatch_method: str,
log_level: str = "INFO",
) -> FastAPI:
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.controller import app, Controller, logger
logger.setLevel(log_level)
controller = Controller(dispatch_method)
sys.modules["fastchat.serve.controller"].controller = controller
MakeFastAPIOffline(app)
app.title = "FastChat Controller"
app._controller = controller
return app
def create_model_worker_app(log_level: str = "INFO", **kwargs) -> Tuple[argparse.ArgumentParser, FastAPI]:
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.model_worker import app, GptqConfig, AWQConfig, ModelWorker, worker_id, logger
import argparse
import threading
import fastchat.serve.model_worker
logger.setLevel(log_level)
# workaround to make program exit with Ctrl+c
# it should be deleted after pr is merged by fastchat
def _new_init_heart_beat(self):
self.register_to_controller()
self.heart_beat_thread = threading.Thread(
target=fastchat.serve.model_worker.heart_beat_worker, args=(self,), daemon=True,
)
self.heart_beat_thread.start()
ModelWorker.init_heart_beat = _new_init_heart_beat
parser = argparse.ArgumentParser()
args = parser.parse_args([])
# default args. should be deleted after pr is merged by fastchat
args.gpus = None
args.max_gpu_memory = "20GiB"
args.load_8bit = False
args.cpu_offloading = None
args.gptq_ckpt = None
args.gptq_wbits = 16
args.gptq_groupsize = -1
args.gptq_act_order = False
args.awq_ckpt = None
args.awq_wbits = 16
args.awq_groupsize = -1
args.num_gpus = 1
args.model_names = []
args.conv_template = None
args.limit_worker_concurrency = 5
args.stream_interval = 2
args.no_register = False
for k, v in kwargs.items():
setattr(args, k, v)
if args.gpus:
if args.num_gpus is None:
args.num_gpus = len(args.gpus.split(','))
if len(args.gpus.split(",")) < args.num_gpus:
raise ValueError(
f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!"
)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
    # online model API worker
if worker_class := kwargs.get("worker_class"):
worker = worker_class(model_names=args.model_names,
controller_addr=args.controller_address,
worker_addr=args.worker_address)
    # local model worker
else:
# workaround to make program exit with Ctrl+c
# it should be deleted after pr is merged by fastchat
def _new_init_heart_beat(self):
self.register_to_controller()
self.heart_beat_thread = threading.Thread(
target=fastchat.serve.model_worker.heart_beat_worker, args=(self,), daemon=True,
)
self.heart_beat_thread.start()
ModelWorker.init_heart_beat = _new_init_heart_beat
gptq_config = GptqConfig(
ckpt=args.gptq_ckpt or args.model_path,
wbits=args.gptq_wbits,
groupsize=args.gptq_groupsize,
act_order=args.gptq_act_order,
)
awq_config = AWQConfig(
ckpt=args.awq_ckpt or args.model_path,
wbits=args.awq_wbits,
groupsize=args.awq_groupsize,
)
worker = ModelWorker(
controller_addr=args.controller_address,
worker_addr=args.worker_address,
worker_id=worker_id,
model_path=args.model_path,
model_names=args.model_names,
limit_worker_concurrency=args.limit_worker_concurrency,
no_register=args.no_register,
device=args.device,
num_gpus=args.num_gpus,
max_gpu_memory=args.max_gpu_memory,
load_8bit=args.load_8bit,
cpu_offloading=args.cpu_offloading,
gptq_config=gptq_config,
awq_config=awq_config,
stream_interval=args.stream_interval,
conv_template=args.conv_template,
)
sys.modules["fastchat.serve.model_worker"].args = args
sys.modules["fastchat.serve.model_worker"].gptq_config = gptq_config
sys.modules["fastchat.serve.model_worker"].worker = worker
MakeFastAPIOffline(app)
app.title = f"FastChat LLM Server ({args.model_names[0]})"
app._worker = worker
return app
def create_openai_api_app(
controller_address: str,
api_keys: List = [],
log_level: str = "INFO",
) -> FastAPI:
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.openai_api_server import app, CORSMiddleware, app_settings
from fastchat.utils import build_logger
logger = build_logger("openai_api", "openai_api.log")
logger.setLevel(log_level)
app.add_middleware(
CORSMiddleware,
allow_credentials=True,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
)
sys.modules["fastchat.serve.openai_api_server"].logger = logger
app_settings.controller_address = controller_address
app_settings.api_keys = api_keys
MakeFastAPIOffline(app)
app.title = "FastChat OpeanAI API Server"
return app
def _set_app_seq(app: FastAPI, q: Queue, run_seq: int):
if q is None or not isinstance(run_seq, int):
return
if run_seq == 1:
@app.on_event("startup")
async def on_startup():
set_httpx_timeout()
q.put(run_seq)
elif run_seq > 1:
@app.on_event("startup")
async def on_startup():
set_httpx_timeout()
while True:
no = q.get()
if no != run_seq - 1:
q.put(no)
else:
break
q.put(run_seq)
def run_controller(q: Queue, run_seq: int = 1, log_level: str = "INFO", e: mp.Event = None):
import uvicorn
import httpx
from fastapi import Body
import time
import sys
app = create_controller_app(
dispatch_method=FSCHAT_CONTROLLER.get("dispatch_method"),
log_level=log_level,
)
_set_app_seq(app, q, run_seq)
@app.on_event("startup")
def on_startup():
if e is not None:
e.set()
# add interface to release and load model worker
@app.post("/release_worker")
def release_worker(
model_name: str = Body(..., description="要释放模型的名称", samples=["chatglm-6b"]),
# worker_address: str = Body(None, description="要释放模型的地址,与名称二选一", samples=[fschat_controller_address()]),
new_model_name: str = Body(None, description="释放后加载该模型"),
keep_origin: bool = Body(False, description="不释放原模型,加载新模型")
) -> Dict:
available_models = app._controller.list_models()
if new_model_name in available_models:
msg = f"要切换的LLM模型 {new_model_name} 已经存在"
logger.info(msg)
return {"code": 500, "msg": msg}
if new_model_name:
logger.info(f"开始切换LLM模型:从 {model_name} 到 {new_model_name}")
else:
logger.info(f"即将停止LLM模型: {model_name}")
if model_name not in available_models:
msg = f"the model {model_name} is not available"
logger.error(msg)
return {"code": 500, "msg": msg}
worker_address = app._controller.get_worker_address(model_name)
if not worker_address:
msg = f"can not find model_worker address for {model_name}"
logger.error(msg)
return {"code": 500, "msg": msg}
r = httpx.post(worker_address + "/release",
json={"new_model_name": new_model_name, "keep_origin": keep_origin})
if r.status_code != 200:
msg = f"failed to release model: {model_name}"
logger.error(msg)
return {"code": 500, "msg": msg}
        if new_model_name:
            timer = 300 # wait 5 minutes for new model_worker register
            while timer > 0:
                models = app._controller.list_models()
                if new_model_name in models:
                    break
                time.sleep(1)
                timer -= 1
            if timer > 0:
                msg = f"successfully changed model from {model_name} to {new_model_name}"
                logger.info(msg)
                return {"code": 200, "msg": msg}
            else:
                msg = f"failed to change model from {model_name} to {new_model_name}"
                logger.error(msg)
                return {"code": 500, "msg": msg}
        else:
            msg = f"successfully released model: {model_name}"
            logger.info(msg)
            return {"code": 200, "msg": msg}
host = FSCHAT_CONTROLLER["host"]
port = FSCHAT_CONTROLLER["port"]
if log_level == "ERROR":
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
uvicorn.run(app, host=host, port=port, log_level=log_level.lower())
def run_model_worker(
model_name: str = LLM_MODEL,
controller_address: str = "",
q: Queue = None,
run_seq: int = 2,
log_level: str = "INFO",
):
import uvicorn
from fastapi import Body
import sys
kwargs = get_model_worker_config(model_name)
host = kwargs.pop("host")
port = kwargs.pop("port")
kwargs["model_names"] = [model_name]
kwargs["controller_address"] = controller_address or fschat_controller_address()
kwargs["worker_address"] = fschat_model_worker_address(model_name)
model_path = kwargs.get("local_model_path", "")
kwargs["model_path"] = model_path
app = create_model_worker_app(log_level=log_level, **kwargs)
_set_app_seq(app, q, run_seq)
if log_level == "ERROR":
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# add interface to release and load model
@app.post("/release")
def release_model(
new_model_name: str = Body(None, description="释放后加载该模型"),
keep_origin: bool = Body(False, description="不释放原模型,加载新模型")
) -> Dict:
if keep_origin:
if new_model_name:
q.put(["start", new_model_name])
else:
if new_model_name:
q.put(["replace", new_model_name])
else:
q.put(["stop"])
return {"code": 200, "msg": "done"}
uvicorn.run(app, host=host, port=port, log_level=log_level.lower())
def run_openai_api(q: Queue, run_seq: int = 3, log_level: str = "INFO"):
import uvicorn
import sys
controller_addr = fschat_controller_address()
app = create_openai_api_app(controller_addr, log_level=log_level) # TODO: not support keys yet.
_set_app_seq(app, q, run_seq)
host = FSCHAT_OPENAI_API["host"]
port = FSCHAT_OPENAI_API["port"]
if log_level == "ERROR":
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
uvicorn.run(app, host=host, port=port)
def run_api_server(q: Queue, run_seq: int = 4):
from server.api import create_app
import uvicorn
app = create_app()
_set_app_seq(app, q, run_seq)
host = API_SERVER["host"]
port = API_SERVER["port"]
uvicorn.run(app, host=host, port=port)
def run_webui(q: Queue, run_seq: int = 5):
host = WEBUI_SERVER["host"]
port = WEBUI_SERVER["port"]
if q is not None and isinstance(run_seq, int):
while True:
no = q.get()
if no != run_seq - 1:
q.put(no)
else:
break
q.put(run_seq)
p = subprocess.Popen(["streamlit", "run", "webui.py",
"--server.address", host,
"--server.port", str(port)])
p.wait()
def parse_args() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument(
"-a",
"--all-webui",
action="store_true",
help="run fastchat's controller/openai_api/model_worker servers, run api.py and webui.py",
dest="all_webui",
)
parser.add_argument(
"--all-api",
action="store_true",
help="run fastchat's controller/openai_api/model_worker servers, run api.py",
dest="all_api",
)
parser.add_argument(
"--llm-api",
action="store_true",
help="run fastchat's controller/openai_api/model_worker servers",
dest="llm_api",
)
parser.add_argument(
"-o",
"--openai-api",
action="store_true",
help="run fastchat's controller/openai_api servers",
dest="openai_api",
)
parser.add_argument(
"-m",
"--model-worker",
action="store_true",
help="run fastchat's model_worker server with specified model name. specify --model-name if not using default LLM_MODEL",
dest="model_worker",
)
parser.add_argument(
"-n",
"--model-name",
type=str,
default=LLM_MODEL,
help="specify model name for model worker.",
dest="model_name",
)
parser.add_argument(
"-c",
"--controller",
type=str,
help="specify controller address the worker is registered to. default is server_config.FSCHAT_CONTROLLER",
dest="controller_address",
)
parser.add_argument(
"--api",
action="store_true",
help="run api.py server",
dest="api",
)
parser.add_argument(
"-p",
"--api-worker",
action="store_true",
help="run online model api such as zhipuai",
dest="api_worker",
)
parser.add_argument(
"-w",
"--webui",
action="store_true",
help="run webui.py server",
dest="webui",
)
parser.add_argument(
"-q",
"--quiet",
action="store_true",
help="减少fastchat服务log信息",
dest="quiet",
)
args = parser.parse_args()
return args, parser
def dump_server_info(after_start=False, args=None):
import platform
import langchain
import fastchat
from server.utils import api_address, webui_address
print("\n")
print("=" * 30 + "Langchain-Chatchat Configuration" + "=" * 30)
print(f"操作系统:{platform.platform()}.")
print(f"python版本:{sys.version}")
print(f"项目版本:{VERSION}")
print(f"langchain版本:{langchain.__version__}. fastchat版本:{fastchat.__version__}")
print("\n")
model = LLM_MODEL
if args and args.model_name:
model = args.model_name
print(f"当前LLM模型:{model} @ {llm_device()}")
pprint(llm_model_dict[model])
print(f"当前Embbedings模型: {EMBEDDING_MODEL} @ {embedding_device()}")
if after_start:
print("\n")
print(f"服务端运行信息:")
if args.openai_api:
print(f" OpenAI API Server: {fschat_openai_api_address()}/v1")
print(" (请确认llm_model_dict中配置的api_base_url与上面地址一致。)")
if args.api:
print(f" Chatchat API Server: {api_address()}")
if args.webui:
print(f" Chatchat WEBUI Server: {webui_address()}")
print("=" * 30 + "Langchain-Chatchat Configuration" + "=" * 30)
print("\n")
async def start_main_server():
import time
import signal
def handler(signalname):
"""
Python 3.9 has `signal.strsignal(signalnum)` so this closure would not be needed.
Also, 3.8 includes `signal.valid_signals()` that can be used to create a mapping for the same purpose.
"""
def f(signal_received, frame):
raise KeyboardInterrupt(f"{signalname} received")
return f
# This will be inherited by the child process if it is forked (not spawned)
signal.signal(signal.SIGINT, handler("SIGINT"))
signal.signal(signal.SIGTERM, handler("SIGTERM"))
mp.set_start_method("spawn")
manager = mp.Manager()
queue = manager.Queue()
args, parser = parse_args()
if args.all_webui:
args.openai_api = True
args.model_worker = True
args.api = True
args.api_worker = True
args.webui = True
elif args.all_api:
args.openai_api = True
args.model_worker = True
args.api = True
args.api_worker = True
args.webui = False
elif args.llm_api:
args.openai_api = True
args.model_worker = True
args.api_worker = True
args.api = False
args.webui = False
dump_server_info(args=args)
if len(sys.argv) > 1:
logger.info(f"正在启动服务:")
logger.info(f"如需查看 llm_api 日志,请前往 {LOG_PATH}")
processes = {"online-api": []}
def process_count():
return len(processes) + len(processes["online-api"]) - 1
if args.quiet:
log_level = "ERROR"
else:
log_level = "INFO"
controller_started = manager.Event()
if args.openai_api:
process = Process(
target=run_controller,
name=f"controller",
args=(queue, process_count() + 1, log_level, controller_started),
daemon=True,
)
processes["controller"] = process
process = Process(
target=run_openai_api,
name=f"openai_api",
args=(queue, process_count() + 1),
daemon=True,
)
processes["openai_api"] = process
if args.model_worker:
config = get_model_worker_config(args.model_name)
if not config.get("online_api"):
process = Process(
target=run_model_worker,
name=f"model_worker - {args.model_name}",
args=(args.model_name, args.controller_address, queue, process_count() + 1, log_level),
daemon=True,
)
processes["model_worker"] = process
if args.api_worker:
configs = get_all_model_worker_configs()
for model_name, config in configs.items():
if config.get("online_api") and config.get("worker_class"):
process = Process(
target=run_model_worker,
name=f"model_worker - {model_name}",
args=(model_name, args.controller_address, queue, process_count() + 1, log_level),
daemon=True,
)
processes["online-api"].append(process)
if args.api:
process = Process(
target=run_api_server,
name=f"API Server",
args=(queue, process_count() + 1),
daemon=True,
)
processes["api"] = process
if args.webui:
process = Process(
target=run_webui,
name=f"WEBUI Server",
args=(queue, process_count() + 1),
daemon=True,
)
processes["webui"] = process
if process_count() == 0:
parser.print_help()
else:
try:
            # make sure the tasks exit cleanly when SIGINT is received
if p:= processes.get("controller"):
p.start()
p.name = f"{p.name} ({p.pid})"
controller_started.wait()
if p:= processes.get("openai_api"):
p.start()
p.name = f"{p.name} ({p.pid})"
if p:= processes.get("model_worker"):
p.start()
p.name = f"{p.name} ({p.pid})"
for p in processes.get("online-api", []):
p.start()
p.name = f"{p.name} ({p.pid})"
if p:= processes.get("api"):
p.start()
p.name = f"{p.name} ({p.pid})"
if p:= processes.get("webui"):
p.start()
p.name = f"{p.name} ({p.pid})"
while True:
no = queue.get()
if no == process_count():
time.sleep(0.5)
dump_server_info(after_start=True, args=args)
break
else:
queue.put(no)
if model_worker_process := processes.get("model_worker"):
model_worker_process.join()
for process in processes.get("online-api", []):
process.join()
for name, process in processes.items():
if name not in ["model_worker", "online-api"]:
if isinstance(p, list):
for work_process in p:
work_process.join()
else:
process.join()
except Exception as e:
# if model_worker_process := processes.pop("model_worker", None):
# model_worker_process.terminate()
# for process in processes.pop("online-api", []):
# process.terminate()
# for process in processes.values():
#
# if isinstance(process, list):
# for work_process in process:
# work_process.terminate()
# else:
# process.terminate()
logger.error(e)
logger.warning("Caught KeyboardInterrupt! Setting stop event...")
finally:
# Send SIGINT if process doesn't exit quickly enough, and kill it as last resort
# .is_alive() also implicitly joins the process (good practice in linux)
# while alive_procs := [p for p in processes.values() if p.is_alive()]:
for p in processes.values():
logger.warning("Sending SIGKILL to %s", p)
# Queues and other inter-process communication primitives can break when
# process is killed, but we don't care here
if isinstance(p, list):
for process in p:
process.kill()
else:
p.kill()
for p in processes.values():
logger.info("Process status: %s", p)
if __name__ == "__main__":
if sys.version_info < (3, 10):
loop = asyncio.get_event_loop()
else:
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
    # run the coroutine synchronously
loop.run_until_complete(start_main_server())
    # example API call after the services are started:
# import openai
# openai.api_key = "EMPTY" # Not support yet
# openai.api_base = "http://localhost:8888/v1"
# model = "chatglm2-6b"
# # create a chat completion
# completion = openai.ChatCompletion.create(
# model=model,
# messages=[{"role": "user", "content": "Hello! What is your name?"}]
# )
# # print the completion
# print(completion.choices[0].message.content)
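    # Typical command-line invocations (illustrative sketch, based on the argparse options above):
    #   python startup.py -a                  # controller/openai_api/model_worker + api.py + webui.py
    #   python startup.py --all-api           # same, but without the web UI
    #   python startup.py --llm-api           # fastchat controller/openai_api/model_worker only
    #   python startup.py -m -n chatglm2-6b   # a single model worker for the named model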
| [] |
2024-01-10 | qinb/Langchain-Chatchat | document_loaders~mypdfloader.py | from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
class RapidOCRPDFLoader(UnstructuredFileLoader):
def _get_elements(self) -> List:
def pdf2text(filepath):
            import fitz  # the fitz module from PyMuPDF, not to be confused with the package installed by "pip install fitz"
from rapidocr_onnxruntime import RapidOCR
import numpy as np
ocr = RapidOCR()
doc = fitz.open(filepath)
resp = ""
for page in doc:
                # TODO: adjust processing according to the order of text and images on the page
text = page.get_text("")
resp += text + "\n"
img_list = page.get_images()
for img in img_list:
pix = fitz.Pixmap(doc, img[0])
img_array = np.frombuffer(pix.samples, dtype=np.uint8).reshape(pix.height, pix.width, -1)
result, _ = ocr(img_array)
if result:
ocr_result = [line[1] for line in result]
resp += "\n".join(ocr_result)
return resp
text = pdf2text(self.file_path)
from unstructured.partition.text import partition_text
return partition_text(text=text, **self.unstructured_kwargs)
if __name__ == "__main__":
loader = RapidOCRPDFLoader(file_path="../tests/samples/ocr_test.pdf")
docs = loader.load()
print(docs)
| [] |
2024-01-10 | qinb/Langchain-Chatchat | server~knowledge_base~kb_doc_api.py | import os
import urllib
from fastapi import File, Form, Body, Query, UploadFile
from configs.model_config import (DEFAULT_VS_TYPE, EMBEDDING_MODEL, VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD)
from server.utils import BaseResponse, ListResponse
from server.knowledge_base.utils import validate_kb_name, list_files_from_folder, KnowledgeFile
from fastapi.responses import StreamingResponse, FileResponse
import json
from server.knowledge_base.kb_service.base import KBServiceFactory
from typing import List, Dict
from langchain.docstore.document import Document
class DocumentWithScore(Document):
score: float = None
def search_docs(query: str = Body(..., description="user query", examples=["hello"]),
                knowledge_base_name: str = Body(..., description="knowledge base name", examples=["samples"]),
                top_k: int = Body(VECTOR_SEARCH_TOP_K, description="number of matching vectors to return"),
                score_threshold: float = Body(SCORE_THRESHOLD, description="knowledge base relevance threshold, between 0 and 1; a smaller score means higher relevance, 1 is equivalent to no filtering, and around 0.5 is recommended", ge=0, le=1),
) -> List[DocumentWithScore]:
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return []
docs = kb.search_docs(query, top_k, score_threshold)
data = [DocumentWithScore(**x[0].dict(), score=x[1]) for x in docs]
return data
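# Illustrative request body sketch (not part of the original module); the route prefix is
# registered in api.py, but the JSON body mirrors the parameters declared above:
#   {"query": "hello", "knowledge_base_name": "samples", "top_k": 3, "score_threshold": 0.5}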
async def list_files(
knowledge_base_name: str
) -> ListResponse:
if not validate_kb_name(knowledge_base_name):
return ListResponse(code=403, msg="Don't attack me", data=[])
knowledge_base_name = urllib.parse.unquote(knowledge_base_name)
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
        return ListResponse(code=404, msg=f"Knowledge base {knowledge_base_name} not found", data=[])
else:
all_doc_names = kb.list_files()
return ListResponse(data=all_doc_names)
async def upload_doc(file: UploadFile = File(..., description="file to upload"),
                     knowledge_base_name: str = Form(..., description="knowledge base name", examples=["kb1"]),
                     override: bool = Form(False, description="overwrite an existing file"),
                     not_refresh_vs_cache: bool = Form(False, description="do not persist the vector store yet (for FAISS)"),
) -> BaseResponse:
if not validate_kb_name(knowledge_base_name):
return BaseResponse(code=403, msg="Don't attack me")
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return BaseResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}")
file_content = await file.read() # 读取上传文件的内容
try:
kb_file = KnowledgeFile(filename=file.filename,
knowledge_base_name=knowledge_base_name)
if (os.path.exists(kb_file.filepath)
and not override
and os.path.getsize(kb_file.filepath) == len(file_content)
):
            # TODO: handle the case where the file sizes differ
            file_status = f"File {kb_file.filename} already exists."
return BaseResponse(code=404, msg=file_status)
with open(kb_file.filepath, "wb") as f:
f.write(file_content)
except Exception as e:
print(e)
return BaseResponse(code=500, msg=f"{kb_file.filename} 文件上传失败,报错信息为: {e}")
try:
kb.add_doc(kb_file, not_refresh_vs_cache=not_refresh_vs_cache)
except Exception as e:
print(e)
return BaseResponse(code=500, msg=f"{kb_file.filename} 文件向量化失败,报错信息为: {e}")
return BaseResponse(code=200, msg=f"成功上传文件 {kb_file.filename}")
async def delete_doc(knowledge_base_name: str = Body(..., examples=["samples"]),
doc_name: str = Body(..., examples=["file_name.md"]),
delete_content: bool = Body(False),
not_refresh_vs_cache: bool = Body(False, description="暂不保存向量库(用于FAISS)"),
) -> BaseResponse:
if not validate_kb_name(knowledge_base_name):
return BaseResponse(code=403, msg="Don't attack me")
knowledge_base_name = urllib.parse.unquote(knowledge_base_name)
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return BaseResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}")
if not kb.exist_doc(doc_name):
return BaseResponse(code=404, msg=f"未找到文件 {doc_name}")
try:
kb_file = KnowledgeFile(filename=doc_name,
knowledge_base_name=knowledge_base_name)
kb.delete_doc(kb_file, delete_content, not_refresh_vs_cache=not_refresh_vs_cache)
except Exception as e:
print(e)
return BaseResponse(code=500, msg=f"{kb_file.filename} 文件删除失败,错误信息:{e}")
return BaseResponse(code=200, msg=f"{kb_file.filename} 文件删除成功")
async def update_doc(
knowledge_base_name: str = Body(..., examples=["samples"]),
file_name: str = Body(..., examples=["file_name"]),
not_refresh_vs_cache: bool = Body(False, description="暂不保存向量库(用于FAISS)"),
) -> BaseResponse:
'''
    Update a document in the knowledge base.
'''
if not validate_kb_name(knowledge_base_name):
return BaseResponse(code=403, msg="Don't attack me")
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return BaseResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}")
try:
kb_file = KnowledgeFile(filename=file_name,
knowledge_base_name=knowledge_base_name)
if os.path.exists(kb_file.filepath):
kb.update_doc(kb_file, not_refresh_vs_cache=not_refresh_vs_cache)
return BaseResponse(code=200, msg=f"成功更新文件 {kb_file.filename}")
except Exception as e:
print(e)
return BaseResponse(code=500, msg=f"{kb_file.filename} 文件更新失败,错误信息是:{e}")
return BaseResponse(code=500, msg=f"{kb_file.filename} 文件更新失败")
async def download_doc(
knowledge_base_name: str = Query(..., examples=["samples"]),
file_name: str = Query(..., examples=["test.txt"]),
):
'''
    Download a document from the knowledge base.
'''
if not validate_kb_name(knowledge_base_name):
return BaseResponse(code=403, msg="Don't attack me")
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return BaseResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}")
try:
kb_file = KnowledgeFile(filename=file_name,
knowledge_base_name=knowledge_base_name)
if os.path.exists(kb_file.filepath):
return FileResponse(
path=kb_file.filepath,
filename=kb_file.filename,
media_type="multipart/form-data")
except Exception as e:
print(e)
return BaseResponse(code=500, msg=f"{kb_file.filename} 读取文件失败,错误信息是:{e}")
return BaseResponse(code=500, msg=f"{kb_file.filename} 读取文件失败")
async def recreate_vector_store(
knowledge_base_name: str = Body(..., examples=["samples"]),
allow_empty_kb: bool = Body(True),
vs_type: str = Body(DEFAULT_VS_TYPE),
embed_model: str = Body(EMBEDDING_MODEL),
):
'''
    Recreate the vector store from the files in the content folder.
    This is useful when users copy files into the content folder directly instead of uploading them through the network.
    By default, get_service_by_name only returns knowledge bases that exist in info.db and contain document files.
    Set allow_empty_kb to True to also apply this to empty knowledge bases that are not in info.db or have no documents.
'''
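    # A minimal client-side sketch for consuming the event stream (not taken from this repo; it
    # assumes the API server listens on http://127.0.0.1:7861 and mounts this handler at
    # POST /knowledge_base/recreate_vector_store):
    #   import httpx
    #   with httpx.stream("POST", "http://127.0.0.1:7861/knowledge_base/recreate_vector_store",
    #                     json={"knowledge_base_name": "samples"}, timeout=None) as r:
    #       for chunk in r.iter_text():
    #           print(chunk)  # each chunk is one of the JSON progress messages yielded below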
def output():
kb = KBServiceFactory.get_service(knowledge_base_name, vs_type, embed_model)
if not kb.exists() and not allow_empty_kb:
yield {"code": 404, "msg": f"未找到知识库 ‘{knowledge_base_name}’"}
else:
kb.create_kb()
kb.clear_vs()
docs = list_files_from_folder(knowledge_base_name)
for i, doc in enumerate(docs):
try:
kb_file = KnowledgeFile(doc, knowledge_base_name)
yield json.dumps({
"code": 200,
"msg": f"({i + 1} / {len(docs)}): {doc}",
"total": len(docs),
"finished": i,
"doc": doc,
}, ensure_ascii=False)
if i == len(docs) - 1:
not_refresh_vs_cache = False
else:
not_refresh_vs_cache = True
kb.add_doc(kb_file, not_refresh_vs_cache=not_refresh_vs_cache)
except Exception as e:
print(e)
yield json.dumps({
"code": 500,
"msg": f"添加文件‘{doc}’到知识库‘{knowledge_base_name}’时出错:{e}。已跳过。",
})
return StreamingResponse(output(), media_type="text/event-stream")
| [] |
2024-01-10 | qinb/Langchain-Chatchat | server~knowledge_base~kb_service~faiss_kb_service.py | import os
import shutil
from configs.model_config import (
KB_ROOT_PATH,
CACHED_VS_NUM,
EMBEDDING_MODEL,
SCORE_THRESHOLD
)
from server.knowledge_base.kb_service.base import KBService, SupportedVSType
from functools import lru_cache
from server.knowledge_base.utils import get_vs_path, load_embeddings, KnowledgeFile
from langchain.vectorstores import FAISS
from langchain.embeddings.base import Embeddings
from typing import List, Dict, Optional
from langchain.docstore.document import Document
from server.utils import torch_gc, embedding_device
_VECTOR_STORE_TICKS = {}
@lru_cache(CACHED_VS_NUM)
def load_faiss_vector_store(
knowledge_base_name: str,
embed_model: str = EMBEDDING_MODEL,
embed_device: str = embedding_device(),
embeddings: Embeddings = None,
        tick: int = 0,  # tick is bumped by upload_doc etc. to force a cache refresh.
) -> FAISS:
print(f"loading vector store in '{knowledge_base_name}'.")
vs_path = get_vs_path(knowledge_base_name)
if embeddings is None:
embeddings = load_embeddings(embed_model, embed_device)
if not os.path.exists(vs_path):
os.makedirs(vs_path)
if "index.faiss" in os.listdir(vs_path):
search_index = FAISS.load_local(vs_path, embeddings, normalize_L2=True)
else:
# create an empty vector store
doc = Document(page_content="init", metadata={})
search_index = FAISS.from_documents([doc], embeddings, normalize_L2=True)
ids = [k for k, v in search_index.docstore._dict.items()]
search_index.delete(ids)
search_index.save_local(vs_path)
if tick == 0: # vector store is loaded first time
_VECTOR_STORE_TICKS[knowledge_base_name] = 0
return search_index
def refresh_vs_cache(kb_name: str):
"""
    make the vector store cache refresh on the next load
"""
_VECTOR_STORE_TICKS[kb_name] = _VECTOR_STORE_TICKS.get(kb_name, 0) + 1
print(f"知识库 {kb_name} 缓存刷新:{_VECTOR_STORE_TICKS[kb_name]}")
class FaissKBService(KBService):
vs_path: str
kb_path: str
def vs_type(self) -> str:
return SupportedVSType.FAISS
def get_vs_path(self):
return os.path.join(self.get_kb_path(), "vector_store")
def get_kb_path(self):
return os.path.join(KB_ROOT_PATH, self.kb_name)
def load_vector_store(self) -> FAISS:
return load_faiss_vector_store(
knowledge_base_name=self.kb_name,
embed_model=self.embed_model,
tick=_VECTOR_STORE_TICKS.get(self.kb_name, 0),
)
def save_vector_store(self, vector_store: FAISS = None):
vector_store = vector_store or self.load_vector_store()
vector_store.save_local(self.vs_path)
return vector_store
def refresh_vs_cache(self):
refresh_vs_cache(self.kb_name)
def get_doc_by_id(self, id: str) -> Optional[Document]:
vector_store = self.load_vector_store()
return vector_store.docstore._dict.get(id)
def do_init(self):
self.kb_path = self.get_kb_path()
self.vs_path = self.get_vs_path()
def do_create_kb(self):
if not os.path.exists(self.vs_path):
os.makedirs(self.vs_path)
self.load_vector_store()
def do_drop_kb(self):
self.clear_vs()
shutil.rmtree(self.kb_path)
def do_search(self,
query: str,
top_k: int,
score_threshold: float = SCORE_THRESHOLD,
embeddings: Embeddings = None,
) -> List[Document]:
search_index = self.load_vector_store()
docs = search_index.similarity_search_with_score(query, k=top_k, score_threshold=score_threshold)
return docs
def do_add_doc(self,
docs: List[Document],
**kwargs,
) -> List[Dict]:
vector_store = self.load_vector_store()
ids = vector_store.add_documents(docs)
doc_infos = [{"id": id, "metadata": doc.metadata} for id, doc in zip(ids, docs)]
torch_gc()
if not kwargs.get("not_refresh_vs_cache"):
vector_store.save_local(self.vs_path)
self.refresh_vs_cache()
return doc_infos
def do_delete_doc(self,
kb_file: KnowledgeFile,
**kwargs):
vector_store = self.load_vector_store()
ids = [k for k, v in vector_store.docstore._dict.items() if v.metadata["source"] == kb_file.filepath]
if len(ids) == 0:
return None
vector_store.delete(ids)
if not kwargs.get("not_refresh_vs_cache"):
vector_store.save_local(self.vs_path)
self.refresh_vs_cache()
return vector_store
def do_clear_vs(self):
shutil.rmtree(self.vs_path)
os.makedirs(self.vs_path)
self.refresh_vs_cache()
def exist_doc(self, file_name: str):
if super().exist_doc(file_name):
return "in_db"
content_path = os.path.join(self.kb_path, "content")
if os.path.isfile(os.path.join(content_path, file_name)):
return "in_folder"
else:
return False
if __name__ == '__main__':
faissService = FaissKBService("test")
faissService.add_doc(KnowledgeFile("README.md", "test"))
faissService.delete_doc(KnowledgeFile("README.md", "test"))
faissService.do_drop_kb()
print(faissService.search_docs("如何启动api服务")) | [] |
2024-01-10 | qinb/Langchain-Chatchat | server~knowledge_base~kb_service~milvus_kb_service.py | from typing import List, Dict, Optional
import numpy as np
from faiss import normalize_L2
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores import Milvus
from sklearn.preprocessing import normalize
from configs.model_config import SCORE_THRESHOLD, kbs_config
from server.knowledge_base.kb_service.base import KBService, SupportedVSType, EmbeddingsFunAdapter, \
score_threshold_process
from server.knowledge_base.utils import KnowledgeFile
class MilvusKBService(KBService):
milvus: Milvus
@staticmethod
def get_collection(milvus_name):
from pymilvus import Collection
return Collection(milvus_name)
def get_doc_by_id(self, id: str) -> Optional[Document]:
if self.milvus.col:
data_list = self.milvus.col.query(expr=f'pk == {id}', output_fields=["*"])
if len(data_list) > 0:
data = data_list[0]
text = data.pop("text")
return Document(page_content=text, metadata=data)
@staticmethod
def search(milvus_name, content, limit=3):
search_params = {
"metric_type": "L2",
"params": {"nprobe": 10},
}
c = MilvusKBService.get_collection(milvus_name)
return c.search(content, "embeddings", search_params, limit=limit, output_fields=["content"])
def do_create_kb(self):
pass
def vs_type(self) -> str:
return SupportedVSType.MILVUS
def _load_milvus(self, embeddings: Embeddings = None):
if embeddings is None:
embeddings = self._load_embeddings()
self.milvus = Milvus(embedding_function=EmbeddingsFunAdapter(embeddings),
collection_name=self.kb_name, connection_args=kbs_config.get("milvus"))
def do_init(self):
self._load_milvus()
def do_drop_kb(self):
if self.milvus.col:
self.milvus.col.drop()
def do_search(self, query: str, top_k: int, score_threshold: float, embeddings: Embeddings):
self._load_milvus(embeddings=EmbeddingsFunAdapter(embeddings))
return score_threshold_process(score_threshold, top_k, self.milvus.similarity_search_with_score(query, top_k))
def do_add_doc(self, docs: List[Document], **kwargs) -> List[Dict]:
ids = self.milvus.add_documents(docs)
doc_infos = [{"id": id, "metadata": doc.metadata} for id, doc in zip(ids, docs)]
return doc_infos
def do_delete_doc(self, kb_file: KnowledgeFile, **kwargs):
if self.milvus.col:
filepath = kb_file.filepath.replace('\\', '\\\\')
delete_list = [item.get("pk") for item in
self.milvus.col.query(expr=f'source == "{filepath}"', output_fields=["pk"])]
self.milvus.col.delete(expr=f'pk in {delete_list}')
def do_clear_vs(self):
if self.milvus.col:
self.milvus.col.drop()
if __name__ == '__main__':
    # used for testing table creation
from server.db.base import Base, engine
Base.metadata.create_all(bind=engine)
milvusService = MilvusKBService("test")
# milvusService.add_doc(KnowledgeFile("README.md", "test"))
print(milvusService.get_doc_by_id("444022434274215486"))
# milvusService.delete_doc(KnowledgeFile("README.md", "test"))
# milvusService.do_drop_kb()
# print(milvusService.search_docs("如何启动api服务"))
| [] |
2024-01-10 | qinb/Langchain-Chatchat | server~llm_api.py | from multiprocessing import Process, Queue
import multiprocessing as mp
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from configs.model_config import llm_model_dict, LLM_MODEL, LOG_PATH, logger
from server.utils import MakeFastAPIOffline, set_httpx_timeout, llm_device
host_ip = "0.0.0.0"
controller_port = 20001
model_worker_port = 20002
openai_api_port = 8888
base_url = "http://127.0.0.1:{}"
def create_controller_app(
dispatch_method="shortest_queue",
):
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.controller import app, Controller
controller = Controller(dispatch_method)
sys.modules["fastchat.serve.controller"].controller = controller
MakeFastAPIOffline(app)
app.title = "FastChat Controller"
return app
def create_model_worker_app(
worker_address=base_url.format(model_worker_port),
controller_address=base_url.format(controller_port),
model_path=llm_model_dict[LLM_MODEL].get("local_model_path"),
device=llm_device(),
gpus=None,
max_gpu_memory="20GiB",
load_8bit=False,
cpu_offloading=None,
gptq_ckpt=None,
gptq_wbits=16,
gptq_groupsize=-1,
gptq_act_order=False,
awq_ckpt=None,
awq_wbits=16,
awq_groupsize=-1,
model_names=[LLM_MODEL],
num_gpus=1, # not in fastchat
conv_template=None,
limit_worker_concurrency=5,
stream_interval=2,
no_register=False,
):
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.model_worker import app, GptqConfig, AWQConfig, ModelWorker, worker_id
import argparse
import threading
import fastchat.serve.model_worker
    # workaround to make the program exit with Ctrl+C
    # it should be deleted after the PR is merged by fastchat
def _new_init_heart_beat(self):
self.register_to_controller()
self.heart_beat_thread = threading.Thread(
target=fastchat.serve.model_worker.heart_beat_worker, args=(self,), daemon=True,
)
self.heart_beat_thread.start()
ModelWorker.init_heart_beat = _new_init_heart_beat
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.model_path = model_path
args.model_names = model_names
args.device = device
args.load_8bit = load_8bit
args.gptq_ckpt = gptq_ckpt
args.gptq_wbits = gptq_wbits
args.gptq_groupsize = gptq_groupsize
args.gptq_act_order = gptq_act_order
args.awq_ckpt = awq_ckpt
args.awq_wbits = awq_wbits
args.awq_groupsize = awq_groupsize
args.gpus = gpus
args.num_gpus = num_gpus
args.max_gpu_memory = max_gpu_memory
args.cpu_offloading = cpu_offloading
args.worker_address = worker_address
args.controller_address = controller_address
args.conv_template = conv_template
args.limit_worker_concurrency = limit_worker_concurrency
args.stream_interval = stream_interval
args.no_register = no_register
if args.gpus:
if len(args.gpus.split(",")) < args.num_gpus:
raise ValueError(
f"Larger --num-gpus ({args.num_gpus}) than --gpus {args.gpus}!"
)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
if gpus and num_gpus is None:
num_gpus = len(gpus.split(','))
args.num_gpus = num_gpus
gptq_config = GptqConfig(
ckpt=gptq_ckpt or model_path,
wbits=args.gptq_wbits,
groupsize=args.gptq_groupsize,
act_order=args.gptq_act_order,
)
awq_config = AWQConfig(
ckpt=args.awq_ckpt or args.model_path,
wbits=args.awq_wbits,
groupsize=args.awq_groupsize,
)
# torch.multiprocessing.set_start_method('spawn')
worker = ModelWorker(
controller_addr=args.controller_address,
worker_addr=args.worker_address,
worker_id=worker_id,
model_path=args.model_path,
model_names=args.model_names,
limit_worker_concurrency=args.limit_worker_concurrency,
no_register=args.no_register,
device=args.device,
num_gpus=args.num_gpus,
max_gpu_memory=args.max_gpu_memory,
load_8bit=args.load_8bit,
cpu_offloading=args.cpu_offloading,
gptq_config=gptq_config,
awq_config=awq_config,
stream_interval=args.stream_interval,
conv_template=args.conv_template,
)
sys.modules["fastchat.serve.model_worker"].worker = worker
sys.modules["fastchat.serve.model_worker"].args = args
sys.modules["fastchat.serve.model_worker"].gptq_config = gptq_config
MakeFastAPIOffline(app)
app.title = f"FastChat LLM Server ({LLM_MODEL})"
return app
def create_openai_api_app(
controller_address=base_url.format(controller_port),
api_keys=[],
):
import fastchat.constants
fastchat.constants.LOGDIR = LOG_PATH
from fastchat.serve.openai_api_server import app, CORSMiddleware, app_settings
app.add_middleware(
CORSMiddleware,
allow_credentials=True,
allow_origins=["*"],
allow_methods=["*"],
allow_headers=["*"],
)
app_settings.controller_address = controller_address
app_settings.api_keys = api_keys
MakeFastAPIOffline(app)
app.title = "FastChat OpeanAI API Server"
return app
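# Startup ordering across the three processes is coordinated through the shared Queue: the
# controller puts 1 once it is up, the model worker waits for that token and then puts 2, and the
# OpenAI-compatible API server waits for 2 and finally puts 3, so each service only starts serving
# after the one it depends on is ready.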
def run_controller(q):
import uvicorn
app = create_controller_app()
@app.on_event("startup")
async def on_startup():
set_httpx_timeout()
q.put(1)
uvicorn.run(app, host=host_ip, port=controller_port)
def run_model_worker(q, *args, **kwargs):
import uvicorn
app = create_model_worker_app(*args, **kwargs)
@app.on_event("startup")
async def on_startup():
set_httpx_timeout()
while True:
no = q.get()
if no != 1:
q.put(no)
else:
break
q.put(2)
uvicorn.run(app, host=host_ip, port=model_worker_port)
def run_openai_api(q):
import uvicorn
app = create_openai_api_app()
@app.on_event("startup")
async def on_startup():
set_httpx_timeout()
while True:
no = q.get()
if no != 2:
q.put(no)
else:
break
q.put(3)
uvicorn.run(app, host=host_ip, port=openai_api_port)
if __name__ == "__main__":
mp.set_start_method("spawn")
queue = Queue()
logger.info(llm_model_dict[LLM_MODEL])
model_path = llm_model_dict[LLM_MODEL]["local_model_path"]
logger.info(f"如需查看 llm_api 日志,请前往 {LOG_PATH}")
if not model_path:
logger.error("local_model_path 不能为空")
else:
controller_process = Process(
target=run_controller,
name=f"controller({os.getpid()})",
args=(queue,),
daemon=True,
)
controller_process.start()
model_worker_process = Process(
target=run_model_worker,
name=f"model_worker({os.getpid()})",
args=(queue,),
# kwargs={"load_8bit": True},
daemon=True,
)
model_worker_process.start()
openai_api_process = Process(
target=run_openai_api,
name=f"openai_api({os.getpid()})",
args=(queue,),
daemon=True,
)
openai_api_process.start()
try:
model_worker_process.join()
controller_process.join()
openai_api_process.join()
except KeyboardInterrupt:
model_worker_process.terminate()
controller_process.terminate()
openai_api_process.terminate()
# Example of calling the API after the services are up:
# import openai
# openai.api_key = "EMPTY" # Not support yet
# openai.api_base = "http://localhost:8888/v1"
# model = "chatglm2-6b"
# # create a chat completion
# completion = openai.ChatCompletion.create(
# model=model,
# messages=[{"role": "user", "content": "Hello! What is your name?"}]
# )
# # print the completion
# print(completion.choices[0].message.content)
| [] |
2024-01-10 | qinb/Langchain-Chatchat | server~knowledge_base~kb_service~pg_kb_service.py | import json
from typing import List, Dict, Optional
from langchain.embeddings.base import Embeddings
from langchain.schema import Document
from langchain.vectorstores import PGVector
from langchain.vectorstores.pgvector import DistanceStrategy
from sqlalchemy import text
from configs.model_config import EMBEDDING_DEVICE, kbs_config
from server.knowledge_base.kb_service.base import SupportedVSType, KBService, EmbeddingsFunAdapter, \
score_threshold_process
from server.knowledge_base.utils import load_embeddings, KnowledgeFile
from server.utils import embedding_device as get_embedding_device
class PGKBService(KBService):
pg_vector: PGVector
def _load_pg_vector(self, embedding_device: str = get_embedding_device(), embeddings: Embeddings = None):
_embeddings = embeddings
if _embeddings is None:
_embeddings = load_embeddings(self.embed_model, embedding_device)
self.pg_vector = PGVector(embedding_function=EmbeddingsFunAdapter(_embeddings),
collection_name=self.kb_name,
distance_strategy=DistanceStrategy.EUCLIDEAN,
connection_string=kbs_config.get("pg").get("connection_uri"))
def get_doc_by_id(self, id: str) -> Optional[Document]:
with self.pg_vector.connect() as connect:
stmt = text("SELECT document, cmetadata FROM langchain_pg_embedding WHERE collection_id=:id")
results = [Document(page_content=row[0], metadata=row[1]) for row in
connect.execute(stmt, parameters={'id': id}).fetchall()]
if len(results) > 0:
return results[0]
def do_init(self):
self._load_pg_vector()
def do_create_kb(self):
pass
def vs_type(self) -> str:
return SupportedVSType.PG
def do_drop_kb(self):
with self.pg_vector.connect() as connect:
connect.execute(text(f'''
                -- delete rows in langchain_pg_embedding that belong to this collection in langchain_pg_collection
DELETE FROM langchain_pg_embedding
WHERE collection_id IN (
SELECT uuid FROM langchain_pg_collection WHERE name = '{self.kb_name}'
);
                -- delete the collection row from langchain_pg_collection
DELETE FROM langchain_pg_collection WHERE name = '{self.kb_name}';
'''))
connect.commit()
def do_search(self, query: str, top_k: int, score_threshold: float, embeddings: Embeddings):
self._load_pg_vector(embeddings=embeddings)
return score_threshold_process(score_threshold, top_k,
self.pg_vector.similarity_search_with_score(query, top_k))
def do_add_doc(self, docs: List[Document], **kwargs) -> List[Dict]:
ids = self.pg_vector.add_documents(docs)
doc_infos = [{"id": id, "metadata": doc.metadata} for id, doc in zip(ids, docs)]
return doc_infos
def do_delete_doc(self, kb_file: KnowledgeFile, **kwargs):
with self.pg_vector.connect() as connect:
filepath = kb_file.filepath.replace('\\', '\\\\')
connect.execute(
text(
''' DELETE FROM langchain_pg_embedding WHERE cmetadata::jsonb @> '{"source": "filepath"}'::jsonb;'''.replace(
"filepath", filepath)))
connect.commit()
def do_clear_vs(self):
self.pg_vector.delete_collection()
if __name__ == '__main__':
from server.db.base import Base, engine
# Base.metadata.create_all(bind=engine)
pGKBService = PGKBService("test")
# pGKBService.create_kb()
# pGKBService.add_doc(KnowledgeFile("README.md", "test"))
# pGKBService.delete_doc(KnowledgeFile("README.md", "test"))
# pGKBService.drop_kb()
print(pGKBService.get_doc_by_id("f1e51390-3029-4a19-90dc-7118aaa25772"))
# print(pGKBService.search_docs("如何启动api服务"))
| [] |
2024-01-10 | qinb/Langchain-Chatchat | server~chat~knowledge_base_chat.py | from fastapi import Body, Request
from fastapi.responses import StreamingResponse
from configs.model_config import (llm_model_dict, LLM_MODEL, PROMPT_TEMPLATE,
VECTOR_SEARCH_TOP_K, SCORE_THRESHOLD)
from server.chat.utils import wrap_done
from server.utils import BaseResponse
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, List, Optional
import asyncio
from langchain.prompts.chat import ChatPromptTemplate
from server.chat.utils import History
from server.knowledge_base.kb_service.base import KBService, KBServiceFactory
import json
import os
from urllib.parse import urlencode
from server.knowledge_base.kb_doc_api import search_docs
def knowledge_base_chat(query: str = Body(..., description="用户输入", examples=["你好"]),
knowledge_base_name: str = Body(..., description="知识库名称", examples=["samples"]),
top_k: int = Body(VECTOR_SEARCH_TOP_K, description="匹配向量数"),
score_threshold: float = Body(SCORE_THRESHOLD, description="知识库匹配相关度阈值,取值范围在0-1之间,SCORE越小,相关度越高,取到1相当于不筛选,建议设置在0.5左右", ge=0, le=1),
history: List[History] = Body([],
description="历史对话",
examples=[[
{"role": "user",
"content": "我们来玩成语接龙,我先来,生龙活虎"},
{"role": "assistant",
"content": "虎头虎脑"}]]
),
stream: bool = Body(False, description="流式输出"),
model_name: str = Body(LLM_MODEL, description="LLM 模型名称。"),
local_doc_url: bool = Body(False, description="知识文件返回本地路径(true)或URL(false)"),
request: Request = None,
):
kb = KBServiceFactory.get_service_by_name(knowledge_base_name)
if kb is None:
return BaseResponse(code=404, msg=f"未找到知识库 {knowledge_base_name}")
history = [History.from_data(h) for h in history]
async def knowledge_base_chat_iterator(query: str,
kb: KBService,
top_k: int,
history: Optional[List[History]],
model_name: str = LLM_MODEL,
) -> AsyncIterable[str]:
callback = AsyncIteratorCallbackHandler()
model = ChatOpenAI(
streaming=True,
verbose=True,
callbacks=[callback],
openai_api_key=llm_model_dict[model_name]["api_key"],
openai_api_base=llm_model_dict[model_name]["api_base_url"],
model_name=model_name,
openai_proxy=llm_model_dict[model_name].get("openai_proxy")
)
docs = search_docs(query, knowledge_base_name, top_k, score_threshold)
context = "\n".join([doc.page_content for doc in docs])
input_msg = History(role="user", content=PROMPT_TEMPLATE).to_msg_template(False)
chat_prompt = ChatPromptTemplate.from_messages(
[i.to_msg_template() for i in history] + [input_msg])
chain = LLMChain(prompt=chat_prompt, llm=model)
# Begin a task that runs in the background.
task = asyncio.create_task(wrap_done(
chain.acall({"context": context, "question": query}),
callback.done),
)
source_documents = []
for inum, doc in enumerate(docs):
filename = os.path.split(doc.metadata["source"])[-1]
if local_doc_url:
url = "file://" + doc.metadata["source"]
else:
parameters = urlencode({"knowledge_base_name": knowledge_base_name, "file_name":filename})
url = f"{request.base_url}knowledge_base/download_doc?" + parameters
text = f"""出处 [{inum + 1}] [{filename}]({url}) \n\n{doc.page_content}\n\n"""
source_documents.append(text)
if stream:
async for token in callback.aiter():
# Use server-sent-events to stream the response
yield json.dumps({"answer": token,
"docs": source_documents},
ensure_ascii=False)
else:
answer = ""
async for token in callback.aiter():
answer += token
yield json.dumps({"answer": answer,
"docs": source_documents},
ensure_ascii=False)
await task
return StreamingResponse(knowledge_base_chat_iterator(query, kb, top_k, history, model_name),
media_type="text/event-stream")
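# Shape of one streamed chunk when stream=True (values are illustrative, not real output):
#   {"answer": "虎", "docs": ["出处 [1] [test.txt](http://127.0.0.1:7861/knowledge_base/download_doc?...) \n\n..."]}
# With stream=False the tokens are accumulated and a single chunk carrying the full answer is yielded.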
| [
"虎头虎脑",
"我们来玩成语接龙,我先来,生龙活虎"
] |
2024-01-10 | qinb/Langchain-Chatchat | document_loaders~myimgloader.py | from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
class RapidOCRLoader(UnstructuredFileLoader):
def _get_elements(self) -> List:
def img2text(filepath):
from rapidocr_onnxruntime import RapidOCR
resp = ""
ocr = RapidOCR()
result, _ = ocr(filepath)
if result:
ocr_result = [line[1] for line in result]
resp += "\n".join(ocr_result)
return resp
text = img2text(self.file_path)
from unstructured.partition.text import partition_text
return partition_text(text=text, **self.unstructured_kwargs)
if __name__ == "__main__":
loader = RapidOCRLoader(file_path="../tests/samples/ocr_test.jpg")
docs = loader.load()
print(docs)
| [] |
2024-01-10 | vananh-ng/bigdataproject | archive~test_Be_your_own_DJ_with_MelodyMap.py | import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import streamlit as st
from dotenv import load_dotenv, find_dotenv
import os
from sklearn.neighbors import NearestNeighbors
import plotly.express as px
import streamlit.components.v1 as components
import pandas as pd
from openai import OpenAI
# API Management
load_dotenv(find_dotenv(".env"))
# Set the OpenAI API key (the .env file must be loaded first so the key is available)
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
# Spotify API Keys
client_id = os.getenv("CLIENT_ID")
client_secret = os.getenv("CLIENT_SECRET")
client_credentials_manager = SpotifyClientCredentials(client_id=client_id, client_secret=client_secret)
sp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)
# COLOURS
spotifyGreen = '#1dda63'
bg_color_cas = "#9bf0e1"
grey = "#979797"
lightgrey = "#bdbdbd"
def main():
# Set page config
st.set_page_config(page_title="Spotify Big Data Project",
#page_icon=":musical_note:",
layout="wide")
title = "Be your own DJ with MelodyMap!"
st.title(title)
st.markdown("##")
st.write("💚 Create your own playlist based on your mood!")
# GPT-based recommendation engine
def get_completion(messages, model="gpt-3.5-turbo", temperature=0.7):
response = client.chat.completions.create(model=model,
messages=messages,
temperature=temperature)
        content = response.choices[0].message.content  # the v1 client returns an object, not a dict
return content
def run_model(system_message, user_message):
messages = [
{'role': 'system',
'content': system_message},
{'role': 'user',
'content': user_message}
]
response = get_completion(messages)
return response
system_message = "You are a Spotify playist recommender. Your task is to give users song recommendations based on their mood. Keep your answers to a maximum of 10 songs. Next to each recommendations, provide a URL link to that song on Spotify"
user_message = st.text_input("How's your mood today?")
if st.button("Send"):
response = run_model(system_message, user_message)
st.write(response)
#@st.cache(allow_output_mutation=True)
# Song recommendations based on genre and audio features
@st.cache_data()
def load_data():
df = pd.read_csv('app/data/SpotGenTrack/filtered_track_df.csv')
df['genres'] = df['genres'].apply(lambda x: x.replace("'", "").replace("[", "").replace("]", "").split(", "))
exploded_track_df = df.explode('genres')
return exploded_track_df
genre_names = ['Dance Pop', 'Electronic', 'Electropop', 'Pop', 'Pop Dance', 'Pop Rap', 'Rap', 'Tropical House']
audio_feats = ["acousticness", "danceability", "energy", "instrumentalness", "liveness", "loudness", "speechiness", "tempo", "valence"]
exploded_track_df = load_data()
# knn model
def n_neighbors_uri_audio(genre, test_feat):
genre = genre.lower()
genre_data = exploded_track_df[(exploded_track_df["genres"]==genre)]
genre_data = genre_data.sort_values(by='popularity', ascending=False)[:500]
neigh = NearestNeighbors()
neigh.fit(genre_data[audio_feats].to_numpy())
n_neighbors = neigh.kneighbors([test_feat], n_neighbors=len(genre_data), return_distance=False)[0]
uris = genre_data.iloc[n_neighbors]["uri"].tolist()
audios = genre_data.iloc[n_neighbors][audio_feats].to_numpy()
return uris, audios
### Search for a song ###
def search_song(song_name):
"""
Function to search for a song on Spotify
"""
results = sp.search(q=song_name, limit=1) # limit set to 1 to return only top result
if results['tracks']['items'] == []:
return "Song not found."
track = results['tracks']['items'][0]
song_info = {
'name': track['name'],
'artist': track['artists'][0]['name'],
'album': track['album']['name'],
'release_date': track['album']['release_date'],
'popularity': track['popularity'],
'uri': track['uri'].split(':')[2],
'audio_features': sp.audio_features(track['uri'])[0]
}
return song_info
# Use the sidebar method for the input and button
song_name = st.sidebar.text_input("Enter a Song Name:", value='Viva La Vida')
song_info = search_song(song_name)
if song_info == "Song not found.":
st.sidebar.write(song_info)
else:
st.sidebar.write("Song Name:", song_info['name'])
st.sidebar.write("Artist:", song_info['artist'])
st.sidebar.write("Release Date:", song_info['release_date'])
# Create the Spotify embed in the sidebar
st.sidebar.markdown(
f'<iframe src="https://open.spotify.com/embed/track/{song_info["uri"]}" width="200" height="300" frameborder="0" allowtransparency="true" allow="encrypted-media"></iframe>',
unsafe_allow_html=True,
)
# Song Recommendation
st.markdown("##")
st.write("💚 Create your own playlist by choosing your favourite genre and features!")
with st.container():
col1, col2,col3,col4 = st.columns((2,0.5,0.5,0.5))
with col3:
st.markdown("***Choose your genre:***")
genre = st.radio(
"Select your genre:",
genre_names, index=genre_names.index("Pop"))
with col1:
st.markdown("***Choose features to customize:***")
acousticness = st.slider(
'Acousticness',
0.0, 1.0, 0.5)
danceability = st.slider(
'Danceability',
0.0, 1.0, 0.5)
energy = st.slider(
'Energy',
0.0, 1.0, 0.5)
instrumentalness = st.slider(
'Instrumentalness',
0.0, 1.0, 0.0)
valence = st.slider(
'Valence',
0.0, 1.0, 0.45)
tempo = st.slider(
'Tempo',
0.0, 244.0, 118.0)
liveness = st.slider(
'Liveness', 0.0, 1.0, 0.5)
loudness = st.slider(
'Loudness', -60.0, 0.0, -12.0) # Note: loudness is typically in the range from -60 to 0 dB
speechiness = st.slider(
'Speechiness', 0.0, 1.0, 0.5)
tracks_per_page = 6
    test_feat = [acousticness, danceability, energy, instrumentalness, liveness, loudness, speechiness, tempo, valence]  # ordered to match audio_feats
uris, audios = n_neighbors_uri_audio(genre, test_feat)
tracks = []
for uri in uris:
track = """<iframe src="https://open.spotify.com/embed/track/{}" width="260" height="380" frameborder="0" allowtransparency="true" allow="encrypted-media"></iframe>""".format(uri)
tracks.append(track)
if 'previous_inputs' not in st.session_state:
st.session_state['previous_inputs'] = [genre] + test_feat
current_inputs = [genre] + test_feat
if current_inputs != st.session_state['previous_inputs']:
if 'start_track_i' in st.session_state:
st.session_state['start_track_i'] = 0
st.session_state['previous_inputs'] = current_inputs
if 'start_track_i' not in st.session_state:
st.session_state['start_track_i'] = 0
with st.container():
col1, col2, col3 = st.columns([2,1,2])
if st.button("Recommend More Songs"):
if st.session_state['start_track_i'] < len(tracks):
st.session_state['start_track_i'] += tracks_per_page
current_tracks = tracks[st.session_state['start_track_i']: st.session_state['start_track_i'] + tracks_per_page]
current_audios = audios[st.session_state['start_track_i']: st.session_state['start_track_i'] + tracks_per_page]
if st.session_state['start_track_i'] < len(tracks):
for i, (track, audio) in enumerate(zip(current_tracks, current_audios)):
if i%2==0:
with col1:
components.html(
track,
height=400,
)
with st.expander("See more details"):
df = pd.DataFrame(dict(
r=audio[:5],
theta=audio_feats[:5]))
fig = px.line_polar(df, r='r', theta='theta', line_close=True)
fig.update_layout(height=400, width=340)
st.plotly_chart(fig)
else:
with col3:
components.html(
track,
height=400,
)
with st.expander("See more details"):
df = pd.DataFrame(dict(
r=audio[:5],
theta=audio_feats[:5]))
fig = px.line_polar(df, r='r', theta='theta', line_close=True)
fig.update_layout(height=400, width=340)
st.plotly_chart(fig)
else:
st.write("No songs left to recommend")
if __name__ == "__main__":
main() | [
"You are a Spotify playist recommender. Your task is to give users song recommendations based on their mood. Keep your answers to a maximum of 10 songs. Next to each recommendations, provide a URL link to that song on Spotify"
] |
2024-01-10 | vananh-ng/bigdataproject | app~pages~02_Be_your_own_DJ.py | import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import streamlit as st
from dotenv import load_dotenv, find_dotenv
import os
from sklearn.neighbors import NearestNeighbors
import plotly.express as px
import streamlit.components.v1 as components
import pandas as pd
from openai import OpenAI
import openai
# API Management
load_dotenv(find_dotenv(".env"))
# Load the .env file before creating the client so the OpenAI key is available
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
# Spotify API Keys
client_id = os.getenv("CLIENT_ID")
client_secret = os.getenv("CLIENT_SECRET")
client_credentials_manager = SpotifyClientCredentials(client_id=client_id, client_secret=client_secret)
sp = spotipy.Spotify(client_credentials_manager = client_credentials_manager)
# Set page config
st.set_page_config(page_title="Spotify Big Data Project",
#page_icon=":musical_note:",
layout="wide")
title = "Be your own DJ!"
col1, col2 = st.columns([7, 1])
with col1:
st.title(title)
with col2:
st.image('app/images/logo4.png', use_column_width='auto')
st.markdown("##")
st.subheader("💚 Create your own playlist based on your mood!")
# GPT-based recommendation engine
#@st.cache(suppress_st_warning=True, show_spinner=False)
def get_completion(system_message, user_message):
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
temperature=0.7,
messages=[
{"role": "system", "content": system_message},
{"role": "user", "content": user_message}
]
)
return completion.choices[0].message.content
#system_message = "As a Spotify playlist recommender, your task is to provide song recommendations based on users' description of their current mood. Your tone is fun, compassionate, and friendly."
#user_message = st.text_input("🤖 How's your mood today?")
# Streamlit UI Code
system_message = "Recommend 10 songs based on user's mood. Your tone is friendly and understanding.\
Your response should be in the form of a list of song titles.\
Your response should start with the emoji '🤖'"
# Creating two columns
col1, col2 = st.columns(2)
# Response display in the right column
with col1:
if 'response' not in st.session_state:
st.session_state['response'] = "Your response will appear here..."
# Text input in the left column
with col2:
user_message = st.text_input("User", key="user_input")
send_button = st.button("Send")
# Button outside the columns, spanning the full width
if send_button:
st.session_state['response'] = get_completion(system_message, user_message)
# Updating the response in the right column
with col1:
st.write(st.session_state['response'])
# Song recommendations based on genre and audio features
@st.cache_data()
def load_data():
df = pd.read_csv('app/data/SpotGenTrack/filtered_track_df.csv')
df['genres'] = df['genres'].apply(lambda x: x.replace("'", "").replace("[", "").replace("]", "").split(", "))
exploded_track_df = df.explode('genres')
return exploded_track_df
genre_names = ['Dance Pop', 'Electronic', 'Electropop', 'Pop', 'Pop Dance', 'Pop Rap', 'Rap', 'Tropical House']
audio_feats = ["acousticness", "danceability", "energy", "instrumentalness", "liveness", "loudness", "speechiness", "tempo", "valence"]
exploded_track_df = load_data()
# knn model
def n_neighbors_uri_audio(genre, start_year, end_year, test_feat):
genre = genre.lower()
genre_data = exploded_track_df[(exploded_track_df["genres"]==genre) & (exploded_track_df["release_year"]>=start_year) & (exploded_track_df["release_year"]<=end_year)]
genre_data = genre_data.sort_values(by='popularity', ascending=False)[:500]
neigh = NearestNeighbors()
neigh.fit(genre_data[audio_feats].to_numpy())
n_neighbors = neigh.kneighbors([test_feat], n_neighbors=len(genre_data), return_distance=False)[0]
uris = genre_data.iloc[n_neighbors]["uri"].tolist()
audios = genre_data.iloc[n_neighbors][audio_feats].to_numpy()
return uris, audios
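# Illustrative call of the kNN helper (feature values are made-up and ordered as in audio_feats):
#   example_feat = [0.5, 0.5, 0.5, 0.0, 0.5, -12.0, 0.5, 118.0, 0.45]
#   uris, audios = n_neighbors_uri_audio("Pop", 2015, 2023, example_feat)
#   uris[:3]  # Spotify URIs of the closest tracks within the 500 most popular candidates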
### Search for a song ###
def search_song(song_name):
"""
Function to search for a song on Spotify
"""
results = sp.search(q=song_name, limit=1) # limit set to 1 to return only top result
if results['tracks']['items'] == []:
return "Song not found."
track = results['tracks']['items'][0]
song_info = {
'name': track['name'],
'artist': track['artists'][0]['name'],
'album': track['album']['name'],
'release_date': track['album']['release_date'],
'popularity': track['popularity'],
'uri': track['uri'].split(':')[2],
'audio_features': sp.audio_features(track['uri'])[0]
}
return song_info
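# Illustrative shape of the dict returned by search_song (field values are examples, not live API data):
#   {"name": "Viva La Vida", "artist": "Coldplay", "album": "Viva la Vida or Death and All His Friends",
#    "release_date": "2008-05-26", "popularity": 84, "uri": "<track id>",
#    "audio_features": {...}}  # full audio-features payload from sp.audio_features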
# Use the sidebar method for the input and button
song_name = st.sidebar.text_input("Enter a Song Name:", value='Viva La Vida')
song_info = search_song(song_name)
if song_info == "Song not found.":
st.sidebar.write(song_info)
else:
st.sidebar.write("Song Name:", song_info['name'])
st.sidebar.write("Artist:", song_info['artist'])
st.sidebar.write("Release Date:", song_info['release_date'])
# Create the Spotify embed in the sidebar
st.sidebar.markdown(
f'<iframe src="https://open.spotify.com/embed/track/{song_info["uri"]}" width="300" height="280" frameborder="0" allowtransparency="true" allow="encrypted-media" ></iframe>',
unsafe_allow_html=True,
)
# Song Recommendation
st.markdown("##")
st.subheader("💚 Create your own playlist by choosing your favourite genre and features!")
with st.container():
col1, col2,col3,col4 = st.columns((2,0.5,0.5,0.5))
with col3:
st.markdown("***Choose your genre:***")
genre = st.radio(
"Select your genre:",
genre_names, index=genre_names.index("Pop"))
with col1:
st.markdown("***Choose features to customize:***")
start_year, end_year = st.slider(
'Select the year range',
1990, 2023, (2015, 2023))
acousticness = st.slider(
'Acousticness',
0.0, 1.0, 0.5)
danceability = st.slider(
'Danceability',
0.0, 1.0, 0.5)
energy = st.slider(
'Energy',
0.0, 1.0, 0.5)
instrumentalness = st.slider(
'Instrumentalness',
0.0, 1.0, 0.0)
valence = st.slider(
'Valence',
0.0, 1.0, 0.45)
tempo = st.slider(
'Tempo',
0.0, 244.0, 118.0)
liveness = st.slider(
'Liveness', 0.0, 1.0, 0.5)
loudness = st.slider(
'Loudness', -60.0, 0.0, -12.0) # Note: loudness is typically in the range from -60 to 0 dB
speechiness = st.slider(
'Speechiness', 0.0, 1.0, 0.5)
tracks_per_page = 6
test_feat = [acousticness, danceability, energy, instrumentalness, liveness, loudness, speechiness, tempo, valence]  # ordered to match audio_feats
uris, audios = n_neighbors_uri_audio(genre, start_year, end_year, test_feat)
tracks = []
for uri in uris:
track = """<iframe src="https://open.spotify.com/embed/track/{}" width="260" height="380" frameborder="0" allowtransparency="true" allow="encrypted-media"></iframe>""".format(uri)
tracks.append(track)
if 'previous_inputs' not in st.session_state:
st.session_state['previous_inputs'] = [genre, start_year, end_year] + test_feat
current_inputs = [genre, start_year, end_year] + test_feat
if current_inputs != st.session_state['previous_inputs']:
if 'start_track_i' in st.session_state:
st.session_state['start_track_i'] = 0
st.session_state['previous_inputs'] = current_inputs
if 'start_track_i' not in st.session_state:
st.session_state['start_track_i'] = 0
with st.container():
col1, col2, col3 = st.columns([2,1,2])
if st.button("Recommend More Songs"):
if st.session_state['start_track_i'] < len(tracks):
st.session_state['start_track_i'] += tracks_per_page
current_tracks = tracks[st.session_state['start_track_i']: st.session_state['start_track_i'] + tracks_per_page]
current_audios = audios[st.session_state['start_track_i']: st.session_state['start_track_i'] + tracks_per_page]
if st.session_state['start_track_i'] < len(tracks):
for i, (track, audio) in enumerate(zip(current_tracks, current_audios)):
if i%2==0:
with col1:
components.html(
track,
height=400,
)
with st.expander("See more details"):
df = pd.DataFrame(dict(
r=audio[:5],
theta=audio_feats[:5]))
fig = px.line_polar(df, r='r', theta='theta', line_close=True)
fig.update_layout(height=400, width=340)
st.plotly_chart(fig)
else:
with col3:
components.html(
track,
height=400,
)
with st.expander("See more details"):
df = pd.DataFrame(dict(
r=audio[:5],
theta=audio_feats[:5]))
fig = px.line_polar(df, r='r', theta='theta', line_close=True)
fig.update_layout(height=400, width=340)
st.plotly_chart(fig)
else:
st.write("No songs left to recommend")
| [
"Recommend 10 songs based on user's mood. Your tone is friendly and understanding. Your response should be in the form of a list of song titles. Your response should start with the emoji '🤖'"
] |
2024-01-10 | ObelusFamily/Anythink-Market-xzzghhg7 | backend~app~api~routes~items~items_resource.py | from typing import Optional
from fastapi import APIRouter, Body, Depends, HTTPException, Response
from starlette import status
from app.api.dependencies.items import (
check_item_modification_permissions,
get_item_by_slug_from_path,
get_items_filters,
)
from app.api.dependencies.authentication import get_current_user_authorizer
from app.api.dependencies.database import get_repository
from app.db.repositories.items import ItemsRepository
from app.models.domain.items import Item
from app.models.domain.users import User
from app.models.schemas.items import (
ItemForResponse,
ItemInCreate,
ItemInResponse,
ItemInUpdate,
ItemsFilters,
ListOfItemsInResponse,
)
from app.resources import strings
from app.services.items import check_item_exists, get_slug_for_item
from app.services.event import send_event
router = APIRouter()
import os
import openai
openai.api_key = os.getenv('OPENAI_API_KEY')
#openai.api_key =
@router.get("", response_model=ListOfItemsInResponse, name="items:list-items")
async def list_items(
items_filters: ItemsFilters = Depends(get_items_filters),
user: Optional[User] = Depends(get_current_user_authorizer(required=False)),
items_repo: ItemsRepository = Depends(get_repository(ItemsRepository)),
) -> ListOfItemsInResponse:
items = await items_repo.filter_items(
tag=items_filters.tag,
seller=items_filters.seller,
favorited=items_filters.favorited,
limit=items_filters.limit,
offset=items_filters.offset,
requested_user=user,
)
items_for_response = [
ItemForResponse.from_orm(item) for item in items
]
return ListOfItemsInResponse(
items=items_for_response,
items_count=len(items),
)
@router.post(
"",
status_code=status.HTTP_201_CREATED,
response_model=ItemInResponse,
name="items:create-item",
)
async def create_new_item(
item_create: ItemInCreate = Body(..., embed=True, alias="item"),
user: User = Depends(get_current_user_authorizer()),
items_repo: ItemsRepository = Depends(get_repository(ItemsRepository)),
) -> ItemInResponse:
slug = get_slug_for_item(item_create.title)
if await check_item_exists(items_repo, slug):
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail=strings.ITEM_ALREADY_EXISTS,
)
if not item_create.image:
response = openai.Image.create(
prompt=item_create.title,
n=1,
size='256x256'
)
item_create.image = response['data'][0]['url']
item = await items_repo.create_item(
slug=slug,
title=item_create.title,
description=item_create.description,
body=item_create.body,
seller=user,
tags=item_create.tags,
image=item_create.image
)
send_event('item_created', {'item': item_create.title})
return ItemInResponse(item=ItemForResponse.from_orm(item))
@router.get("/{slug}", response_model=ItemInResponse, name="items:get-item")
async def retrieve_item_by_slug(
item: Item = Depends(get_item_by_slug_from_path),
) -> ItemInResponse:
return ItemInResponse(item=ItemForResponse.from_orm(item))
@router.put(
"/{slug}",
response_model=ItemInResponse,
name="items:update-item",
dependencies=[Depends(check_item_modification_permissions)],
)
async def update_item_by_slug(
item_update: ItemInUpdate = Body(..., embed=True, alias="item"),
current_item: Item = Depends(get_item_by_slug_from_path),
items_repo: ItemsRepository = Depends(get_repository(ItemsRepository)),
) -> ItemInResponse:
slug = get_slug_for_item(item_update.title) if item_update.title else None
item = await items_repo.update_item(
item=current_item,
slug=slug,
**item_update.dict(),
)
return ItemInResponse(item=ItemForResponse.from_orm(item))
@router.delete(
"/{slug}",
status_code=status.HTTP_204_NO_CONTENT,
name="items:delete-item",
dependencies=[Depends(check_item_modification_permissions)],
response_class=Response,
)
async def delete_item_by_slug(
item: Item = Depends(get_item_by_slug_from_path),
items_repo: ItemsRepository = Depends(get_repository(ItemsRepository)),
) -> None:
await items_repo.delete_item(item=item)
| [] |
2024-01-10 | tomo823/Laf | store.py | # llama-index==0.7.0
# Upserting vectors into Pinecone
import json
from llama_index import SimpleDirectoryReader, StorageContext, VectorStoreIndex
from llama_index.vector_stores import PineconeVectorStore
import pinecone
import openai
import os
import logging
import sys
from dotenv import load_dotenv
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
pinecone_api_key = os.getenv("PINECONE_API_KEY")
if pinecone_api_key is None:
raise ValueError("Please set your PINECONE_API_KEY environment variable")
pinecone.init(api_key=pinecone_api_key, environment="gcp-starter")
folder_list = [
"./movies/【高校数学1】数と式",
"./movies/【中1数学】一次方程式",
"./movies/【中1数学】空間図形",
"./movies/【中1数学】正の数・負の数",
"./movies/【中1数学】比例・反比例",
"./movies/【中1数学】文字式",
"./movies/【中1数学】平面図形",
"./movies/【中1数学】資料の活用",
"./movies/【中2数学】一次関数",
"./movies/【中2数学】確率",
"./movies/【中2数学】三角形と四角形",
"./movies/【中2数学】式の計算",
"./movies/【中2数学】平行線・多角形・合同",
"./movies/【中2数学】連立方程式",
"./movies/【中3数学】三平方の定理",
"./movies/【中3数学】式の展開と因数分解",
"./movies/【中3数学】相似な図形",
"./movies/【中3数学】二次関数",
"./movies/【中3数学】二次方程式",
"./movies/【中3数学】平方根",
"./movies/【中3数学】円",
"./movies/【高校数学1】集合と命題",
"./movies/【高校数学1】データの分析/",
"./movies/【高校数学1】図形と計量",
]
pinecone_index = pinecone.Index("keyword-search")
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
# define storage context
storage_context = StorageContext.from_defaults(vector_store=vector_store)
url_map = {}
with open("URL.json") as f:
url_map = json.load(f)
def get_url_from_path(path: str):
value = path.split("/")[-1].split(".")[0]
urls = [k for k, v in url_map.items() if value == v]
return urls[0] if len(urls) > 0 else ""
def filename_fn(filename):
"""metadata"""
return {
"url": get_url_from_path(filename),
"file_path": filename,
}
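# Illustrative sketch of what filename_fn attaches to a transcript (the URL is looked up in URL.json,
# which maps YouTube URLs to video titles, via the file's base name):
#   filename_fn("./movies/【中1数学】正の数・負の数/<video title>.txt")
#   -> {"url": "<YouTube URL for that video>", "file_path": "./movies/【中1数学】正の数・負の数/<video title>.txt"}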
for folder in folder_list:
# load documents
documents = SimpleDirectoryReader(folder, file_metadata=filename_fn).load_data()
# create index for vectors
index = VectorStoreIndex.from_documents(
documents, storage_context=storage_context, store_nodes_override=True
)
| [] |
2024-01-10 | tomo823/Laf | write_del.py | # download URL from YouTube and convert it into text files
# mp3 files which was downloaded will be deleted finally
# DONT push this file to GitHub beacause of API key
import openai
import os
import mimetypes
import logging
import sys
from dotenv import load_dotenv
from yt_dlp import YoutubeDL
import argparse
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
def youtube(URLS):
ydl_opts = {
"format": "mp3/bestaudio/best",
"ignoreerrors": True,
"postprocessors": [
{
"key": "FFmpegExtractAudio",
"preferredcodec": "mp3",
}
],
}
with YoutubeDL(ydl_opts) as ydl:
error_code = ydl.download(URLS)
if error_code != 0:
logging.error("Error: " + str(error_code))
else:
logging.info("Downloaded")
for files in os.listdir("."):
if mimetypes.guess_type(files)[0] == "audio/mpeg":
file_name = files.split(".mp3")
video_name.append(file_name[0])
def text(video):
f = open(f"{video}.mp3", "rb")
transcript = openai.Audio.transcribe("whisper-1", f)
with open(f"{video}.txt", "w", encoding="UTF-8") as file:
file.write(transcript["text"]) # type: ignore
file.close()
os.remove(f"{video}.mp3")
if __name__ == "__main__":
# python write_del.py --url $(PLAYLIST_URL) --title $(PLAYLIST_TITLE)
parser = argparse.ArgumentParser()
parser.add_argument("--url", help="URL of the playlist")
parser.add_argument("--title", help="Title of the playlist")
args = parser.parse_args()
if args.url is None:
input("URL: ")
if args.title is None:
input("Title: ")
video_name = []
youtube(args.url)
for video in video_name:
text(video)
| [] |
2024-01-10 | tomo823/Laf | respond.py | # llama-index==0.7.0
# File to respond to the query
import sys
import pinecone
import os
import openai
from llama_index.utils import truncate_text
from llama_index import VectorStoreIndex
from llama_index.vector_stores import PineconeVectorStore
import logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
# Set the API keys
openai.api_key = os.getenv("OPENAI_API_KEY")
pinecone_api_key = os.getenv("PINECONE_API_KEY")
if pinecone_api_key is None:
raise ValueError("Please set your PINECONE_API_KEY environment variable")
pinecone.init(api_key=pinecone_api_key, environment="gcp-starter")
pinecone_index = pinecone.Index("keyword-search")
vector_store = PineconeVectorStore(pinecone_index=pinecone_index)
index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
query_engine = index.as_query_engine(
similarity_top_k=2,
)
def get_query(query: str):
response = query_engine.query(query)
logging.debug(response)
# reference for response
reference = truncate_text(
response.source_nodes[0].node.get_content().strip(), 350
).strip("...")
logging.debug(reference)
metadata: dict = response.metadata if response.metadata else {}
logging.debug(metadata)
urls = [metadata.get(key, {}).get("url", "") for key in metadata.keys()]
return {
"urls": urls,
"responce": str(response),
"reference": reference,
}
if __name__ == "__main__":
query = sys.argv[1] if len(sys.argv) > 1 else "三角形の内角の和は何度?"
res = get_query(query)
print(res["urls"])
| [] |
2024-01-10 | tomo823/Laf | pinecone_create.py | # Description: This file creates a Pinecone index for the keyword search
import pinecone
import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
pinecone_api_key = os.getenv("PINECONE_API_KEY")
if pinecone_api_key is None:
raise ValueError("Please set your PINECONE_API_KEY environment variable")
pinecone.init(api_key=pinecone_api_key, environment="gcp-starter")
pinecone.create_index("keyword-search", dimension=1536, metric="dotproduct")
| [] |
2024-01-10 | fiatrete/OpenDAN-Personal-AI-OS | src~aios_kernel~whisper_node.py | from asyncio import Queue
import asyncio
import openai
import os
import logging
from .compute_node import ComputeNode
from .compute_task import ComputeTask, ComputeTaskResult, ComputeTaskState, ComputeTaskType
logger = logging.getLogger(__name__)
class WhisperComputeNode(ComputeNode):
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._instance.is_start = False
return cls._instance
def __init__(self) -> None:
super().__init__()
if self.is_start is True:
logger.warn("WhisperComputeNode is already start")
return
self.is_start = True
self.node_id = "whisper_node"
self.enable = True
self.task_queue = Queue()
self.open_api_key = None
if self.open_api_key is None and os.getenv("OPENAI_API_KEY") is not None:
self.open_api_key = os.getenv("OPENAI_API_KEY")
if self.open_api_key is None:
raise Exception("WhisperComputeNode open_api_key is None")
self.start()
def start(self):
async def _run_task_loop():
while True:
task = await self.task_queue.get()
try:
result = self._run_task(task)
if result is not None:
task.state = ComputeTaskState.DONE
task.result = result
except Exception as e:
logger.error(f"whisper_node run task error: {e}")
task.state = ComputeTaskState.ERROR
task.result = ComputeTaskResult()
task.result.set_from_task(task)
task.result.worker_id = self.node_id
task.result.result_str = str(e)
asyncio.create_task(_run_task_loop())
def _run_task(self, task: ComputeTask):
task.state = ComputeTaskState.RUNNING
prompt = task.params["prompt"]
response_format = None
if "response_format" in task.params:
response_format = task.params["response_format"]
temperature = None
if "temperature" in task.params:
temperature = task.params["temperature"]
language = None
if "language" in task.params:
language = task.params["language"]
file = task.params["file"]
resp = openai.Audio.transcribe("whisper-1",
file,
self.open_api_key,
prompt=prompt,
response_format=response_format,
temperature=temperature,
language=language)
result = ComputeTaskResult()
result.set_from_task(task)
result.worker_id = self.node_id
result.result_str = resp["text"]
result.result = resp
return result
async def push_task(self, task: ComputeTask, proiority: int = 0):
logger.info(f"whisper_node push task: {task.display()}")
self.task_queue.put_nowait(task)
async def remove_task(self, task_id: str):
pass
def get_task_state(self, task_id: str):
pass
def display(self) -> str:
return f"WhisperComputeNode: {self.node_id}"
def get_capacity(self):
return 0
def is_support(self, task_type: ComputeTaskType) -> bool:
if task_type == ComputeTaskType.VOICE_2_TEXT:
return True
return False
def is_local(self) -> bool:
return False
| [] |
2024-01-10 | fiatrete/OpenDAN-Personal-AI-OS | src~aios_kernel~open_ai_node.py | import openai
import os
import asyncio
from asyncio import Queue
import logging
import json
from .compute_task import ComputeTask, ComputeTaskResult, ComputeTaskState, ComputeTaskType,ComputeTaskResultCode
from .compute_node import ComputeNode
from .storage import AIStorage,UserConfig
logger = logging.getLogger(__name__)
class OpenAI_ComputeNode(ComputeNode):
_instance = None
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = OpenAI_ComputeNode()
return cls._instance
@classmethod
def declare_user_config(cls):
if os.getenv("OPENAI_API_KEY_") is None:
user_config = AIStorage.get_instance().get_user_config()
user_config.add_user_config("openai_api_key","openai api key",False,None)
def __init__(self) -> None:
super().__init__()
self.is_start = False
# openai.organization = "org-AoKrOtF2myemvfiFfnsSU8rF" #buckycloud
self.openai_api_key = None
self.node_id = "openai_node"
self.task_queue = Queue()
async def initial(self):
if os.getenv("OPENAI_API_KEY") is not None:
self.openai_api_key = os.getenv("OPENAI_API_KEY")
else:
self.openai_api_key = AIStorage.get_instance().get_user_config().get_value("openai_api_key")
if self.openai_api_key is None:
logger.error("openai_api_key is None!")
return False
openai.api_key = self.openai_api_key
self.start()
return True
async def push_task(self, task: ComputeTask, proiority: int = 0):
logger.info(f"openai_node push task: {task.display()}")
self.task_queue.put_nowait(task)
async def remove_task(self, task_id: str):
pass
def _run_task(self, task: ComputeTask):
task.state = ComputeTaskState.RUNNING
result = ComputeTaskResult()
result.result_code = ComputeTaskResultCode.ERROR
result.set_from_task(task)
match task.task_type:
case ComputeTaskType.TEXT_EMBEDDING:
model_name = task.params["model_name"]
input = task.params["input"]
logger.info(f"call openai {model_name} input: {input}")
try:
resp = openai.Embedding.create(model=model_name,
input=input)
except Exception as e:
logger.error(f"openai run TEXT_EMBEDDING task error: {e}")
task.state = ComputeTaskState.ERROR
task.error_str = str(e)
result.error_str = str(e)
return result
# resp = {
# "object": "list",
# "data": [
# {
# "object": "embedding",
# "index": 0,
# "embedding": [
# -0.00930514745414257,
# 0.00765434792265296,
# -0.007167573552578688,
# -0.012373941019177437,
# -0.04884673282504082
# ]}]
# }
logger.info(f"openai response: {resp}")
task.state = ComputeTaskState.DONE
result.result_code = ComputeTaskResultCode.OK
result.worker_id = self.node_id
result.result_str = resp["data"][0]["embedding"]
return result
case ComputeTaskType.LLM_COMPLETION:
model_name = task.params["model_name"]
prompts = task.params["prompts"]
max_token_size = task.params.get("max_token_size")
llm_inner_functions = task.params.get("inner_functions")
if max_token_size is None:
max_token_size = 4000
result_token = max_token_size
try:
if llm_inner_functions is None:
logger.info(f"call openai {mode_name} prompts: {prompts}")
resp = openai.ChatCompletion.create(model=mode_name,
messages=prompts,
#max_tokens=result_token,
temperature=0.7)
else:
logger.info(f"call openai {mode_name} prompts: {prompts} functions: {json.dumps(llm_inner_functions)}")
resp = openai.ChatCompletion.create(model=mode_name,
messages=prompts,
functions=llm_inner_functions,
#max_tokens=result_token,
temperature=0.7) # TODO: add temperature to task params?
except Exception as e:
logger.error(f"openai run LLM_COMPLETION task error: {e}")
task.state = ComputeTaskState.ERROR
task.error_str = str(e)
result.error_str = str(e)
return result
logger.info(f"openai response: {json.dumps(resp, indent=4)}")
status_code = resp["choices"][0]["finish_reason"]
token_usage = resp.get("usage")
match status_code:
case "function_call":
task.state = ComputeTaskState.DONE
case "stop":
task.state = ComputeTaskState.DONE
case _:
task.state = ComputeTaskState.ERROR
task.error_str = f"The status code was {status_code}."
result.error_str = f"The status code was {status_code}."
result.result_code = ComputeTaskResultCode.ERROR
return result
result.result_code = ComputeTaskResultCode.OK
result.worker_id = self.node_id
result.result_str = resp["choices"][0]["message"]["content"]
result.result["message"] = resp["choices"][0]["message"]
if token_usage:
result.result_refers["token_usage"] = token_usage
logger.info(f"openai success response: {result.result_str}")
return result
case _:
task.state = ComputeTaskState.ERROR
task.error_str = f"ComputeTask's TaskType : {task.task_type} not support!"
result.error_str = f"ComputeTask's TaskType : {task.task_type} not support!"
return None
def start(self):
if self.is_start is True:
return
self.is_start = True
async def _run_task_loop():
while True:
task = await self.task_queue.get()
logger.info(f"openai_node get task: {task.display()}")
result = self._run_task(task)
if result is not None:
task.state = ComputeTaskState.DONE
task.result = result
asyncio.create_task(_run_task_loop())
def display(self) -> str:
return f"OpenAI_ComputeNode: {self.node_id}"
def get_task_state(self, task_id: str):
pass
def get_capacity(self):
pass
def is_support(self, task: ComputeTask) -> bool:
if task.task_type == ComputeTaskType.LLM_COMPLETION:
if not task.params["model_name"]:
return True
model_name : str = task.params["model_name"]
if model_name.startswith("gpt-"):
return True
#if task.task_type == ComputeTaskType.TEXT_EMBEDDING:
# if task.params["model_name"] == "text-embedding-ada-002":
# return True
return False
def is_local(self) -> bool:
return False
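# Usage sketch (illustrative; assumes OPENAI_API_KEY is available via the environment or user config):
# node = OpenAI_ComputeNode.get_instance()
# ok = await node.initial()  # loads the API key and starts the task loop
# if ok:
#     await node.push_task(llm_task)  # llm_task: a hypothetical ComputeTask of type LLM_COMPLETION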
| [] |
2024-01-10 | fiatrete/OpenDAN-Personal-AI-OS | PoC~agent_jarvis~jarvis~ai_agent~gpt_agent.py | import asyncio
import contextlib
import json
import time
from typing import Dict, List
from openai.error import RateLimitError
from jarvis import CFG
from jarvis.ai_agent.agent_utils import must_not_be_valid_json, get_thoughts, get_function, execute_function
from jarvis.ai_agent.base_agent import BaseAgent
from jarvis.functional_modules.functional_module import CallerContext, moduleRegistry
from jarvis.gpt import token_counter, gpt
from jarvis.json_utils.json_fix_llm import fix_json_using_multiple_techniques
from jarvis.json_utils.utilities import validate_json
from jarvis.logger import logger
def _generate_first_prompt():
return """I will ask you questions or ask you to do something. You should:
First, determine if you know the answer of the question or you can accomplish the task directly.
If so, response directly.
If not, try to complete the task by calling the functions below.
If you can't accomplish the task by yourself and no function is able to accomplish the task, say "Dear master, sorry, I'm not able to do that."
Your setup:
```
{
"author": "OpenDAN",
"name": "Jarvis",
}
```"""
class GptAgent(BaseAgent):
_system_prompt: str
_full_message_history: List[dict] = []
_message_tokens: List[int] = []
def __init__(self, caller_context: CallerContext):
super().__init__(caller_context)
self._system_prompt = _generate_first_prompt()
logger.debug(f"Using GptAgent, system prompt is: {self._system_prompt}")
logger.debug(f"{json.dumps(moduleRegistry.to_json_schema())}")
async def _feed_prompt_to_get_response(self, prompt):
reply_type, assistant_reply = await self._chat_with_ai(
self._system_prompt,
prompt,
CFG.token_limit,
)
if reply_type == "content":
return {
"speak": assistant_reply,
}
elif reply_type == "function_call":
arguments_string = assistant_reply["arguments"]
try:
arguments = json.loads(arguments_string)
except:
arguments = await fix_json_using_multiple_techniques(arguments_string)
return {
"function": assistant_reply["name"],
"arguments": arguments
}
async def feed_prompt(self, prompt):
# Send message to AI, get response
logger.debug(f"Trigger: {prompt}")
reply: Dict = None
for i in range(3):
try:
if i == 0:
reply = await self._feed_prompt_to_get_response(prompt)
else:
reply = await self._feed_prompt_to_get_response(
prompt + ". Remember to reply using the specified JSON form")
break
except Exception as e:
# TODO: Feed the error to ChatGPT?
logger.debug(f"Failed to get reply, try again! {str(e)}")
continue
if reply is None:
await self._caller_context.reply_text("Sorry, but I don't understand what you want me to do.")
return
# Execute function
function_name: str = reply.get("function")
if function_name is None:
await self._caller_context.reply_text(reply["speak"])
else:
arguments: Dict = reply["arguments"]
function_result = "Failed"
try:
function_result = await execute_function(self._caller_context, function_name, **arguments)
finally:
result = f"{function_result}"
# Check if there's a result from the function append it to the message
# history
if result is not None:
self.append_history_message_raw({"role": "function", "name": function_name, "content": result})
logger.debug(f"function: {result}")
else:
self.append_history_message_raw({"role": "function", "name": function_name, "content": "Unable to execute function"})
logger.debug("function: Unable to execute function")
def append_history_message(self, role: str, content: str):
self._full_message_history.append({'role': role, 'content': content})
self._message_tokens.append(-1)
def append_history_message_raw(self, msg: dict):
self._full_message_history.append(msg)
self._message_tokens.append(-1)
def clear_history_messages(self):
self._full_message_history.clear()
self._message_tokens.clear()
def save_history(self, to_where):
with open(to_where, "w") as f:
assert len(self._message_tokens) == len(self._full_message_history)
s = json.dumps([
self._message_tokens,
self._full_message_history,
])
f.write(s)
def load_history(self, from_where):
with contextlib.suppress(Exception):
with open(from_where, "r") as f:
tmp = json.loads(f.read())
if isinstance(tmp, list) and len(tmp[0]) == len(tmp[1]):
self._message_tokens = tmp[0]
self._full_message_history = tmp[1]
async def _chat_with_ai(
self, prompt, user_input, token_limit
):
"""Interact with the OpenAI API, sending the prompt, user input, message history,
and permanent memory."""
while True:
try:
model = CFG.llm_model
# Reserve 1000 tokens for the response
send_token_limit = token_limit - 1000
(
next_message_to_add_index,
current_tokens_used,
insertion_index,
current_context,
) = await self._generate_context(prompt, model)
current_tokens_used += await token_counter.count_message_tokens(
[{"role": "user", "content": user_input}], model
) # Account for user input (appended later)
# TODO: OpenAI does not document how to count function tokens; we use this method to get a rough count.
# Its result looks much larger than OpenAI's reported usage.
current_tokens_used += await token_counter.count_message_tokens(
[{"role": "user", "content": json.dumps(moduleRegistry.to_json_schema())}], model
)
while next_message_to_add_index >= 0:
# print (f"CURRENT TOKENS USED: {current_tokens_used}")
tokens_to_add = await self._get_history_message_tokens(next_message_to_add_index, model)
if current_tokens_used + tokens_to_add > send_token_limit:
break
message_to_add = self._full_message_history[next_message_to_add_index]
# Add the most recent message to the start of the current context,
# after the two system prompts.
current_context.insert(insertion_index, message_to_add)
# Count the currently used tokens
current_tokens_used += tokens_to_add
# Move to the next most recent message in the full message history
next_message_to_add_index -= 1
# Append user input, the length of this is accounted for above
current_context.extend([{"role": "user", "content": user_input}])
# Calculate remaining tokens
tokens_remaining = token_limit - current_tokens_used
assert tokens_remaining >= 0
async def on_single_chat_timeout(will_retry):
await self._caller_context.push_notification(
f'Thinking timeout{", retry" if will_retry else ", give up"}.')
reply_type, assistant_reply = await gpt.acreate_chat_completion(
model=model,
messages=current_context,
temperature=CFG.temperature,
max_tokens=tokens_remaining,
on_single_request_timeout=on_single_chat_timeout,
functions=moduleRegistry.to_json_schema()
)
# Update full message history
if reply_type == "content":
self.append_history_message("user", user_input)
self.append_history_message("assistant", assistant_reply)
pass
elif reply_type == "function_call":
self.append_history_message("user", user_input)
self.append_history_message_raw({"role": "assistant", "function_call": assistant_reply, "content": None})
pass
else:
assert False, "Unexpected reply type"
return reply_type, assistant_reply
except RateLimitError:
# TODO: When we switch to langchain, or something else this is built in
print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
await asyncio.sleep(10)
async def _generate_context(self, prompt, model):
# We use the timezone of the session
timestamp = time.time() + time.timezone + self._caller_context.get_tz_offset() * 3600
time_str = time.strftime('%c', time.localtime(timestamp))
current_context = [
{"role": "system", "content": prompt},
{"role": "system", "content": f"The current time and date is {time_str}"},
]
# Add messages from the full message history until we reach the token limit
next_message_to_add_index = len(self._full_message_history) - 1
insertion_index = len(current_context)
# Count the currently used tokens
current_tokens_used = await token_counter.count_message_tokens(current_context, model)
return (
next_message_to_add_index,
current_tokens_used,
insertion_index,
current_context,
)
async def _get_history_message_tokens(self, index, model: str = "gpt-3.5-turbo-0301") -> int:
if self._message_tokens[index] == -1:
# since counting tokens is relatively slow, we cache the result here
self._message_tokens[index] = await token_counter.count_message_tokens([self._full_message_history[index]], model)
return self._message_tokens[index]
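# Usage sketch (illustrative; CallerContext is an interface defined elsewhere in this package):
# agent = GptAgent(my_caller_context)  # my_caller_context: a hypothetical CallerContext instance
# await agent.feed_prompt("Summarize my unread emails")  # replies directly or calls a registered function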
| [
"Unable to execute function",
"The current time and date is PLACEHOLDER",
"None"
] |
2024-01-10 | fiatrete/OpenDAN-Personal-AI-OS | src~aios_kernel~__init__.py | from .environment import Environment,EnvironmentEvent
from .agent_message import AgentMsg,AgentMsgStatus,AgentMsgType
from .chatsession import AIChatSession
from .agent import AIAgent,AIAgentTemplete,AgentPrompt
from .compute_kernel import ComputeKernel,ComputeTask,ComputeTaskResult,ComputeTaskState,ComputeTaskType
from .compute_node import ComputeNode,LocalComputeNode
from .open_ai_node import OpenAI_ComputeNode
from .knowledge_base import KnowledgeBase, KnowledgeEnvironment
from .knowledge_pipeline import KnowledgeEmailSource, KnowledgeDirSource, KnowledgePipline
from .role import AIRole,AIRoleGroup
from .workflow import Workflow
from .bus import AIBus
from .workflow_env import WorkflowEnvironment,CalenderEnvironment,CalenderEvent,PaintEnvironment
from .local_llama_compute_node import LocalLlama_ComputeNode
from .whisper_node import WhisperComputeNode
from .google_text_to_speech_node import GoogleTextToSpeechNode
from .tunnel import AgentTunnel
from .tg_tunnel import TelegramTunnel
from .email_tunnel import EmailTunnel
from .storage import ResourceLocation,AIStorage,UserConfig,UserConfigItem
from .contact_manager import ContactManager,Contact,FamilyMember
from .text_to_speech_function import TextToSpeechFunction
from .workspace_env import WorkspaceEnvironment
from .local_stability_node import Local_Stability_ComputeNode
from .stability_node import Stability_ComputeNode
from .local_st_compute_node import LocalSentenceTransformer_Text_ComputeNode,LocalSentenceTransformer_Image_ComputeNode
from .compute_node_config import ComputeNodeConfig
AIOS_Version = "0.5.1, build 2023-9-28"
| [] |
2024-01-10 | zazikant/colab_pro_POST_Request | functions.py | from dotenv import find_dotenv, load_dotenv
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
import os
import openai
import pprint
import json
import pandas as pd
from pandasai.llm.openai import OpenAI
from dotenv import load_dotenv
import re
import requests
import csv
import matplotlib.pyplot as plt
import io
load_dotenv(find_dotenv())
load_dotenv()
from dotenv import find_dotenv, load_dotenv
import pandas as pd
from pandasai import SmartDataframe
from pandasai.llm import OpenAI
from langchain.document_loaders import PyPDFLoader
from dotenv import load_dotenv
import os
import openai
from langchain.llms import OpenAI
from dotenv import load_dotenv, find_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import ResponseSchema
from langchain.output_parsers import StructuredOutputParser
from langchain.memory import ConversationSummaryBufferMemory
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import DirectoryLoader, CSVLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
# Load the environment variables from the .env file
load_dotenv()
from langchain.prompts import ChatPromptTemplate
from langchain import PromptTemplate, LLMChain
def parser(text):
llm = OpenAI()
context = text.strip()
email_schema = ResponseSchema(
name="email_parser",
description="extract the email id from the text. If required, strip and correct it in format like [email protected]. Only provide these words. If no email id is present, return [email protected]",
)
subject_schema = ResponseSchema(
name="content", description="Just extract the content removing email ids. Do not add any interpretation."
)
response_schemas = [email_schema, subject_schema]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = parser.get_format_instructions()
template = """
Interprete the text and evaluate the text.
email_parser: extract the email id from the text. Only provide these words. If no email id is present, return [email protected]. Use 1 line.
content: Just extract the content removing email ids. Do not add any interpretation.
text: {context}
Just return the JSON, do not add ANYTHING, NO INTERPRETATION!
{format_instructions}:"""
#important to have the format instructions in the template, represented as {format_instructions}
#note that the format instructions describe the JSON output as key/value pairs (there can be multiple pairs); all the context and input variables should be written above them in the template.
prompt = PromptTemplate(
input_variables=["context", "format_instructions"],
template=template
)
chain = LLMChain(llm=llm, prompt=prompt, output_key= "testi")
response = chain.run({"context": context, "format_instructions": format_instructions})
output_dict = parser.parse(response)
return output_dict
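# Example of the structured dict parser() is expected to return (illustrative values only):
# parser("Please send the report to [email protected] tomorrow")
# -> {"email_parser": "[email protected]", "content": "Please send the report tomorrow"}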
def draft_email(user_input):
loader = DirectoryLoader(
"./shashi", glob="**/*.csv", loader_cls=CSVLoader, show_progress=True
)
docs = loader.load()
#textsplitter-----------------
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=400,
chunk_overlap=2,
)
docs = text_splitter.split_documents(docs)
# print(docs[3].page_content)
#-----------------
from langchain.embeddings import OpenAIEmbeddings
openai_embeddings = OpenAIEmbeddings()
from langchain.vectorstores.faiss import FAISS
import pickle
#Very important - db below is used for similarity search and is not used by agents in tools
db = FAISS.from_documents(docs, openai_embeddings)
import pickle
with open("db.pkl", "wb") as f:
pickle.dump(db, f)
with open("db.pkl", "rb") as f:
db = pickle.load(f)
parser_output = parser(user_input)
email = parser_output["email_parser"]
content = parser_output["content"]
docs = db.similarity_search(content, k=8)
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.7)
# template = """
# you are a pediatric dentist and you are writing a key features serial wise for following information:
# text: {context}
# """
map_prompt = """
Write a concise summary of the following:
"{text}"
CONCISE SUMMARY:
"""
map_prompt_template = PromptTemplate(template=map_prompt, input_variables=["text"])
combine_prompt = """
You are a summarisation expert. Focus on maintaining a coherent flow and using proper grammar and language. Write a detailed summary of the following text:
"{text}"
SUMMARY:
"""
combine_prompt_template = PromptTemplate(template=combine_prompt, input_variables=["text"])
summary_chain = load_summarize_chain(llm=llm,
chain_type='map_reduce',
map_prompt=map_prompt_template,
combine_prompt=combine_prompt_template)
response = summary_chain.run({"input_documents": docs})
return email, response | [
"\n Interprete the text and evaluate the text.\n email_parser: extract the email id from the text. Only provide these words. If no email id is present, return [email protected]. Use 1 line.\n content: Just extract the content removing email ids. Do not add any interpretation.\n\n text: {context}\n\n Just return the JSON, do not add ANYTHING, NO INTERPRETATION!\n {format_instructions}:",
"format_instructions",
"\n Write a concise summary of the following:\n \"{text}\"\n CONCISE SUMMARY:\n ",
"context",
"\n You are a summarisation expert. Focus on maintaining a coherent flow and using proper grammar and language. Write a detailed summary of the following text:\n \"{text}\"\n SUMMARY:\n "
] |
2024-01-10 | eduumach/bot-discord | bot~utils~openia.py | import openai
from bot.config import OPENAI_KEY
openai.api_key = OPENAI_KEY
def openia_api(prompt):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user",
"content": prompt}],
)
return response.choices[0].message.content
def openia_image(prompt):
response = openai.Image.create(
prompt=prompt,
n=1,
size="256x256",
)
return response["data"][0]["url"]
| [] |
2024-01-10 | Grifunf/mne-python | tutorials~simulation~80_dics.py | """
.. _tut-dics:
======================
DICS for power mapping
======================
In this tutorial, we'll simulate two signals originating from two
locations on the cortex. These signals will be sinusoids, so we'll be looking
at oscillatory activity (as opposed to evoked activity).
We'll use dynamic imaging of coherent sources (DICS) :footcite:`GrossEtAl2001`
to map out spectral power along the cortex. Let's see if we can find our two
simulated sources.
"""
# Author: Marijn van Vliet <[email protected]>
#
# License: BSD-3-Clause
# %%
# Setup
# -----
# We first import the required packages to run this tutorial and define a list
# of filenames for various things we'll be using.
import numpy as np
from matplotlib import pyplot as plt
from scipy.signal import coherence, unit_impulse, welch
import mne
from mne.beamformer import apply_dics_csd, make_dics
from mne.datasets import sample
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.simulation import add_noise, simulate_raw
from mne.time_frequency import csd_morlet
# We use the MEG and MRI setup from the MNE-sample dataset
data_path = sample.data_path(download=False)
subjects_dir = data_path / "subjects"
# Filenames for various files we'll be using
meg_path = data_path / "MEG" / "sample"
raw_fname = meg_path / "sample_audvis_raw.fif"
fwd_fname = meg_path / "sample_audvis-meg-eeg-oct-6-fwd.fif"
cov_fname = meg_path / "sample_audvis-cov.fif"
fwd = mne.read_forward_solution(fwd_fname)
# Seed for the random number generator
rand = np.random.RandomState(42)
# %%
# Data simulation
# ---------------
#
# The following function generates a timeseries that contains an oscillator,
# whose frequency fluctuates a little over time, but stays close to 10 Hz.
# We'll use this function to generate our two signals.
sfreq = 50.0 # Sampling frequency of the generated signal
n_samp = int(round(10.0 * sfreq))
times = np.arange(n_samp) / sfreq # 10 seconds of signal
n_times = len(times)
def coh_signal_gen():
"""Generate an oscillating signal.
Returns
-------
signal : ndarray
The generated signal.
"""
t_rand = 0.001 # Variation in the instantaneous frequency of the signal
std = 0.1 # Std-dev of the random fluctuations added to the signal
base_freq = 10.0 # Base frequency of the oscillators in Hertz
n_times = len(times)
# Generate an oscillator with varying frequency and phase lag.
signal = np.sin(
2.0
* np.pi
* (
base_freq * np.arange(n_times) / sfreq
+ np.cumsum(t_rand * rand.randn(n_times))
)
)
# Add some random fluctuations to the signal.
signal += std * rand.randn(n_times)
# Scale the signal to be in the right order of magnitude (~100 nAm)
# for MEG data.
signal *= 100e-9
return signal
# %%
# Let's simulate two timeseries and plot some basic information about them.
signal1 = coh_signal_gen()
signal2 = coh_signal_gen()
fig, axes = plt.subplots(2, 2, figsize=(8, 4), layout="constrained")
# Plot the timeseries
ax = axes[0][0]
ax.plot(times, 1e9 * signal1, lw=0.5)
ax.set(
xlabel="Time (s)", xlim=times[[0, -1]], ylabel="Amplitude (Am)", title="Signal 1"
)
ax = axes[0][1]
ax.plot(times, 1e9 * signal2, lw=0.5)
ax.set(xlabel="Time (s)", xlim=times[[0, -1]], title="Signal 2")
# Power spectrum of the first timeseries
f, p = welch(signal1, fs=sfreq, nperseg=128, nfft=256)
ax = axes[1][0]
# Only plot the first 100 frequencies
ax.plot(f[:100], 20 * np.log10(p[:100]), lw=1.0)
ax.set(
xlabel="Frequency (Hz)",
xlim=f[[0, 99]],
ylabel="Power (dB)",
title="Power spectrum of signal 1",
)
# Compute the coherence between the two timeseries
f, coh = coherence(signal1, signal2, fs=sfreq, nperseg=100, noverlap=64)
ax = axes[1][1]
ax.plot(f[:50], coh[:50], lw=1.0)
ax.set(
xlabel="Frequency (Hz)",
xlim=f[[0, 49]],
ylabel="Coherence",
title="Coherence between the timeseries",
)
# %%
# Now we put the signals at two locations on the cortex. We construct a
# :class:`mne.SourceEstimate` object to store them in.
#
# The timeseries will have a part where the signal is active and a part where
# it is not. The techniques we'll be using in this tutorial depend on being
# able to contrast data that contains the signal of interest versus data that
# does not (i.e. it contains only noise).
# The locations on the cortex where the signal will originate from. These
# locations are indicated as vertex numbers.
vertices = [[146374], [33830]]
# Construct SourceEstimates that describe the signals at the cortical level.
data = np.vstack((signal1, signal2))
stc_signal = mne.SourceEstimate(
data, vertices, tmin=0, tstep=1.0 / sfreq, subject="sample"
)
stc_noise = stc_signal * 0.0
# %%
# Before we simulate the sensor-level data, let's define a signal-to-noise
# ratio. You are encouraged to play with this parameter and see the effect of
# noise on our results.
snr = 1.0 # Signal-to-noise ratio. Decrease to add more noise.
# %%
# Now we run the signal through the forward model to obtain simulated sensor
# data. To save computation time, we'll only simulate gradiometer data. You can
# try simulating other types of sensors as well.
#
# Some noise is added based on the baseline noise covariance matrix from the
# sample dataset, scaled to implement the desired SNR.
# Read the info from the sample dataset. This defines the location of the
# sensors and such.
info = mne.io.read_raw(raw_fname).crop(0, 1).resample(50).info
# Only use gradiometers
picks = mne.pick_types(info, meg="grad", stim=True, exclude=())
mne.pick_info(info, picks, copy=False) # modifies info in-place
# Define a covariance matrix for the simulated noise. In this tutorial, we use
# a simple diagonal matrix.
cov = mne.cov.make_ad_hoc_cov(info)
cov["data"] *= (20.0 / snr) ** 2 # Scale the noise to achieve the desired SNR
# Simulate the raw data, with a lowpass filter on the noise
stcs = [
(stc_signal, unit_impulse(n_samp, dtype=int) * 1),
(stc_noise, unit_impulse(n_samp, dtype=int) * 2),
] # stacked in time
duration = (len(stc_signal.times) * 2) / sfreq
raw = simulate_raw(info, stcs, forward=fwd)
add_noise(raw, cov, iir_filter=[4, -4, 0.8], random_state=rand)
# %%
# We create an :class:`mne.Epochs` object containing two trials: one with
# both noise and signal and one with just noise
events = mne.find_events(raw, initial_event=True)
tmax = (len(stc_signal.times) - 1) / sfreq
epochs = mne.Epochs(
raw,
events,
event_id=dict(signal=1, noise=2),
tmin=0,
tmax=tmax,
baseline=None,
preload=True,
)
assert len(epochs) == 2 # ensure that we got the two expected events
# Plot some of the channels of the simulated data that are situated above one
# of our simulated sources.
picks = mne.read_vectorview_selection("Left-frontal") # contains both mag and grad
picks = [p for p in picks if p in epochs.ch_names] # now only grads
epochs.plot(picks=picks, events=True)
# %%
# Power mapping
# -------------
# With our simulated dataset ready, we can now pretend to be researchers that
# have just recorded this from a real subject and are going to study what parts
# of the brain communicate with each other.
#
# First, we'll create a source estimate of the MEG data. We'll use both a
# straightforward MNE-dSPM inverse solution for this, and the DICS beamformer
# which is specifically designed to work with oscillatory data.
# %%
# Computing the inverse using MNE-dSPM:
# Compute the inverse operator
fwd = mne.read_forward_solution(fwd_fname)
inv = make_inverse_operator(epochs.info, fwd, cov)
# Apply the inverse model to the trial that also contains the signal.
s = apply_inverse(epochs["signal"].average(), inv)
# Take the root-mean square along the time dimension and plot the result.
s_rms = np.sqrt((s**2).mean())
title = "MNE-dSPM inverse (RMS)"
brain = s_rms.plot(
"sample",
subjects_dir=subjects_dir,
hemi="both",
figure=1,
size=600,
time_label=title,
title=title,
)
# Indicate the true locations of the source activity on the plot.
brain.add_foci(vertices[0][0], coords_as_verts=True, hemi="lh")
brain.add_foci(vertices[1][0], coords_as_verts=True, hemi="rh")
# Rotate the view and add a title.
brain.show_view(azimuth=0, elevation=0, distance=550, focalpoint=(0, 0, 0))
# %%
# We will now compute the cortical power map at 10 Hz. using a DICS beamformer.
# A beamformer will construct for each vertex a spatial filter that aims to
# pass activity originating from the vertex, while dampening activity from
# other sources as much as possible.
#
# The :func:`mne.beamformer.make_dics` function has many switches that offer
# precise control
# over the way the filter weights are computed. Currently, there is no clear
# consensus regarding the best approach. This is why we will demonstrate two
# approaches here:
#
# 1. The approach as described in :footcite:`vanVlietEtAl2018`, which first
# normalizes the forward solution and computes a vector beamformer.
# 2. The scalar beamforming approach based on
# :footcite:`SekiharaNagarajan2008`, which uses weight normalization
# instead of normalizing the forward solution.
# Estimate the cross-spectral density (CSD) matrix on the trial containing the
# signal.
csd_signal = csd_morlet(epochs["signal"], frequencies=[10])
# Compute the spatial filters for each vertex, using two approaches.
filters_approach1 = make_dics(
info,
fwd,
csd_signal,
reg=0.05,
pick_ori="max-power",
depth=1.0,
inversion="single",
weight_norm=None,
real_filter=True,
)
print(filters_approach1)
filters_approach2 = make_dics(
info,
fwd,
csd_signal,
reg=0.05,
pick_ori="max-power",
depth=None,
inversion="matrix",
weight_norm="unit-noise-gain",
real_filter=True,
)
print(filters_approach2)
# You can save these to disk with:
# filters_approach1.save('filters_1-dics.h5')
# Compute the DICS power map by applying the spatial filters to the CSD matrix.
power_approach1, f = apply_dics_csd(csd_signal, filters_approach1)
power_approach2, f = apply_dics_csd(csd_signal, filters_approach2)
# %%
# Plot the DICS power maps for both approaches, starting with the first:
def plot_approach(power, n):
"""Plot the results on a brain."""
title = "DICS power map, approach %d" % n
brain = power.plot(
"sample",
subjects_dir=subjects_dir,
hemi="both",
size=600,
time_label=title,
title=title,
)
# Indicate the true locations of the source activity on the plot.
brain.add_foci(vertices[0][0], coords_as_verts=True, hemi="lh", color="b")
brain.add_foci(vertices[1][0], coords_as_verts=True, hemi="rh", color="b")
# Rotate the view and add a title.
brain.show_view(azimuth=0, elevation=0, distance=550, focalpoint=(0, 0, 0))
return brain
brain1 = plot_approach(power_approach1, 1)
# %%
# Now the second:
brain2 = plot_approach(power_approach2, 2)
# %%
# Excellent! All methods found our two simulated sources. Of course, with a
# signal-to-noise ratio (SNR) of 1, it isn't very hard to find them. You can
# try playing with the SNR and see how the MNE-dSPM and DICS approaches hold up
# in the presence of increasing noise. In the presence of more noise, you may
# need to increase the regularization parameter of the DICS beamformer.
#
# References
# ----------
# .. footbibliography::
| [] |
2024-01-10 | poisonwine/Tianchi-LLM-retrieval | pdfparser.py | import os
from typing import List
import re
import tqdm
from langchain.schema import Document
import spacy
import PyPDF2
def extract_page_text(filepath, max_len=256):
page_content = []
spliter = spacy.load("zh_core_web_sm")
chunks = []
with open(filepath, 'rb') as f:
pdf_reader = PyPDF2.PdfReader(f)
page_count = 10  # skip the first 10 pages
pattern = r'^\d{1,3}'  # leading page number (1-3 digits) to strip from each page's text
for page in tqdm.tqdm(pdf_reader.pages[page_count:]):
page_text = page.extract_text().strip()
raw_text = [text.strip() for text in page_text.split('\n')]
new_text = '\n'.join(raw_text[1:])
new_text = re.sub(pattern, '', new_text).strip()
page_content.append(new_text)
max_chunk_length = max_len  # maximum chunk length
current_chunk = ""
if len(new_text) > 10:
for sentence in spliter(new_text).sents:
sentence_text = sentence.text
if len(current_chunk) + len(sentence_text) <= max_chunk_length:
current_chunk += sentence_text
else:
chunks.append(Document(page_content=current_chunk, metadata={'page':page_count+1}))
current_chunk = sentence_text
# add the last chunk (if any)
if current_chunk:
chunks.append(Document(page_content=current_chunk, metadata={'page':page_count+1}))
page_count += 1
cleaned_chunks = []
i = 0
while i <= len(chunks)-2:  # simple merge of short chunks with their neighbour for more context
current_chunk = chunks[i]
next_chunk = chunks[min(i+1, len(chunks)-1)]
if len(next_chunk.page_content) < 0.5 * len(current_chunk.page_content):
new_chunk = Document(page_content=current_chunk.page_content + next_chunk.page_content, metadata=current_chunk.metadata)
cleaned_chunks.append(new_chunk)
i += 2
else:
i+=1
cleaned_chunks.append(current_chunk)
return cleaned_chunks
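# Usage sketch (illustrative; the PDF path is hypothetical):
# chunks = extract_page_text("./data/report.pdf", max_len=256)
# print(len(chunks), chunks[0].page_content, chunks[0].metadata)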
| [] |
2024-01-10 | thomas-yanxin/langchain-ChatGLM | chatglm_llm.py | from langchain.llms.base import LLM
from typing import Optional, List
from langchain.llms.utils import enforce_stop_tokens
from transformers import AutoTokenizer, AutoModel
"""ChatGLM_G is a wrapper around the ChatGLM model to fit LangChain framework. May not be an optimal implementation"""
class ChatGLM(LLM):
max_token: int = 10000
temperature: float = 0.1
top_p = 0.9
history = []
tokenizer = AutoTokenizer.from_pretrained(
"THUDM/chatglm-6b",
trust_remote_code=True
)
model = (
AutoModel.from_pretrained(
"THUDM/chatglm-6b",
trust_remote_code=True)
.half()
.cuda()
)
def __init__(self):
super().__init__()
@property
def _llm_type(self) -> str:
return "ChatGLM"
def _call(self,
prompt: str,
stop: Optional[List[str]] = None) -> str:
response, updated_history = self.model.chat(
self.tokenizer,
prompt,
history=self.history,
max_length=self.max_token,
temperature=self.temperature,
)
print("history: ", self.history)
if stop is not None:
response = enforce_stop_tokens(response, stop)
self.history = updated_history
return response
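# Usage sketch (illustrative; assumes the THUDM/chatglm-6b weights can be downloaded and a CUDA GPU is available):
# llm = ChatGLM()
# print(llm("Hello, please introduce yourself"))  # invokes _call() through the LangChain LLM interface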
| [] |
2024-01-10 | thomas-yanxin/langchain-ChatGLM | knowledge_based_chatglm.py | from langchain.prompts.prompt import PromptTemplate
from langchain.chains import ChatVectorDBChain
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.document_loaders import UnstructuredFileLoader
from chatglm_llm import ChatGLM
def init_knowledge_vector_store(filepath):
embeddings = HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese", )
loader = UnstructuredFileLoader(filepath, mode="elements")
docs = loader.load()
vector_store = FAISS.from_documents(docs, embeddings)
return vector_store
def get_knowledge_based_answer(query, vector_store, chat_history=[]):
system_template = """基于以下内容,简洁和专业的来回答用户的问题。
如果无法从中得到答案,请说 "不知道" 或 "没有足够的相关信息",不要试图编造答案。答案请使用中文。
----------------
{context}
----------------
"""
messages = [
SystemMessagePromptTemplate.from_template(system_template),
HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
condese_propmt_template = """任务: 给一段对话和一个后续问题,将后续问题改写成一个独立的问题。确保问题是完整的,没有模糊的指代。
----------------
聊天记录:
{chat_history}
----------------
后续问题:{question}
----------------
改写后的独立、完整的问题:"""
new_question_prompt = PromptTemplate.from_template(condese_propmt_template)
chatglm = ChatGLM()
chatglm.history = chat_history
knowledge_chain = ChatVectorDBChain.from_llm(
llm=chatglm,
vectorstore=vector_store,
qa_prompt=prompt,
condense_question_prompt=new_question_prompt,
)
knowledge_chain.return_source_documents = True
knowledge_chain.top_k_docs_for_context = 10
result = knowledge_chain({"question": query, "chat_history": chat_history})
return result, chatglm.history
if __name__ == "__main__":
filepath = input("Input your local knowledge file path 请输入本地知识文件路径:")
vector_store = init_knowledge_vector_store(filepath)
history = []
while True:
query = input("Input your question 请输入问题:")
resp, history = get_knowledge_based_answer(query=query,
vector_store=vector_store,
chat_history=history)
print(resp)
| [
"基于以下内容,简洁和专业的来回答用户的问题。\n 如果无法从中得到答案,请说 \"不知道\" 或 \"没有足够的相关信息\",不要试图编造答案。答案请使用中文。\n ----------------\n {context}\n ----------------\n ",
"{question}",
"任务: 给一段对话和一个后续问题,将后续问题改写成一个独立的问题。确保问题是完整的,没有模糊的指代。\n ----------------\n 聊天记录:\n {chat_history}\n ----------------\n 后续问题:{question}\n ----------------\n 改写后的独立、完整的问题:"
] |
2024-01-10 | gerardburgues/Langchain_Chatbot | csvmain.py | from langchain.agents import create_csv_agent
from langchain.llms import OpenAI
from dotenv import load_dotenv
import os
import streamlit as st
from dotenv import load_dotenv
import pandas as pd
# Create a List of Documents from all of our files in the ./docs folder
load_dotenv()
openai_api_key = os.getenv("OPENAI_KEY")
def main():
load_dotenv()
# Load the OpenAI API key from the environment variable
st.set_page_config(page_title="Ask your CSV")
st.header("Ask your CSV 📈")
csv_file = st.file_uploader("Upload a CSV file", type="csv")
if csv_file is not None:
agent = create_csv_agent(
OpenAI(
temperature=0,
openai_api_key=openai_api_key,
),
csv_file,
verbose=True,
)
user_question = st.text_input("Ask a question about your CSV: ")
if user_question is not None and user_question != "":
with st.spinner(text="In progress..."):
st.write(agent.run(user_question))
if __name__ == "__main__":
main()
| [] |
2024-01-10 | razgaon/anthropic-docify | src~vector_store.py | import logging
import os, sys
from typing import Any, List
import chromadb
import dataclasses
from utils import get_langchain_docs_url
from llama_index import (
Document,
StorageContext,
ServiceContext,
VectorStoreIndex,
LangchainEmbedding,
)
from llama_index.embeddings import OpenAIEmbedding
from llama_index.vector_stores import ChromaVectorStore, PineconeVectorStore
from llama_index.node_parser import SimpleNodeParser
from langchain.embeddings import OpenAIEmbeddings
from custom_types import Source, SourceType
from crawler import WebpageCrawler, SourceType
from tqdm import tqdm
from dotenv import load_dotenv
load_dotenv()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# CHROMA
chroma_config = {"client": chromadb.PersistentClient()}
chroma_collection = chroma_config["client"].get_or_create_collection("official")
chroma_vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
# PINECONE
pinecone_config = {"environment": "us-west1-gcp-free"}
pinecone_vector_stores = {
"official": PineconeVectorStore(
index_name="official",
environment=pinecone_config["environment"],
namespace="dev",
)
}
embed_model = LangchainEmbedding(OpenAIEmbeddings())
def get_urls(sources: List[Source]):
return [s.url for s in sources]
def get_contents(sources: List[Source]):
return [s.content for s in sources]
def get_documents(sources: List[Source]):
return [
Document(text=s.content, embedding=embed_model.get_text_embedding(s.content))
for s in sources
]
def get_nodes(documents: List[Document]):
parser = SimpleNodeParser()
nodes = parser.get_nodes_from_documents(documents)
return nodes
def get_metadatas(sources: List[Source]):
return [s.metadata for s in sources]
def create_index(vector_store, sources: List[Source] = []):
storage_context = StorageContext.from_defaults(vector_store=vector_store)
service_context = ServiceContext.from_defaults(embed_model=embed_model)
documents = get_documents(sources)
index = VectorStoreIndex.from_documents(
documents=documents,
storage_context=storage_context,
service_context=service_context,
)
return index
def get_index(vector_store):
return VectorStoreIndex.from_vector_store(vector_store=vector_store)
def create_official_langchain_index(vector_store):
langchain_paths = get_langchain_docs_url()
errored = []
urls = [*langchain_paths]
sources = []
for url in tqdm(urls):
print(f"Scraping {url}...")
crawler = WebpageCrawler(
source_type=SourceType.Official, use_unstructured=False
)
try:
sources.append(crawler.generate_row(url))
except Exception as e:
errored.append(url)
print(f"Error on {url}, {e}")
index = create_index(vector_store, sources)
return index
if __name__ == "__main__":
create_official_langchain_index(pinecone_vector_stores["official"])
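# Query sketch (illustrative): once the index has been built it can be reloaded and queried, e.g.
# index = get_index(pinecone_vector_stores["official"])
# print(index.as_query_engine().query("How do I create a custom LLM chain?"))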
| [] |
2024-01-10 | razgaon/anthropic-docify | src~crawler.py | from dataclasses import dataclass
from typing import Optional, cast
import urllib.parse as urlparse
from abc import ABC, abstractmethod
import logging
import requests
from bs4 import BeautifulSoup, Tag
from markdownify import MarkdownConverter
from custom_types import Source, SourceType, Metadata
from langchain.document_loaders import UnstructuredURLLoader
from unstructured.cleaners.core import clean, clean_extra_whitespace
from youtube_transcript_api import YouTubeTranscriptApi
from googleapiclient.discovery import build
from env_var import GOOGLE_API_KEY
logger = logging.getLogger(__name__)
class Crawler(ABC):
@abstractmethod
def generate_row(self, url) -> Source:
"""Generates a row that contains the dataclass."""
pass
class WebpageCrawler(Crawler):
def __init__(self, source_type: SourceType, use_unstructured=True) -> None:
super().__init__()
self.source_type = source_type
self.use_unstructured = use_unstructured
def _get_webpage_body(self, url: str) -> Tag:
"""Uses BeautifulSoup4 to fetch a webpage's HTML body given a URL"""
response = requests.get(url)
if response.status_code != 200:
raise Exception(
f"Failed to fetch the webpage. Status code: {response.status_code}"
)
html_content = response.text
soup = BeautifulSoup(html_content, "html.parser")
parent = soup.find("article")
if not parent:
raise Exception(f"No article tag found for url {url}")
main_content = parent.find("div", class_="markdown")
return cast(Tag, main_content)
def _html_to_markdown(self, body: Tag) -> str:
return MarkdownConverter().convert_soup(body)
def generate_row(self, url: str) -> Source:
logging.info("Starting webpage crawling")
if self.use_unstructured:
res = Source(
url=url,
content=self._get_unstructured_document(url),
metadata=Metadata(
source_type=self.source_type,
),
)
else:
res = Source(
url=url,
content=self._html_to_markdown(self._get_webpage_body(url)),
metadata=Metadata(
source_type=self.source_type,
),
)
logger.info("Finished webpage crawling")
return res
def _get_unstructured_document(self, url):
"Given an URL, return a langchain Document to futher processing"
loader = UnstructuredURLLoader(
urls=[url],
mode="elements",
post_processors=[clean, clean_extra_whitespace],
)
elements = loader.load()
selected_elements = [
e for e in elements if e.metadata["category"] == "NarrativeText"
]
full_clean = " ".join([e.page_content for e in selected_elements])
return full_clean
@dataclass
class YoutubeMetadata:
title: str
description: str
channel_title: str
published_at: str
class YoutubeCrawler(Crawler):
def _get_video_id(self, video_url: str):
"""
This function extracts the YouTube video ID from a URL.
"""
url_data = urlparse.urlparse(video_url)
video_id = urlparse.parse_qs(url_data.query)["v"][0]
return video_id
def _get_transcript(self, video_url: str) -> str:
video_id = self._get_video_id(video_url)
try:
# This will return a list of dictionaries, each containing a single part of the transcript
logger.info("Starting transcribing")
transcript_list = YouTubeTranscriptApi.get_transcript(video_id)
logger.info("Finished transcribing")
# Now we will combine all parts into a single transcript
transcript = " ".join([d["text"] for d in transcript_list])
return transcript
except Exception as e:
logger.error(f"Error getting transcript for video {video_url}: {e}")
return ""
def _get_video_metadata(self, video_url: str) -> Optional[YoutubeMetadata]:
video_id = self._get_video_id(video_url)
youtube = build("youtube", "v3", developerKey=GOOGLE_API_KEY)
request = youtube.videos().list(part="snippet", id=video_id)
response = request.execute()
if response["items"]:
item = response["items"][0]
title = item["snippet"]["title"]
description = item["snippet"]["description"]
channel_title = item["snippet"]["channelTitle"]
published_at = item["snippet"]["publishedAt"]
return YoutubeMetadata(
title=title,
description=description,
channel_title=channel_title,
published_at=published_at,
)
else:
logger.error(f"No metadata found for video: {video_url}")
return None
def generate_row(self, url):
content = self._get_transcript(url)
authors = []
metadata = self._get_video_metadata(url)
if metadata:
authors.append(metadata.channel_title)
return Source(
url=url,
source_type=SourceType.Youtube,
content=content,
authors=authors,
)
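# Usage sketch (illustrative; the URLs are hypothetical):
# page_source = WebpageCrawler(source_type=SourceType.Official).generate_row("https://python.langchain.com/docs/get_started")
# video_source = YoutubeCrawler().generate_row("https://www.youtube.com/watch?v=XXXXXXXXXXX")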
| [] |
2024-01-10 | razgaon/anthropic-docify | src~templates.py | """
Human v.s. Assistant
Instructions basics:
- Describe a task, its rules and any exceptions.
- Give example inputs & outputs
- Provide more context
- Demand specific output formatting
- Provide the specific input to process
"""
INITIAL_CRITIQUE_PAGE_TEMPLATE=""""
You are an expert in Langchain, a framework for developing applications powered by large language models.
Goal: I will provide you with a documentation page on the topic from the reference page. Please review the documentation and official documentation below, then provide constructive feedback on how
the documentation can be improved. Focus on providing the top 3 areas for improvement. Ensure your feedback is clear and actionable.
Here are some criteria you should consider when reviewing and critiquing the documentation:
1. Completeness: Is the documentation covering all necessary aspects from the reference page? Are there missing sections that need to be filled in?
2. Clarity: Is the information provided clear and easy to understand? Does the language used make the content accessible to both novice and experienced developers?
3. Technical Accuracy: Are the provided instructions, examples, and other technical details accurate? Are there discrepancies between the context and the official Langchain documentation?
4. Consistency: Is the style and tone of the documentation consistent with the official Langchain documentation? Consistency in language and presentation helps to maintain a unified user experience.
5. Organization and Structure: Is the information presented in a logical and structured manner? Does the flow of content make sense? Is there a table of contents or other navigation aids?
6. Relevance and Usefulness of Examples: Are the examples relevant and do they clearly demonstrate the concept or feature they are meant to explain? Do the examples cover a range of simple to complex scenarios?
7. Grammar and Language Use: Are there grammatical errors or awkward phrasing that makes the documentation hard to understand? Are technical terms explained or linked to further reading?
<Context>
{context}
</Context>
<official_documentation>
{reference_page}
</official_documentation>
Now, provide the top 3 areas for improvement. Ensure your feedback is related to the reference page.
1.
2.
3.
"""
IMPROVE_PAGE_TEMPLATE = """
Goal: You are an expert AI agent developer who is tasked with writng comprehensive guides for your library, LangChain.
You are given context, a reference page, and a list of feedback. You need to enrich the reference page based on the critique. You are tasked to create a new markdown page that improves on the reference page by achieving the following targets:
Targets:
1. Adding context and relevant information from the provided context. For example, if you reference a topic, bring examples and explanations from the context to the reference page. Do not provide any information that isn't in the context.
2. Providing more detailed explanations of concepts. For example, if the reference page provides a high-level overview of a concept, provide more details and clarity.
3. Ensuring a logical structure and clear organization. For example, if the reference page is not well-structured, re-organize the content into a logical order.
4. Ensuring a clear intro/overview and conclusion/summary. For example, if the reference page does not have a clear intro/overview and conclusion/summary, add one.
Rules:
1. Carefully read through the context, the feedback list, and the reference page
2. Identify key concepts, explanations, examples in the reference page
3. Supplement these with relevant information, examples from the context.
4. Expand explanations of concepts with more details and clarity
5. Structure sections and content in a logical order
6. Ensure a clear intro/overview and conclusion/summary
7. Avoid from providing urls unless they exist in the reference page, and avoid providing image urls.
When you reply, first find exact quotes in the FAQ relevant to the user's question and write them down word for word inside <thinking></thinking> XML tags. This is a space for you to write down relevant content and will not be shown to the user. One you are done extracting relevant quotes, answer the question. Put your answer to the user inside <answer></answer> XML tags.
Remember to add detailed examples, explanations, and code snippets where applicable. Ensure a logical structure and clear organization. Use consistent formatting and markdown syntax. Ensure a clear intro/overview and conclusion/summary.
Stick to the reference page and don't deviate from it.
<REFERENCE PAGE>
{reference_page}
</REFERENCE PAGE>
<FEEDBACK>
{critique}
</FEEDBACK>
<FAQ>
{context}
</FAQ>
This is how your response format should be:
<thinking>
YOUR THINKING
</thinking>
<answer>
YOUR ANSWER
</answer>
"""
CRITIQUE_PAGE_TEMPLATE = """"
You are an expert in Langchain, a framework for developing applications powered by large language models.
Goal: I will provide you with draft documentation on the topic from the reference page. Please review the draft documentation and official Langchain documentation below, then provide constructive feedback on how
the draft documentation can be improved. Focus on providing the top 3 areas for improvement. Ensure your feedback is clear and actionable.
Here are some criteria you should consider when reviewing and critiquing the documentation:
1. Completeness: Is the documentation covering all necessary aspects from the reference page? Are there missing sections that need to be filled in?
2. Clarity: Is the information provided clear and easy to understand? Does the language used make the content accessible to both novice and experienced developers?
3. Technical Accuracy: Are the provided instructions, examples, and other technical details accurate? Are there discrepancies between the draft documentation and the official Langchain documentation?
4. Consistency: Is the style and tone of the documentation consistent with the official Langchain documentation? Consistency in language and presentation helps to maintain a unified user experience.
5. Organization and Structure: Is the information presented in a logical and structured manner? Does the flow of content make sense? Is there a table of contents or other navigation aids?
<Draft>
{improved_page}
</Draft>
<Official_documentation>
{reference_page}
</Official_documentation>
Now, provide the top 3 areas for improvement. Ensure your feedback is clear and actionable:
1.
2.
3.
"""
CHECK_MISSING_SYMBOLS_TEMPLATE = """
You are an experienced software engineer. Help review the draft documentation and check if there are any symbols being used that is not imported or defined in the code sample.
Following is the first example:
<example>
For the following code snippet:
```python
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
conversation = ConversationChain(
llm=chat,
memory=ConversationBufferMemory()
)
conversation.run("Answer briefly. What are the first 3 colors of a rainbow?")
```
The ConversationChain is initialized with llm=chat, but chat is not defined or imported anywhere in the code. So this would throw an error unless chat was defined and initialized somewhere else in the full code.
</example>
Following is the second example:
<example>
For the following code snippet:
```python
llm = OpenAI(temperature=0)
from langchain.prompts.prompt import PromptTemplate
from langchain.chains import ConversationChain
template = "some template .... {history} {input}"
prompt = PromptTemplate(input_variables=["history", "input"], template=template)
conversation_with_kg = ConversationChain(
llm=llm, verbose=True, prompt=prompt, memory=ConversationKGMemory(llm=llm)
)
```
The symbol `OpenAI` is used without being imported or defined anywhere in the code. So this would throw an error.
</example>
Here is the draft documentation for you to review:
<draft_documentation>
{draft_documentation}
</draft_documentation>
Now, review the draft documentation and check if there are any symbol being used that is not imported or defined in the code sample.
For each symbol being used that is not imported or defined, find exact quote from the draft documentation and explain why it is not imported or defined.
If no variable is used without imported or defined, just tell me that there are no variables used without being imported or defined.
""" | [
"\nYou are an experienced software engineer. Help review the draft documentation and check if there are any symbols being used that is not imported or defined in the code sample.\n\nFollowing is the first example:\n\n<example>\nFor the following code snippet:\n```python\nfrom langchain.chains import ConversationChain\nfrom langchain.memory import ConversationBufferMemory\n\nconversation = ConversationChain(\n llm=chat,\n memory=ConversationBufferMemory()\n)\n\nconversation.run(\"Answer briefly. What are the first 3 colors of a rainbow?\")\n```\n\nThe ConversationChain is initialized with llm=chat, but chat is not defined or imported anywhere in the code. So this would throw an error unless chat was defined and initialized somewhere else in the full code.\n</example>\n\nFollowing is the second example:\n<example>\nFor the following code snippet:\n```python\nllm = OpenAI(temperature=0)\nfrom langchain.prompts.prompt import PromptTemplate\nfrom langchain.chains import ConversationChain\n\ntemplate = \"some template .... {history} {input}\"\nprompt = PromptTemplate(input_variables=[\"history\", \"input\"], template=template)\nconversation_with_kg = ConversationChain(\n llm=llm, verbose=True, prompt=prompt, memory=ConversationKGMemory(llm=llm)\n)\n```\n\nThe symbol `OpenAI` is used without being imported or defined anywhere in the code. So this would throw an error.\n</example>\n\nHere is the draft documentation for you to review:\n\n<draft_documentation>\n{draft_documentation}\n</draft_documentation>\n\nNow, review the draft documentation and check if there are any symbol being used that is not imported or defined in the code sample. \nFor each symbol being used that is not imported or defined, find exact quote from the draft documentation and explain why it is not imported or defined.\nIf no variable is used without imported or defined, just tell me that there are no variables used without being imported or defined.\n",
"\"\nYou are an expert in Langchain, a framework for developing applications powered by large language models. \n\nGoal: I will provide you with a documentation page on the topic from the reference page. Please review the documentation and official documentation below, then provide constructive feedback on how \nthe documentation can be improved. Focus on providing the top 3 areas for improvement. Ensure your feedback is clear and actionable.\n\nHere are some criteria you should consider when reviewing and critiquing the documentation:\n1. Completeness: Is the documentation covering all necessary aspects from the reference page? Are there missing sections that need to be filled in?\n2. Clarity: Is the information provided clear and easy to understand? Does the language used make the content accessible to both novice and experienced developers?\n3. Technical Accuracy: Are the provided instructions, examples, and other technical details accurate? Are there discrepancies between the context and the official Langchain documentation?\n4. Consistency: Is the style and tone of the documentation consistent with the official Langchain documentation? Consistency in language and presentation helps to maintain a unified user experience.\n5. Organization and Structure: Is the information presented in a logical and structured manner? Does the flow of content make sense? Is there a table of contents or other navigation aids?\n6. Relevance and Usefulness of Examples: Are the examples relevant and do they clearly demonstrate the concept or feature they are meant to explain? Do the examples cover a range of simple to complex scenarios?\n7. Grammar and Language Use: Are there grammatical errors or awkward phrasing that makes the documentation hard to understand? Are technical terms explained or linked to further reading?\n\n<Context>\n{context}\n</Context> \n\n<official_documentation>\n{reference_page}\n</official_documentation>\n\nNow, provide the top 3 areas for improvement. Ensure your feedback is related to the reference page.\n1. \n2. \n3.\n\n",
"\nGoal: You are an expert AI agent developer who is tasked with writng comprehensive guides for your library, LangChain. \n\nYou are given context, a reference page, and a list of feedback. You need to enrich the reference page based on the critique. You are tasked to create a new markdown page that improves on the reference page by achieving the following targets:\nTargets:\n1. Adding context and relevant information from the provided context. For example, if you reference a topic, bring examples and explanations from the context to the reference page. Do not provide any information that isn't in the context.\n2. Providing more detailed explanations of concepts. For example, if the reference page provides a high-level overview of a concept, provide more details and clarity.\n3. Ensuring a logical structure and clear organization. For example, if the reference page is not well-structured, re-organize the content into a logical order.\n4. Ensuring a clear intro/overview and conclusion/summary. For example, if the reference page does not have a clear intro/overview and conclusion/summary, add one.\n\nRules:\n1. Carefully read through the context, the feedback list, and the reference page\n2. Identify key concepts, explanations, examples in the reference page\n3. Supplement these with relevant information, examples from the context.\n4. Expand explanations of concepts with more details and clarity\n5. Structure sections and content in a logical order \n6. Ensure a clear intro/overview and conclusion/summary\n7. Avoid from providing urls unless they exist in the reference page, and avoid providing image urls.\n\nWhen you reply, first find exact quotes in the FAQ relevant to the user's question and write them down word for word inside <thinking></thinking> XML tags. This is a space for you to write down relevant content and will not be shown to the user. One you are done extracting relevant quotes, answer the question. Put your answer to the user inside <answer></answer> XML tags.\nRemember to add detailed examples, explanations, and code snippets where applicable. Ensure a logical structure and clear organization. Use consistent formatting and markdown syntax. Ensure a clear intro/overview and conclusion/summary.\nStick to the reference page and don't deviate from it.\n\n<REFERENCE PAGE>\n{reference_page}\n</REFERENCE PAGE>\n\n<FEEDBACK> \n{critique}\n</FEEDBACK>\n\n<FAQ>\n{context}\n</FAQ>\n\n\nThis is how your response format should be:\n<thinking>\nYOUR THINKING\n</thinking>\n<answer>\nYOUR ANSWER\n</answer>\n",
"\"\nYou are an expert in Langchain, a framework for developing applications powered by large language models. \n\nGoal: I will provide you with draft documentation on the topic from the reference page. Please review the draft documentation and official Langchain documentation below, then provide constructive feedback on how \nthe draft documentation can be improved. Focus on providing the top 3 areas for improvement. Ensure your feedback is clear and actionable.\n\nHere are some criteria you should consider when reviewing and critiquing the documentation:\n1. Completeness: Is the documentation covering all necessary aspects from the reference page? Are there missing sections that need to be filled in?\n2. Clarity: Is the information provided clear and easy to understand? Does the language used make the content accessible to both novice and experienced developers?\n3. Technical Accuracy: Are the provided instructions, examples, and other technical details accurate? Are there discrepancies between the draft documentation and the official Langchain documentation?\n4. Consistency: Is the style and tone of the documentation consistent with the official Langchain documentation? Consistency in language and presentation helps to maintain a unified user experience.\n5. Organization and Structure: Is the information presented in a logical and structured manner? Does the flow of content make sense? Is there a table of contents or other navigation aids?\n\n<Draft>\n{improved_page}\n</Draft>\n\n<Official_documentation>\n{reference_page} \n</Official_documentation>\n\nNow, provide the top 3 areas for improvement. Ensure your feedback is clear and actionable:\n1. \n2. \n3.\n"
] |
2024-01-10 | corca-ai/AIvilization | core~civilization~person~brain~memory~long_term.py | import re
import openai
import pinecone
from core.config import settings
from core.logging import logger
from .base import BaseMemory
from .vector.openai import OpenAIVector
openai.api_key = settings.OPENAI_API_KEY
class LongTermMemory(BaseMemory[str]):
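# _PLAN_PATTERN matches markdown checklist items such as "- [ ] task" or "- [x] task" (capturing the task text up to the newline)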
_PLAN_PATTERN = r"- \[(?:x| )\]\s+(.+)\n"
@logger.disable
def __init__(self, name: str, instruction: str):
pinecone.init(api_key=settings.PINECONE_API_KEY, environment="us-east1-gcp")
index = pinecone.Index(settings.PINECONE_INDEX)
vector = OpenAIVector()
super().__init__(
name=name,
instruction=instruction,
storage=index,
change_to_memory=lambda x: vector.embedding(x),
)
def load(self, prompt: str) -> str:
return prompt # TODO
def save(self, prompt: str, thought: str) -> None:
plans = re.findall(LongTermMemory._PLAN_PATTERN, thought, re.DOTALL)
vectors = [
pinecone.Vector(
id=LongTermMemory.get_plan_id(self.name, i),
values=self.change_to_memory(plan),
metadata={"name": self.name},
)
for i, plan in enumerate(plans)
]
if vectors:
try:
self.storage.upsert(vectors=vectors)
except Exception as e:
logger.exception(e)
@staticmethod
def get_plan_id(name: str, index: int) -> str:
return f"{name}-plan#{index}"
def __del__(self):
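# On destruction, look up every vector stored under this person's name and remove it from the Pinecone index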
result = self.storage.query(
self.change_to_memory("None"), top_k=10000, filter={"name": self.name}
)
self.storage.delete(
ids=[match["id"] for match in result["matches"]], filter={"name": self.name}
)
| [] |
2024-01-10 | corca-ai/AIvilization | core~civilization~person~brain~default.py | from typing import Generator, List, Tuple
# Assumption: Action is defined next to Plan in the project's action module; the original "from argparse import Action" looked like a mis-import.
from core.civilization.person.action.base import Action, Plan
from core.civilization.person.base import BasePerson
from core.civilization.person.brain.organize.base import BaseOrganize
from core.civilization.person.brain.organize.execute import Executor
from core.civilization.person.brain.organize.optimize import Optimizer
from core.civilization.person.brain.organize.plan import Planner
from core.civilization.person.brain.organize.review import Reviewer
from core.config.env import settings
from .base import BaseBrain
from .llm.openai import OpenAILLM
from .memory import BaseMemory, LongTermMemory, ShortTermMemory
class Brain(BaseBrain):
person: BasePerson = None
sterm_memory: BaseMemory[list[dict[str, str]]] = None
lterm_memory: BaseMemory[str] = None
planner: BaseOrganize = None
optimizer: BaseOrganize = None
executor: BaseOrganize = None
reviewer: BaseOrganize = None
init_message: str = "Your name is {name}. {instruction}"
def __init__(self, person: BasePerson, name: str, instruction: str):
super().__init__(llm=OpenAILLM())
self.lterm_memory = (
LongTermMemory(name, instruction) if settings.PINECONE_API_KEY else None
)
self.sterm_memory = ShortTermMemory(name, instruction, self.init_message)
self.person = person
self.planner = Planner()
self.optimizer = Optimizer()
self.executor = Executor()
self.reviewer = Reviewer()
def plan(
self, request: str, opinions: List[str], constraints: List[str]
) -> List[Plan]:
prompt = self.planner.stringify(self.person, request, opinions, constraints)
thought = ""
for t in self._think(prompt):
thought += t
return self.planner.parse(self.person, thought)
def optimize(self, request: str, plans: List[Plan]) -> Tuple[str, bool]:
prompt = self.optimizer.stringify(self.person, request, plans)
thought = ""
for t in self._think(prompt):
thought += t
opinion, ok = self.optimizer.parse(self.person, thought)
if ok:
self.sterm_memory.save(
"Make a plan to respond to the request. Request is:\n" + request,
"\n".join(map(str, plans)),
)
return opinion, ok
def execute(self, plan: Plan, opinions: str) -> Action:
prompt = self.executor.stringify(self.person, plan, opinions)
thought = ""
for t in self._think(prompt):
thought += t
return self.executor.parse(self.person, thought)
def review(self, plan: str, action: Action, result: str) -> Tuple[str, bool]:
prompt = self.reviewer.stringify(self.person, plan, action, result)
thought = ""
for t in self._think(prompt):
thought += t
opinion, ok = self.reviewer.parse(self.person, thought)
if ok:
self.sterm_memory.save(
"Execute this plan:\n" + str(plan),
f"I did this action: {action}\nAction's result: {result}",
)
return opinion, ok
def _think(self, prompt: str) -> Generator[str, None, None]:
messages = self.sterm_memory.load(prompt)
for thought in self.llm.chat_completion(messages):
yield thought["choices"][0]["delta"].get("content", "")
| [] |
2024-01-10 | UsithaDJay/Guten-Bot | Backend~flask_backend.py | # # !pip install chromadb --progress-bar off
# !pip install replicate
# # !pip install faiss-cpu
# !pip install faiss-gpu
# !pip install transformers --progress-bar off
# !pip install langchain --progress-bar off
# !pip install sentence_transformers --progress-bar off
# !pip install InstructorEmbedding --progress-bar off
# !pip install textsum
# !pip install flask-ngrok
# !pip install pyngrok
# !ngrok authtoken '2XVaUQ29PRt48iMYXxyN6tawIFh_6kZiMroQWZJf812oC2fnz'
from flask import Flask, request, jsonify
from flask_ngrok import run_with_ngrok
import os
import glob
import textwrap
import time
import re
import langchain
# loaders
from langchain.document_loaders import TextLoader
# splits
from langchain.text_splitter import RecursiveCharacterTextSplitter
# prompts
from langchain import PromptTemplate, LLMChain
# vector stores
from langchain.vectorstores import FAISS
from langchain.vectorstores import Chroma
# models
from langchain.llms import HuggingFacePipeline
from InstructorEmbedding import INSTRUCTOR
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.llms import Replicate
from textsum.summarize import Summarizer
# retrievers
from langchain.chains import RetrievalQA
import torch
import transformers
from transformers import pipeline
# from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# from transformers import AutoTokenizer, TextStreamer, pipeline
# data collectors
import requests
from bs4 import BeautifulSoup
import difflib
app = Flask(__name__)
run_with_ngrok(app)
# Models
## Summarizing Model
model_name = "pszemraj/led-large-book-summary"
summarizer = Summarizer(
model_name_or_path=model_name,
token_batch_length=10000,
)
# configurations for summarizer
min_word_count = 200
max_word_count = 300
tokens_per_word = 1.3
min_token_count = min_word_count * tokens_per_word
max_token_count = max_word_count * tokens_per_word
# Set the length constraints in the inference params
inference_params = summarizer.inference_params
inference_params['max_length'] = int(max_token_count)
inference_params['min_length'] = int(min_token_count)
summarizer.set_inference_params(inference_params)
summ = pipeline(
"summarization",
model_name,
device=0 if torch.cuda.is_available() else -1,
)
## Embeddings model
instructor_embeddings = HuggingFaceInstructEmbeddings(
model_name = "hkunlp/instructor-base",
model_kwargs = {"device": "cuda"}
)
## Llama2-13 by Replicate
REPLICATE_API_TOKEN = "r8_4o6DI4Kl9VfQdrVv6OlaqvAyMhFdamr2jUDVe"
os.environ["REPLICATE_API_TOKEN"] = REPLICATE_API_TOKEN
llm = Replicate(
model = "replicate/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf",
input = {"temperature": 0.75, "max_length": 1024, "top_p": 0.95, "repetition_penalty": 1.15},
)
prompt_template = """
Don't try to make up an answer, if you don't know just say that you don't know.
Answer in the same language the question was asked.
Use only the following pieces of context to answer the question at the end.
{context}
Question: {question}
Answer:"""
# Custom Prompt
PROMPT = PromptTemplate(
template = prompt_template,
input_variables = ["context", "question"]
)
# Functions for Book Retrieval
## Function to search for a book by name and return the best match URL
def search_book_by_name(book_name):
base_url = "https://www.gutenberg.org/"
search_url = base_url + "ebooks/search/?query=" + book_name.replace(" ", "+") + "&submit_search=Go%21"
response = requests.get(search_url)
soup = BeautifulSoup(response.content, "html.parser")
# Find the best match link based on similarity ratio
best_match_ratio = 0
best_match_url = ""
for link in soup.find_all("li", class_="booklink"):
link_title = link.find("span", class_="title").get_text()
similarity_ratio = difflib.SequenceMatcher(None, book_name.lower(), link_title.lower()).ratio()
if similarity_ratio > best_match_ratio:
best_match_ratio = similarity_ratio
best_match_url = base_url + link.find("a").get("href")
return best_match_url
## Function to get the "Plain Text UTF-8" download link from the book page
def get_plain_text_link(book_url):
response = requests.get(book_url)
soup = BeautifulSoup(response.content, "html.parser")
plain_text_link = ""
for row in soup.find_all("tr"):
format_cell = row.find("td", class_="unpadded icon_save")
if format_cell and "Plain Text UTF-8" in format_cell.get_text():
plain_text_link = format_cell.find("a").get("href")
break
return plain_text_link
## Function to get the content of the "Plain Text UTF-8" link
def get_plain_text_content(plain_text_link):
response = requests.get(plain_text_link)
content = response.text
return content
## Main function
def load_book(book_name):
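"""
Search Project Gutenberg for `book_name`, download the best-matching "Plain Text UTF-8"
version, strip the Gutenberg start/end markers, write the result to 'book.txt' and return it.
Returns the string "web site error" if no matching book or plain-text link is found.
"""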
best_match_url = search_book_by_name(book_name)
if best_match_url:
plain_text_link = get_plain_text_link(best_match_url)
if plain_text_link:
full_plain_text_link = "https://www.gutenberg.org" + plain_text_link
plain_text_content = get_plain_text_content(full_plain_text_link)
# print("Plain Text UTF-8 content:", plain_text_content)
book_text = plain_text_content
# Remove the BOM character if it exists
book_text = book_text.lstrip('\ufeff')
#####
# Define the possible variations of the start marker
possible_start_markers = [
r"\*\*\* START OF THIS PROJECT GUTENBERG EBOOK (.+?) \*\*\*",
r"\*\*\* START OF THE PROJECT GUTENBERG EBOOK (.+?) \*\*\*"
]
# Fetch the plain_text_content of the book (assuming you have it)
plain_text_content = book_text # Fetch the content here
start_index = None
for start_marker_pattern in possible_start_markers:
match = re.search(start_marker_pattern, book_text)
if match:
start_index = match.start()
book_name = match.group(1)
break
if start_index is not None:
end_marker = f"*** END OF THE PROJECT GUTENBERG EBOOK {book_name} ***"
end_index = plain_text_content.find(end_marker, start_index)
if end_index != -1:
book_text = plain_text_content[start_index + len(match.group(0)):end_index]
#####
# Choose an appropriate encoding, such as 'utf-8'
with open("book.txt", "w", encoding="utf-8") as book:
book.write(book_text)
return book_text
else:
print("No Plain Text UTF-8 link found.")
return "web site error"
else:
print("No matching book found.")
return "web site error"
# Function to get Summary
def generate_summary(book_text):
global summarizer, summ
out_str = summarizer.summarize_string(book_text)
wall_of_text = out_str
result = summ(
wall_of_text,
min_length=200,
max_length=300,
no_repeat_ngram_size=3,
encoder_no_repeat_ngram_size=3,
repetition_penalty=3.5,
num_beams=4,
early_stopping=True,
)
original_text = result[0]['summary_text']
# Remove the last sentence
sentences = original_text.split('. ')
if len(sentences) > 1:
final_text = '. '.join(sentences[:-1])
else:
final_text = original_text
# Print the modified text
print(final_text)
return final_text
# Functions for Q/A chatbot
## Splitting book.txt to create embeddings
def loadForEmbeddings(txt_file):
# load document
loader = TextLoader(txt_file, encoding="utf-8")
documents = loader.load()
text_splitter = RecursiveCharacterTextSplitter(
chunk_size = 800,
chunk_overlap = 0
)
texts = text_splitter.split_documents(documents)
return texts
def wrap_text_preserve_newlines(text, width=200): # 110
# Split the input text into lines based on newline characters
lines = text.split('\n')
# Wrap each line individually
wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
# Join the wrapped lines back together using newline characters
wrapped_text = '\n'.join(wrapped_lines)
return wrapped_text
## Format llm response
def process_llm_response(llm_response):
ans = wrap_text_preserve_newlines(llm_response['result'])
sources_used = llm_response['source_documents'][0].metadata['source']
ans = ans + '\n\nSources: \n' + sources_used
return ans
## Main function in Q/A
def llm_ans(query):
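"""
Answer a query with a RetrievalQA chain (chain_type="stuff") built from the global llm and
FAISS retriever, using the custom PROMPT. Returns the formatted answer with its source
document and the elapsed time appended.
"""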
start = time.time()
qa_chain = RetrievalQA.from_chain_type(
llm = llm,
chain_type = "stuff", # map_reduce, map_rerank, stuff, refine
retriever = retriever,
chain_type_kwargs = {"prompt": PROMPT},
return_source_documents = True,
verbose = False
)
llm_response = qa_chain(query)
ans = process_llm_response(llm_response)
end = time.time()
time_elapsed = int(round(end - start, 0))
time_elapsed_str = f'\n\nTime elapsed: {time_elapsed} s'
return ans + time_elapsed_str
# Example for creating Embeddings
book_name = "The prince"
book_text = load_book(book_name)
book = "book.txt"
texts = loadForEmbeddings(book)
## create embeddings
vectordb = FAISS.from_documents(
documents = texts,
embedding = instructor_embeddings
)
# Build a default retriever so /get_response works even before /submit has been called
retriever = vectordb.as_retriever(search_kwargs = {"k": 3, "search_type" : "similarity"})
# Variable to check whether the book name entered
no_book = False
# Loads book then creates embeddings
@app.route('/submit', methods=['POST'])
def submit():
global vectordb, retriever, instructor_embeddings, no_book, book_text
book_name = request.json.get('book_name')
if not book_name:
no_book = True
return jsonify({'status': "Please enter the name of the book."})
no_book = False # a valid book name was supplied, so clear the flag
book_text = load_book(book_name)
if book_text == "web site error":
return jsonify({'status': 'web site error'})
book = "book.txt"
texts = loadForEmbeddings(book)
# create embeddings
vectordb = FAISS.from_documents(
documents = texts,
embedding = instructor_embeddings
)
retriever = vectordb.as_retriever(search_kwargs = {"k": 3, "search_type" : "similarity"})
return jsonify({'status': 'success'})
# generates and returns summary
@app.route('/get_summary', methods=['GET'])
def get_summary():
global book_text, no_book
if no_book:
return jsonify({'answer': "Please enter the name of the book."})
summary = generate_summary(book_text)
return jsonify({'book_summary': summary})
# Gets the prompt and returns Llm response
@app.route('/get_response', methods=['POST'])
def get_response():
query = request.json.get('query')
# print("QQ:", query)
if (no_book and not query):
return jsonify({'answer': "Please enter the name of the book and the prompt."})
if no_book:
return jsonify({'answer': "Please enter the name of the book."})
if not query:
return jsonify({'answer': "Please enter the prompt."})
answer = llm_ans(query)
return jsonify({'answer': answer})
if __name__ == "__main__":
app.run() | [
"\nDon't try to make up an answer, if you don't know just say that you don't know.\nAnswer in the same language the question was asked.\nUse only the following pieces of context to answer the question at the end.\n\n{context}\n\nQuestion: {question}\nAnswer:",
"question",
"context"
] |
2024-01-10 | DominikFin/nlp-satisfaction | code~functions.py | from pandas_profiling import ProfileReport
from dotenv import load_dotenv
import os
import pyodbc
import re
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from collections import Counter
import pandas as pd
from sklearn.model_selection import train_test_split
import spacy
from spacy.lang.de.stop_words import STOP_WORDS
import numpy as np
#from rake_nltk import Rake
from bertopic import BERTopic
import gensim.corpora as corpora
from gensim.models.coherencemodel import CoherenceModel
from gensim.utils import simple_preprocess
import typing as t
from typing import List
from nltk.corpus import stopwords
import logging
import plotly.graph_objects as go
from sklearn.decomposition import PCA
import plotly.express as px
###############################################################
################# Globale Variablen ##########################
###############################################################
# plot settings
#color_continuous_scale="darkmint"
#color_discrete_sequence=['#001d13', '#002822', '#00342f', '#00403a', '#004d46', '#005952', '#00675f', '#00746c', '#008279', '#009087', '#009e94', '#14aca2', '#31bab0', '#45c8bd', '#56d6cb', '#66e5d9', '#75f3e7', '#91ffff']
#color_discrete_sequence_mixed=['#001d13', '#91ffff', '#002822', '#75f3e7', '#00342f', '#66e5d9', '#00403a', '#56d6cb', '#004d46', '#45c8bd', '#005952', '#31bab0', '#00675f', '#14aca2', '#00746c', '#009e94', '#008279', '#009087']
#color_discrete_sequence=["#003f5c", "#58508d", "#bc5090", "#ff6361", "#ffa600", "#ff7c43", "#ffdc00", "#00a2ff", "#7fdbff", "#e8c547", "#55b2d2", "#7fcdbb", "#5a5a5a", "#9c9c9c", "#c9c9c9", "#ef476f", "#6b5b95", "#b565a7", "#ffdab9", "#4d4d4d"]
#color_discrete_sequence=["#0B1F26","#3F89A6","#204959","#96C6D9","#D0E9F2","#42323A","#6C8C7D","#8EB3A2","#C5D9BA","#546E75"]
color_discrete_sequence= [ "#0B1F26", "#3F89A6", "#204959", "#96C6D9", "#D0E9F2", "#42323A", "#6C8C7D", "#8EB3A2", "#C5D9BA", "#546E75", "#F08080", "#FFA07A", "#FFDAB9", "#FFA500", "#FFD700", "#DAA520", "#BDB76B", "#808000", "#556B2F", "#8B4513"]
color_discrete_sequence_mixed= ['#0B1F26', '#8B4513', '#3F89A6', '#556B2F', '#204959', '#808000', '#96C6D9', '#BDB76B', '#D0E9F2', '#DAA520', '#42323A', '#FFD700', '#6C8C7D', '#FFA500', '#8EB3A2', '#FFDAB9', '#C5D9BA', '#F08080', '#546E75', '#FFA07A']
color_discrete_kuzu= ["#49787F","#A96262","#F06969","#C499CA","#EDB183","#B6D6CC","#9D6E94","#4FB286","#87A07C","#74A4BC","#F0C7ED","#9C89B8","#F06969","#ECD9BD"]
template='plotly_white'
################################################################
######################## Functions #############################
################################################################
def sql_azure_connect():
'''
Connect to an Azure SQL database using credentials from a .env file.
Returns:
cnxn (pyodbc.Connection): A connection object to the Azure SQL database.
Raises:
pyodbc.Error: If the connection to the database fails.
'''
# Load .env file
load_dotenv('../config/.env')
# Import credentials for kuzu Azure DB from .env file
credentials = {
'SERVER': os.getenv('SERVER_AZURE', "default"),
'DATABASE': os.getenv('DATABASE_AZURE', "default"),
'USERNAME': os.getenv('USERNAME_AZURE', "default"),
'PASSWORD': os.getenv('PASSWORD_AZURE', "default"),
'DRIVER': os.getenv('DRIVER_AZURE', "default")
}
connection_string = f"DRIVER={credentials['DRIVER']};SERVER={credentials['SERVER']};DATABASE={credentials['DATABASE']};UID={credentials['USERNAME']};PWD={credentials['PASSWORD']}"
cnxn = pyodbc.connect(connection_string)
# Show available tables
table_names = [x[2] for x in cnxn.cursor().tables(tableType='TABLE')]
print("Available tables: ",table_names)
return cnxn
###########################################
def filter_dateframe_cols(df, cols: list):
'''Return the dataframe restricted to the given list of columns.'''
return df[cols]
###########################################
def add_basic_textfeatures(df, colname: str):
'''
Add basic text features to a dataframe column with text.
Args:
df (pandas.DataFrame): The input dataframe.
colname (str): The name of the column with the text.
Returns:
pandas.DataFrame: A new dataframe with the added text features.
Examples:
>>> df = pd.DataFrame({'text': ['This is a sentence.', 'This is another sentence.']})
>>> df_with_features = add_basic_textfeatures(df, 'text')
>>> print(df_with_features)
text text_Character text_Tokens text_Types text_TTR
0 This is a sentence. 19 4 4 100.00
1 This is another sentence. 26 4 4 100.00
'''
dff = df.copy()
## Add character count
dff[colname + '_' + 'Character'] = df[colname].apply(lambda x: len(x))
## Add token count (wordcount)
dff[colname + '_' + 'Tokens'] = df[colname].apply(lambda x: len(str(x).split()))
## Add types count (unique wordcount)
typecount = df[colname].apply(lambda x: len(set(str(x).split())))
dff[colname + '_' + 'Types'] = typecount
## Add TTR (Type-Token Ratio)
dff[colname + '_' + 'TTR'] = (typecount / dff[colname + '_' + 'Tokens']) * 100
return dff
###########################################
def remove_redundant_whitespaces(column):
'''Removes all additional whitespaces from a list ans returns a new list'''
return [re.sub(r'\s+'," ", x).strip() for x in column]
###########################################
def get_top_n_ngrams(corpus, n=None, ngram_range=(1,1)):
'''
Get the top n n-grams from a corpus of text.
Args:
corpus (list or array-like): The input corpus of text.
n (int or None): The number of n-grams to return. If None, return all n-grams.
ngram_range (tuple): The range of n-grams to consider. Default is (1,1) for unigrams.
Returns:
list: A list of tuples, where each tuple contains an n-gram and its frequency in the corpus, sorted by frequency in descending order.
Examples:
>>> corpus = ['This is a sentence.', 'This is another sentence.']
>>> top_ngrams = get_top_n_ngrams(corpus, n=2, ngram_range=(1,2))
>>> print(top_ngrams)
[('this', 2), ('is', 2)]
'''
vec = CountVectorizer(ngram_range=ngram_range)
# check if corpus is a list of lists and flatten it if so
if isinstance(corpus[0], list):
flat_corpus = [item for sublist in corpus for item in sublist]
else:
flat_corpus = corpus
vec.fit(flat_corpus)
bag_of_words = vec.transform(flat_corpus)
sum_words = bag_of_words.sum(axis=0)
words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]
words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True)
return words_freq[:n]
################## Add addition date dimensions to datarframe #########################
def add_date_columns(df, date_col):
"""
Add additional date columns to a DataFrame based on a specified date column.
Args:
df (pandas.DataFrame): The DataFrame to which the new date columns will be added.
date_col (str): The name of the column containing the date.
Returns:
pandas.DataFrame: A new DataFrame with the additional date columns.
Example:
>>> df = pd.DataFrame({'date': ['2022-01-01', '2022-02-01', '2022-03-01']})
>>> df['date'] = pd.to_datetime(df['date'])
>>> df_with_datecols = add_date_columns(df, 'date')
>>> print(df_with_datecols.head())
date year month quarter yearmonth yearquarter season yearseason
0 2022-01-01 2022 1 1 2022-01-01 2022Q1 winter 2022-winter
1 2022-02-01 2022 2 1 2022-02-01 2022Q1 winter 2022-winter
2 2022-03-01 2022 3 1 2022-03-01 2022Q1 spring 2022-spring
"""
df.insert(loc=2, column='year', value=df[date_col].dt.year) #create additional year col for viz
df.insert(loc=3, column='month', value=df[date_col].dt.month) #create additional month col for viz
df.insert(loc=4, column='quarter', value=df[date_col].dt.quarter) #create additional quarter col for viz
df.insert(loc=5, column='yearmonth', value=pd.to_datetime(df[['year', 'month']].assign(DAY=1))) #create additional yearmonth col for viz
df.insert(loc=6, column='yearquarter', value=df['year'].astype(str) + 'Q' + df['quarter'].astype(str)) #create additional yearquarter col for viz
df.insert(loc=7, column='season', value=df['month'].apply(lambda x: 'spring' if x in [3, 4, 5] else ('summer' if x in [6, 7, 8] else ('autumn' if x in [9, 10, 11] else 'winter'))))
df.insert(loc=8, column='yearseason', value=df['year'].astype(str) + '-' + df['season']) # create additional yearseason column for viz
return df
#######################################
################ TFIDF ################
################ General Group Function ################
def find_trending_keywords(dataframe, filter_column, text_column, ngram_range=(1, 1), n=10, min_df=100, max_df=0.2):
"""
Find the top n trending keywords for each value in a specified column of a Pandas DataFrame.
Args:
- dataframe (pd.DataFrame): The DataFrame containing the data to analyze.
- filter_column (str): The name of the column in the DataFrame to group the data by.
- text_column (str): The name of the column in the DataFrame containing the text to analyze.
- ngram_range (Tuple[int, int]): The range of n-grams to consider when extracting features from the text data.
- n (int): The number of top keywords to return for each group.
- min_df (int): The minimum number of documents that a term must appear in to be considered in the analysis.
- max_df (float): The maximum proportion of documents that a term can appear in to be considered in the analysis.
Returns:
- trending_keywords (Dict[str, List[Tuple[str, float, int, float]]]): A dictionary where each key is a unique value from the specified column and each value is a list of tuples containing the top n keywords for that value, along with their TF-IDF score, count in the group, and normalized count.
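Example (illustrative sketch with made-up data; thresholds relaxed so the tiny corpus passes min_df/max_df):
>>> df = pd.DataFrame({'group': ['a', 'a', 'b'],
...                    'tokens': [['service', 'gut'], ['service', 'langsam'], ['preis', 'hoch']]})
>>> trending = find_trending_keywords(df, 'group', 'tokens', n=5, min_df=1, max_df=1.0)
>>> # trending maps each group value to its keyword tuples, e.g. {'a': [('service', ...), ...], 'b': [...]}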
"""
# convert values in filter column to categorical values
dataframe[filter_column] = dataframe[filter_column].astype('category')
# add "unknown" category to filter_column categories, if not already present
if "unknown" not in dataframe[filter_column].cat.categories:
dataframe[filter_column] = dataframe[filter_column].cat.add_categories("unknown")
# replace NaN values in filter_column with "unknown"
dataframe[filter_column].fillna("unknown", inplace=True)
# Create an empty dictionary to store the top keywords and their counts for each value in filter_column
trending_keywords = {}
# Get all values in filter_column
values = dataframe[filter_column].unique()
# Convert the tokenized text column to a list of space-separated strings
text_data = [' '.join(words) for words in dataframe[text_column]]
# Create a TfidfVectorizer object with the specified n-gram range and min_df parameter
tfidf_vect = TfidfVectorizer(ngram_range=ngram_range, min_df=min_df, max_df=max_df)
# Fit and transform the tokenized text column for the whole corpus
tfidf = tfidf_vect.fit_transform(text_data)
# Loop over the values
for value in values:
# Filter the dataframe for the given value in filter_column
filter_data = dataframe[dataframe[filter_column] == value]
# Convert the tokenized text column to a list of space-separated strings
text_data_filter = [' '.join(words) for words in filter_data[text_column]]
# Transform the tokenized text column for the given value using the fitted TfidfVectorizer
tfidf_filter = tfidf_vect.transform(text_data_filter)
# Compute the sum of TF-IDF scores for each term in the given value
tfidf_filter = tfidf_filter.sum(axis=0)
# Create a list of tuples with the term and its TF-IDF score for the group
keywords = [(term, tfidf_filter[0, index]) for term, index in tfidf_vect.vocabulary_.items()]
# Filter out terms that have zero TF-IDF scores
keywords = [kw for kw in keywords if kw[1] > 0]
# Sort the keywords based on their TF-IDF scores
keywords.sort(key=lambda x: x[1], reverse=True)
# Count the occurrence of each keyword in the group
group_text_data = ' '.join(text_data_filter)
group_word_count = Counter(group_text_data.split())
# Create a list of tuples with the term, its TF-IDF score, and count in the group
keywords_with_count = [(kw[0], kw[1], group_word_count[kw[0]], group_word_count[kw[0]]/len(group_word_count)) for kw in keywords]
# Store the top n keywords for the given value in the dictionary
trending_keywords[value] = keywords_with_count[:n]
# Return the dictionary of top keywords and their counts for each value in filter_column
return trending_keywords
################ Specific Group Function ################
def find_trending_keywords_diff_normaized(dataframe, filter_column, text_column, ngram_range=(1, 1), n=10, min_df=100, max_df=0.2):
"""
Given a Pandas dataframe `dataframe`, a categorical column name `filter_column`, and a text column name `text_column`,
this function returns a dictionary of top `n` trending keywords with their TF-IDF score differences normalized by group size
for each unique value in `filter_column`. The keyword extraction is based on the difference in TF-IDF scores between
the given value and the average of the other values in `filter_column`. Only keywords with positive score differences are
included in the results. The TF-IDF score differences are normalized by the total count of all words in the group. The
`ngram_range`, `min_df`, and `max_df` parameters control the behavior of the TfidfVectorizer object used for tokenization
and weighting of the text data.
Args:
- dataframe (Pandas dataframe): The dataframe containing the data to analyze.
- filter_column (str): The name of the categorical column to group the data by.
- text_column (str): The name of the text column to extract keywords from.
- ngram_range (tuple, default=(1,1)): The range of n-grams to extract from the text data.
- n (int, default=10): The number of top keywords to extract for each group.
- min_df (int, default=100): The minimum frequency threshold for words to be included in the vocabulary.
- max_df (float, default=0.2): The maximum document frequency threshold for words to be included in the vocabulary.
Returns:
- trending_keywords (dict): A dictionary with the unique values in `filter_column` as keys and a list of top `n`
trending keywords with their TF-IDF score differences normalized by group size as values. The keyword list for each
value is sorted by descending order of the TF-IDF score difference.
"""
# convert values in filter column to categorical values
dataframe[filter_column] = dataframe[filter_column].astype('category')
# add "unknown" category to filter_column categories, if not already present
if "unknown" not in dataframe[filter_column].cat.categories:
dataframe[filter_column] = dataframe[filter_column].cat.add_categories("unknown")
# replace NaN values in filter_column with "unknown"
dataframe[filter_column].fillna("unknown", inplace=True)
# create an empty dictionary to store the top keywords for each value in filter_column
trending_keywords = {}
# get all values in filter_column
values = dataframe[filter_column].unique()
# convert the tokenized text column to a list of space-separated strings
text_data = [' '.join(words) for words in dataframe[text_column]]
# create a TfidfVectorizer object with the specified n-gram range and min_df parameter
tfidf_vect = TfidfVectorizer(ngram_range=ngram_range, min_df=min_df, max_df=max_df)
# fit and transform the tokenized text column for the whole corpus
tfidf = tfidf_vect.fit_transform(text_data)
# loop over the values
for value in values:
# filter the dataframe for the given value in filter_column
filter_data = dataframe[dataframe[filter_column] == value]
# convert the tokenized text column to a list of space-separated strings
text_data_filter = [' '.join(words) for words in filter_data[text_column]]
# transform the tokenized text column for the given value using the fitted TfidfVectorizer
tfidf_filter = tfidf_vect.transform(text_data_filter)
# compute the sum of TF-IDF scores for each term in the given value
tfidf_filter = tfidf_filter.sum(axis=0)
# normalize the TF-IDF scores by the total count of all words in the group
group_word_count = Counter(' '.join(text_data_filter).split())
total_count = sum(group_word_count.values())
tfidf_filter = tfidf_filter / total_count
# Compute the sum of TF-IDF scores for each term in the other values
tfidf_other_sum = 0
for other_value in values:
if other_value != value:
# Filter the dataframe for the other value in filter_column
other_data = dataframe[dataframe[filter_column] == other_value]
# Convert the tokenized text column to a list of space-separated strings
text_data_other = [' '.join(words) for words in other_data[text_column]]
# Transform the tokenized text column for the other value using the fitted TfidfVectorizer
tfidf_other = tfidf_vect.transform(text_data_other)
# Compute the sum of TF-IDF scores for each term in the other value
tfidf_other = tfidf_other.sum(axis=0)
# normalize the TF-IDF scores by the total count
total_count = tfidf_other.sum()
tfidf_other = tfidf_other / total_count
# Add the normalized TF-IDF scores to the running sum
tfidf_other_sum += tfidf_other
# Compute the average of the other values' TF-IDF scores for each term
tfidf_other_avg = tfidf_other_sum / (len(values) - 1)
# Compute the difference in TF-IDF scores between the given value and the average of the other values
tfidf_diff = tfidf_filter - tfidf_other_avg
# Create a list of tuples with the term and its TF-IDF score difference
keywords = [(term, tfidf_diff[0, index]) for term, index in tfidf_vect.vocabulary_.items()]
# Filter out terms that have negative or zero TF-IDF score differences
#keywords = [kw for kw in keywords if kw[1] > 0]
# Sort the keywords based on their TF-IDF score difference
keywords.sort(key=lambda x: x[1], reverse=True)
# Count the occurrence of each keyword in the group
group_text_data = ' '.join(text_data_filter)
group_word_count = Counter(group_text_data.split())
# Compute the total count of all words in the group
total_count = sum(group_word_count.values())
# Create a list of tuples with the term, its TF-IDF score difference, count in the group, and relative count
keywords_with_count_rel = [(kw[0], kw[1], group_word_count[kw[0]], group_word_count[kw[0]] / total_count) for kw in keywords]
# Store the top n keywords for the given value in the dictionary with relative count
trending_keywords[value] = keywords_with_count_rel[:n]
# Return the dictionary of top keywords for each value in filter_column
return trending_keywords
############### Transform TF-IDF results for output ###########
def trending_keywords_to_dataframe(trending_keywords):
"""
Converts a dictionary of trending keywords to a Pandas DataFrame.
Parameters:
trending_keywords (dict): A dictionary with group names as keys and lists of
(keyword, TF-IDF score, count, relative count) tuples
as values.
Returns:
Pandas DataFrame: A DataFrame with columns 'Group', 'Keyword', 'Scores',
'Total_Group_Count', and 'Relative_Group_Count'. Each row
corresponds to a single keyword for a single group, with
the values of the respective columns populated by the
corresponding elements of the input dictionary.
"""
rows = []
for group, keywords in trending_keywords.items():
for keyword, tfidf_score, count ,relativecount in keywords:
row = {'Group': group, 'Keyword': keyword, 'Scores': tfidf_score, 'Total_Group_Count': count,'Relative_Group_Count':relativecount}
rows.append(row)
return pd.DataFrame(rows)
#####################################################
def convert_resulttable_to_wide_format(df):
"""Converts a result table DataFrame from long to wide format.
Args:
df (pandas.DataFrame): A DataFrame with columns 'Group', 'Keyword', and 'Keyword Index'.
Returns:
pandas.DataFrame: A DataFrame in wide format with columns for each keyword in each group, and rows for each group.
The input DataFrame must have the following columns:
- 'Group': A categorical variable indicating the group to which the keyword belongs.
- 'Keyword': The keyword itself.
- 'Keyword Index': A unique identifier for each keyword within each group, in the format 'Keyword <index>'.
This function pivots the input DataFrame to a wide format, with columns for each keyword in each group and rows for each group.
The columns are sorted in the same order as the input DataFrame, and the columns are renamed to 'Keyword <index>'.
The resulting DataFrame is sorted by the order of the groups in the input DataFrame.
Example:
>>> df = pd.DataFrame({'Group': ['A', 'A', 'B', 'B'], 'Keyword': ['foo', 'bar', 'baz', 'qux'],
... 'Keyword Index': ['Keyword 1', 'Keyword 2', 'Keyword 1', 'Keyword 2']})
>>> convert_resulttable_to_wide_format(df)
Group Keyword 1 Keyword 2
0 A foo bar
1 B baz qux
"""
# Add a column to indicate the keyword index
df['Keyword Index'] = 'Keyword ' + (df.groupby('Group').cumcount() + 1).astype(str)
# Get the order of the groups in the input DataFrame
group_order = df['Group'].unique()
# Pivot the DataFrame to a wide format without sorting the columns
wide_df = df.pivot(index='Group', columns='Keyword Index', values='Keyword')
# Sort the pivoted DataFrame using the order of the groups in the input DataFrame
wide_df = wide_df.loc[group_order]
# Get the order of the columns in the input DataFrame
column_order = df['Keyword Index'].unique()
column_order.sort()
wide_df = wide_df[column_order] # Use the same order of columns as the input DataFrame
# Rename the columns
wide_df.columns = [f'Keyword {i}' for i in range(1, wide_df.shape[1] + 1)]
# Reset the index
wide_df = wide_df.reset_index()
return wide_df
#####################################################
def create_export_table(df, filename=None):
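"""Render the dataframe as a Plotly table, optionally write it to an HTML file, and display it."""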
fig = go.Figure(data=[go.Table(
header=dict(values=list(df.columns)),
cells=dict(values=[df[col] for col in df.columns]))
])
if filename is not None:
fig.write_html(filename)
fig.show()
#####################################################
def export_table_to_xlsx(df, filename=None):
if filename is not None:
df.to_excel(filename, index=False)
else:
# Display the table if filename is not provided
fig = go.Figure(data=[go.Table(
header=dict(values=list(df.columns)),
cells=dict(values=[df[col] for col in df.columns]))
])
fig.show()
#####################################
######## Text Preprocessing ########
def preprocess_text(df, text_column, custom_stopwords=None):
"""
Preprocesses text in a DataFrame column by performing the following steps:
- Lowercases the text
- Removes stop words and custom stopwords (if provided)
- Removes numbers and special characters
- Tokenizes the text using the German language model provided by spaCy
- Adds an empty placeholder column for lemmatized text (lemmatization itself is not performed here)
- Separates the text into nouns, adjectives, verbs, and a combination of nouns, adjectives, and verbs
Args:
- df (pandas.DataFrame): DataFrame containing the text to be preprocessed
- text_column (str): Name of the column containing the text to be preprocessed
- custom_stopwords (list): List of custom stopwords to be removed from the text. Default is None.
Returns:
- df (pandas.DataFrame): DataFrame with the preprocessed text and additional columns for tokenized text,
a placeholder 'lemmatized' column, nouns, adjectives, verbs, and a combination of nouns, adjectives,
and verbs.
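Example (illustrative; requires the 'de_core_news_lg' spaCy model to be installed):
>>> df = pd.DataFrame({'text': ['Das Hotel war sehr schön und das Personal freundlich.']})
>>> df = preprocess_text(df, 'text', custom_stopwords=['hotel'])
>>> # df now additionally contains 'text_preprocessed', 'nouns', 'adjectives', 'verbs', ...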
"""
nlp = spacy.load("de_core_news_lg")
words_to_remove = set(STOP_WORDS) | set(custom_stopwords) if custom_stopwords else set(STOP_WORDS)
# Lowercase the text, remove stop words and custom stopwords, and remove numbers and special characters
text_preprocessed = df[text_column].str.lower().apply(
lambda x: " ".join([re.sub(r'[^\w\s]', '', word) for word in re.sub(r'([a-zA-Z]+)-([a-zA-Z]+)', r'\1 \2', x).split() if word not in words_to_remove and not re.search(r'\d', word)])
)
tokenized = []
nouns = []
adjectives = []
verbs = []
nouns_adjectives_and_verbs = []
for text in text_preprocessed:
doc = nlp(text)
if not doc:
tokenized.append([])
nouns.append([])
adjectives.append([])
verbs.append([])
nouns_adjectives_and_verbs.append([])
continue
tokenized_text = []
nouns_text = []
adjectives_text = []
verbs_text = []
nouns_adjectives_and_verbs_text = []
for token in doc:
if not token.text:
continue
token_text = token.text.lower()
if token_text not in words_to_remove:
tokenized_text.append(token_text)
if token.pos_ == "NOUN":
nouns_text.append(token_text)
nouns_adjectives_and_verbs_text.append(token_text)
if token.pos_ == "ADJ":
adjectives_text.append(token_text)
nouns_adjectives_and_verbs_text.append(token_text)
if token.pos_ == "VERB":
verbs_text.append(token_text)
nouns_adjectives_and_verbs_text.append(token_text)
tokenized.append(tokenized_text)
nouns.append(nouns_text)
adjectives.append(adjectives_text)
verbs.append(verbs_text)
nouns_adjectives_and_verbs.append(nouns_adjectives_and_verbs_text)
df["text_preprocessed"] = text_preprocessed
df["text_preprocessed_tokenized"] = tokenized
df["lemmatized"] = None
df["nouns"] = nouns
df["adjectives"] = adjectives
df["verbs"] = verbs
df["nouns_adjectives_and_verbs"] = nouns_adjectives_and_verbs
return df
########### Helper Functions ##########
def split_dataframe(df, datetime_col, train_frac, test_frac, val_frac):
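"""
Sort the dataframe by `datetime_col` and split it into train/test/validation subsets with the
given fractions, stratifying each split on the datetime column. Returns (train_df, test_df, val_df).
"""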
# sort the dataframe by the selected datetime column
df = df.sort_values(datetime_col)
# calculate the number of samples for each subset
n = len(df)
train_size = round(n * train_frac)
test_size = round(n * test_frac)
val_size = round(n * val_frac)
# split the dataframe into train and test+val subsets
train_df, test_val_df = train_test_split(df, test_size=(test_size + val_size), random_state=22, stratify=df[datetime_col])
# split the test+val dataframe into test and val subsets
test_df, val_df = train_test_split(test_val_df, test_size=val_size, random_state=22, stratify=test_val_df[datetime_col])
return train_df, test_df, val_df
#############################################
def join_list_of_list(list_of_list):
"""
This function takes in a list of lists and returns a list of strings where each string is made by joining the elements of the corresponding list.
Parameters:
- list_of_list(List[List[Any]]): List of lists whose elements to be joined
Returns:
List[str]: List of strings where each string is made by joining the elements of the corresponding list.
"""
return [' '.join(map(str,l)) for l in list_of_list]
#########################################################
def reduce_dataframe(df, group_column, filter_value):
"""
Reduces a Pandas dataframe based on a specific column and value.
Parameters:
df (Pandas dataframe): The dataframe to reduce.
group_column (str): The name of the column to group the dataframe by.
filter_value: The value to filter the dataframe on.
Returns:
A reduced Pandas dataframe.
"""
# Group the dataframe by the specified column
grouped = df.groupby(group_column)
# Filter the groups based on the filter value
filtered_groups = {group: data for group, data in grouped if filter_value in data[group_column].values}
# Combine the filtered groups into a new dataframe
reduced_df = pd.concat(filtered_groups.values())
return reduced_df
#######################################################
def check_column_values(df, col1, col2):
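"""Return a list of booleans, one per row, that is True when at least one of the two columns is non-null."""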
# Check if either of the two columns contains a non-null value
result = (df[col1].notnull() | df[col2].notnull()).tolist()
return result
##############################################
############### Pandas Profiling ############
##############################################
def generate_profiling_report(data_file="DataText", folder_path="data/", report_title=None, report_file="html/ProfilingDataText.html", lazy=False, dark_mode=False, minimal=True):
"""
Generates a pandas profiling report for the given data file.
Parameters:
- data_file (str): The name of the data file to be used for generating the report. Default is "DataText".
- folder_path (str): The path of the folder where the data file is located. Default is "data/".
- report_title (str): The title to be used for the report. Default is None.
- report_file (str): The filepath and name of the report file. Default is "html/ProfilingDataText.html"
- lazy (bool): Whether to load the data in a lazy or non-lazy way. Default is False
- dark_mode (bool): Whether to use the dark mode or not. Default is False
- minimal (bool): Whether to produce a minimal report or not. Default is True
Returns:
None
"""
# import data
df = pd.read_feather(folder_path + data_file)
if report_title is None:
report_title = data_file
# Pandas Profiling TextData
profile = ProfileReport(
df,
title=report_title,
lazy=lazy,
dark_mode=dark_mode,
minimal=minimal,
)
profile.to_file(report_file)
############################################################
############## Train Topic Model with BERTopic #############
############################################################
def fit_berttopic(target_dir: str, docs: list, embedding_model=None, min_topic_size: int = 50, stop_words=None) -> None:
"""
Train and save a BERTopic model.
Args:
target_dir (str): Directory to save the trained model.
docs (list): List of documents to train the model on.
embedding_model (str or object): Name of the embedding model or an object representing the model.
min_topic_size (int): Minimum size of a topic (HDBSCAN clusters).
stop_words (list): List of stopwords to use for keyword extraction.
"""
if not isinstance(docs, list):
raise ValueError("docs parameter must be a list")
if not os.path.exists(target_dir):
try:
logging.info(f"Fitting BERTopic model with {embedding_model}...")
german_stop_words = stop_words or stopwords.words("german")
vectorizer = CountVectorizer(stop_words=german_stop_words)
model = BERTopic(
language="german",
vectorizer_model=vectorizer,
embedding_model=embedding_model,
min_topic_size=min_topic_size,
#nr_topics="auto"
#umap_model=PCA(n_components=5) #to use PCA as dim reduction
)
topics, probs = model.fit_transform(docs)
#model.fit_transform(docs)
new_topics = model.reduce_outliers(docs, topics) # Reduce outliers
model.update_topics(docs, topics=new_topics,vectorizer_model=vectorizer) # update Model
model.save(target_dir)
logging.info(f"Model saved to {target_dir}")
except Exception as e:
logging.error(f"Error while fitting BERTopic model: {str(e)}")
raise
else:
logging.info(f"Model already trained at {target_dir}")
def fit_berttopic_if_not_exists(target_dir: str, docs: list, embedding_model=None, min_topic_size: int = 50, stop_words=None) -> None:
"""
Wrapper function for fit_berttopic to avoid retraining a model that has already been trained.
Args:
target_dir (str): Directory to save the trained model.
docs (list): List of documents to train the model on.
embedding_model (str or object): Name of the embedding model or an object representing the model.
min_topic_size (int): Minimum size of a topic (HDBSCAN clusters).
stop_words (list): List of stopwords to use for keyword extraction.
"""
if not isinstance(docs, list):
raise ValueError("docs parameter must be a list")
if os.path.exists(target_dir):
logging.info(f"Model already trained at {target_dir}")
return
fit_berttopic(target_dir, docs, embedding_model, min_topic_size, stop_words)
################## Extract top 10 topic keywords from topic model ##############
def topic_model_top_10_keywords_export(model, modelname, directory_path):
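"""
Combine the top-10 keywords per topic with the topic frequencies of a fitted BERTopic model,
add absolute and percentage count columns, export the table to
'<directory_path>/topic_keywords_<modelname>.xlsx' and return it as a DataFrame.
"""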
df_topic_keywords = get_topic_keywords_df(model)
df_topic_freq = model.get_topic_freq()
df_topics = pd.merge(df_topic_keywords, df_topic_freq, left_on='topic_id', right_on='Topic', how='left')
total_count = df_topics['Count'].sum()
df_topics['Count %'] = ((df_topics['Count'] / total_count) * 100).round(1)
# reorder columns to place the new column as the second column
df_topics = df_topics.reindex(columns=['Topic', 'Count', 'Count %', 'keyword 1', 'keyword 2', 'keyword 3', 'keyword 4', 'keyword 5', 'keyword 6', 'keyword 7', 'keyword 8', 'keyword 9', 'keyword 10'])
file_path = directory_path + "/topic_keywords_"+ modelname+".xlsx"
df_topics.to_excel(file_path, index=False)
return df_topics
# def topic_model_top_10_keywords_export(model, modelname, directory_path):
# df_topic_keywords = get_topic_keywords_df(model)
# df_topic_freq = model.get_topic_freq()
# df_topics = pd.merge(df_topic_keywords, df_topic_freq, left_on='topic_id', right_on='Topic', how='left')
# # reorder columns to place the new column as the second column
# df_topics = df_topics.reindex(columns=['Topic', 'Count', 'keyword 1', 'keyword 2', 'keyword 3', 'keyword 4', 'keyword 5', 'keyword 6', 'keyword 7', 'keyword 8', 'keyword 9', 'keyword 10'])
# file_path = directory_path + "/topic_keywords_"+ modelname+".xlsx"
# df_topics.to_excel(file_path, index=False)
# return df_topics
################
def get_topic_keywords_df(topic_model):
"""
Returns a DataFrame with the topics and their keywords.
Parameters:
-----------
topic_model: object
A trained topic model object.
Returns:
--------
pandas.DataFrame
A DataFrame with the topics and their keywords.
"""
# get the topics and their keywords
topics = topic_model.get_topics()
# create an empty DataFrame to store the topics and their keywords
df = pd.DataFrame(columns=['topic_id', 'keyword 1', 'keyword 2', 'keyword 3', 'keyword 4', 'keyword 5', 'keyword 6', 'keyword 7', 'keyword 8', 'keyword 9', 'keyword 10'])
# loop through each topic and its keywords and add them to the DataFrame
for i, (topic_id, topic) in enumerate(topics.items()):
keywords = [word for word, _ in topic]
df.loc[i] = [topic_id] + keywords + ['']*(10-len(keywords))
# set the topic_id column as the index
# df.set_index('topic_id', inplace=True)
# df.reset_index()
return df
###################################################
############## Topic Model Evaluation #############
def compute_coherence_scores(documents: np.ndarray, bert_models: List[str], coherence_method: str = "u_mass", path: str = "") -> pd.Series:
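"""
Load each saved BERTopic model listed in `bert_models` (from `path`), assign topics to the cleaned
documents and compute a topic-coherence score (default: 'u_mass') per model. Returns a pandas
Series of scores indexed by model name; models that fail to evaluate are skipped with a message.
"""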
cleaned_documents = [doc.replace("\n", " ") for doc in documents]
cleaned_documents = [doc.replace("\t", " ") for doc in cleaned_documents]
cleaned_documents = [doc if doc != "" else "emptydoc" for doc in cleaned_documents]
def compute_coherence_score(topics: List[str], topic_words: List[List[str]], coherence_method: str = "u_mass") -> float:
# Processing taken from BERT model but should be agnostic
vectorizer = CountVectorizer()
# Preprocess Documents
documents = pd.DataFrame({"Document": cleaned_documents, "ID": range(len(cleaned_documents)), "Topic": topics})
documents_per_topic = documents.groupby(['Topic'], as_index=False).agg({'Document': ' '.join})
cleaned_docs = documents_per_topic.Document.values
# Vectorizer
vectorizer.fit_transform(cleaned_docs)
analyzer = vectorizer.build_analyzer()
# Extract features for Topic Coherence evaluation
words = vectorizer.get_feature_names_out()
tokens = [analyzer(doc) for doc in cleaned_docs]
dictionary = corpora.Dictionary(tokens)
corpus = [dictionary.doc2bow(token) for token in tokens]
# Evaluate
coherence_model = CoherenceModel(topics=topic_words, texts=tokens, corpus=corpus, dictionary=dictionary, coherence=coherence_method)
coherence = coherence_model.get_coherence()
return coherence
scores = dict()
for model_name in bert_models:
try:
model = BERTopic.load(path + model_name)
topics = model.get_document_info(cleaned_documents)["Topic"]
topic_words = [[words for words, _ in model.get_topic(topic)] for topic in range(len(set(topics)) - 1)]
coherence = compute_coherence_score(topics=topics, topic_words=topic_words, coherence_method=coherence_method)
print(f"BERT Model {model_name}: {coherence}")
scores[model_name] = coherence
except Exception as e:
print(f"Failed to evaluate model {model_name}: {str(e)}")
scores_series = pd.Series(scores)
return scores_series
def get_topic_ratios(df, timeframe_col, name_col, topic_col):
"""
Compute the ratio of counts for each combination of CustomName and Topic,
aggregated by quarter, relative to the total count for the quarter.
Parameters:
-----------
df : pandas.DataFrame
The input data containing the columns 'yearquarter', 'CustomName', 'Topic'.
timeframe_col : str
The name of the column containing the timeframe information like quarteryear.
name_col : str
The name of the column containing the CustomName information.
topic_col : str
The name of the column containing the Topic information.
Returns:
--------
pandas.DataFrame
A new DataFrame with the columns 'yearquarter', 'CustomName', 'Topic', 'count_x', 'count_y', 'Topic_Ratio'.
"""
timeframe_col = timeframe_col
# Get totals for each quarter
df_counts_quarter = pd.DataFrame(df.groupby([timeframe_col]).size().reset_index(name='count_y'))
# Aggregate counts by quarter, CustomName, and Topic
df_topic_quarter = pd.DataFrame(df.groupby([timeframe_col, name_col, topic_col]).size().reset_index(name='count_x'))
# Merge DataFrames
df_topic_quarter = df_topic_quarter.merge(df_counts_quarter, on=timeframe_col, how='left')
# Compute Topic_Ratio
df_topic_quarter['Topic_Ratio'] = (df_topic_quarter['count_x'] / df_topic_quarter['count_y'])
return df_topic_quarter[[timeframe_col, 'CustomName', 'Topic', 'count_x', 'count_y', 'Topic_Ratio']]
def compute_categorical_counts(df, categorical_col, name_col):
"""
Compute the counts and relative counts for each combination of CustomName and Topic,
aggregated by the categorical column.
Parameters:
-----------
df : pandas.DataFrame
The input data containing the columns for the categorical_col, name_col, and topic_col.
categorical_col : str
The name of the column containing the categorical information (e.g. year, quarter, month, etc.).
name_col : str
The name of the column containing the CustomName information.
topic_col : str
The name of the column containing the Topic information.
Returns:
--------
pandas.DataFrame
A new DataFrame with the columns 'categorical_col', 'CustomName', 'Topic', 'count', and 'relative_count'.
"""
# Aggregate counts by categorical value, CustomName, and Topic
df_topic_categorical = pd.DataFrame(df.groupby([categorical_col, name_col]).size().reset_index(name='count'))
# Compute total count for each categorical value
df_total_count = pd.DataFrame(df.groupby([categorical_col]).size().reset_index(name='total_count'))
# Merge total count into topic counts DataFrame
df_topic_categorical = df_topic_categorical.merge(df_total_count, on=categorical_col)
# Compute relative count
df_topic_categorical['relative_count'] = df_topic_categorical['count'] / df_topic_categorical['total_count']
return df_topic_categorical[[categorical_col, name_col, 'count', 'relative_count']]
# def create_grouped_barchart(df, x_col, y_col, color_col, color_discrete_sequence, ignore_group=None, title='', xaxis_title='', yaxis_title='', legend_title='', template=''):
# if ignore_group:
# df = df[df[color_col] != ignore_group]
# fig = px.bar(df,
# x=x_col,
# y=y_col,
# color=color_col,
# hover_data=[color_col, y_col,x_col],
# color_discrete_sequence=color_discrete_sequence,
# template=template,
# barmode='group')
# fig.update_layout(
# width=900,
# height=600,
# title=title,
# yaxis_title=yaxis_title,
# xaxis_title=xaxis_title,
# legend_title=legend_title,
# )
# fig.update_xaxes(showgrid=False, tickmode='linear', tickangle=0, tickfont=dict(size=12), tickwidth=1)
# fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='rgba(0,0,0,0.1)')
# # wrap long x-axis labels on two lines and rotate by 270 degrees
# fig.update_layout(
# xaxis=dict(
# tickmode='array',
# tickvals=list(range(len(df[x_col]))),
# ticktext=[x.replace(' ', '<br>') if len(x) > 40 else x for x in df[x_col]],
# automargin=True,
# tickangle=270,
# tickfont=dict(size=12),
# ),
# legend=dict(orientation='h', yanchor='top', y=1.1, xanchor='left', x=0.5)
# )
# fig.show()
def create_grouped_radar(df, x_col, y_col, color_col, color_discrete_sequence, ignore_group=None, title='', xaxis_title='', yaxis_title='', legend_title='', template=''):
fig = go.Figure()
for color in df[color_col].unique():
if color != ignore_group:
fig.add_trace(go.Scatterpolar(
r=df[df[color_col] == color][y_col].values.tolist(),
theta=df[df[color_col] == color][x_col].values.tolist(),
fill='none',
name=color,
line=dict(color=color_discrete_sequence[df[color_col].unique().tolist().index(color)]),
showlegend=True,
marker=dict(size=4)
))
fig.update_layout(
width=900,
height=650,
polar=dict(
radialaxis=dict(
visible=True,
range=[0, df[y_col].max()],
showgrid=True,
gridwidth=1,
gridcolor='rgba(0,0,0,0.1)'
),
angularaxis=dict(
visible=True,
tickmode='linear',
tickfont=dict(size=10),
showticklabels=True,
gridcolor='rgba(0,0,0,0.1)'
)
),
showlegend=True,
title=title,
legend_title=legend_title,
xaxis_title=xaxis_title,
yaxis_title=yaxis_title,
template=template
)
# fig.update_layout(legend=dict(orientation='h', yanchor='top', y=1.1, xanchor='center', x=0.5))
fig.show()
| [
"plotly_white"
] |
2024-01-10 | TestSavant/SalesGPT | salesgpt~salesgptapi.py | import json
from langchain.chat_models import ChatOpenAI
from salesgpt.agents import SalesGPT
GPT_MODEL = "gpt-3.5-turbo-0613"
# GPT_MODEL_16K = "gpt-3.5-turbo-16k-0613"
class SalesGPTAPI:
USE_TOOLS = False
def __init__(
self, config_path: str, verbose: bool = False, max_num_turns: int = 10
):
self.config_path = config_path
self.verbose = verbose
self.max_num_turns = max_num_turns
self.llm = ChatOpenAI(temperature=0.2, model_name=GPT_MODEL)
def do(self, conversation_history: list[str], human_input=None):
if self.config_path == "":
print("No agent config specified, using a standard config")
# USE_TOOLS = True
if self.USE_TOOLS:
sales_agent = SalesGPT.from_llm(
self.llm,
use_tools=True,
product_catalog="examples/sample_product_catalog.txt",
salesperson_name="Ted Lasso",
verbose=self.verbose,
)
else:
sales_agent = SalesGPT.from_llm(self.llm, verbose=self.verbose)
else:
with open(self.config_path, "r") as f:
config = json.load(f)
if self.verbose:
print(f"Agent config {config}")
sales_agent = SalesGPT.from_llm(self.llm, verbose=self.verbose, **config)
# check turns
current_turns = len(conversation_history) + 1
if current_turns >= self.max_num_turns:
# todo:
# if self.verbose:
print("Maximum number of turns reached - ending the conversation.")
return "<END_OF_>"
# seed
sales_agent.seed_agent()
sales_agent.conversation_history = conversation_history
if human_input is not None:
sales_agent.human_step(human_input)
# sales_agent.determine_conversation_stage()
# print('=' * 10)
# print(f"conversation_stage_id:{sales_agent.conversation_stage_id}")
sales_agent.step()
# end conversation
if "<END_OF_CALL>" in sales_agent.conversation_history[-1]:
print("Sales Agent determined it is time to end the conversation.")
return "<END_OF_CALL>"
reply = sales_agent.conversation_history[-1]
if self.verbose:
print("=" * 10)
print(f"{sales_agent.salesperson_name}:{reply}")
return reply.split(": ")
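# Minimal usage sketch (illustrative; assumes OPENAI_API_KEY is set in the environment):
# api = SalesGPTAPI(config_path="", verbose=True)
# reply = api.do(conversation_history=[], human_input="Hi, what do you sell?")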
| [] |
2024-01-10 | TestSavant/SalesGPT | salesgpt~agents.py | from copy import deepcopy
from typing import Any, Callable, Dict, List, Union
from langchain import LLMChain
from langchain.agents import AgentExecutor, LLMSingleActionAgent
from langchain.chains import RetrievalQA
from langchain.chains.base import Chain
from langchain.llms import BaseLLM
from langchain.llms.base import create_base_retry_decorator
from pydantic import BaseModel, Field
from salesgpt.chains import SalesConversationChain, StageAnalyzerChain
from salesgpt.logger import time_logger
from salesgpt.parsers import SalesConvoOutputParser
from salesgpt.prompts import SALES_AGENT_TOOLS_PROMPT
from salesgpt.stages import CONVERSATION_STAGES
from salesgpt.templates import CustomPromptTemplateForTools
from salesgpt.tools import get_tools, setup_knowledge_base
def _create_retry_decorator(llm: Any) -> Callable[[Any], Any]:
import openai
errors = [
openai.error.Timeout,
openai.error.APIError,
openai.error.APIConnectionError,
openai.error.RateLimitError,
openai.error.ServiceUnavailableError,
]
return create_base_retry_decorator(error_types=errors, max_retries=llm.max_retries)
class SalesGPT(Chain, BaseModel):
"""Controller model for the Sales Agent."""
conversation_history: List[str] = []
conversation_stage_id: str = "1"
current_conversation_stage: str = CONVERSATION_STAGES.get("1")
stage_analyzer_chain: StageAnalyzerChain = Field(...)
sales_agent_executor: Union[AgentExecutor, None] = Field(...)
knowledge_base: Union[RetrievalQA, None] = Field(...)
sales_conversation_utterance_chain: SalesConversationChain = Field(...)
conversation_stage_dict: Dict = CONVERSATION_STAGES
use_tools: bool = False
salesperson_name: str = "Ted Lasso"
salesperson_role: str = "Business Development Representative"
company_name: str = "Sleep Haven"
company_business: str = "Sleep Haven is a premium mattress company that provides customers with the most comfortable and supportive sleeping experience possible. We offer a range of high-quality mattresses, pillows, and bedding accessories that are designed to meet the unique needs of our customers."
company_values: str = "Our mission at Sleep Haven is to help people achieve a better night's sleep by providing them with the best possible sleep solutions. We believe that quality sleep is essential to overall health and well-being, and we are committed to helping our customers achieve optimal sleep by offering exceptional products and customer service."
conversation_purpose: str = "find out whether they are looking to achieve better sleep via buying a premier mattress."
conversation_type: str = "call"
def retrieve_conversation_stage(self, key):
return self.conversation_stage_dict.get(key, "1")
@property
def input_keys(self) -> List[str]:
return []
@property
def output_keys(self) -> List[str]:
return []
@time_logger
def seed_agent(self):
# Step 1: seed the conversation
self.current_conversation_stage = self.retrieve_conversation_stage("1")
self.conversation_history = []
@time_logger
def determine_conversation_stage(self):
self.conversation_stage_id = self.stage_analyzer_chain.run(
conversation_history="\n".join(self.conversation_history).rstrip("\n"),
conversation_stage_id=self.conversation_stage_id,
conversation_stages="\n".join(
[
str(key) + ": " + str(value)
for key, value in CONVERSATION_STAGES.items()
]
),
)
print(f"Conversation Stage ID: {self.conversation_stage_id}")
self.current_conversation_stage = self.retrieve_conversation_stage(
self.conversation_stage_id
)
print(f"Conversation Stage: {self.current_conversation_stage}")
def human_step(self, human_input):
# process human input
human_input = "User: " + human_input + " <END_OF_TURN>"
self.conversation_history.append(human_input)
@time_logger
def step(
self, return_streaming_generator: bool = False, model_name="gpt-3.5-turbo-0613"
):
"""
Args:
return_streaming_generator (bool): whether or not return
streaming generator object to manipulate streaming chunks in downstream applications.
"""
if not return_streaming_generator:
self._call(inputs={})
else:
return self._streaming_generator(model_name=model_name)
@time_logger
def astep(
self, return_streaming_generator: bool = False, model_name="gpt-3.5-turbo-0613"
):
"""
Args:
return_streaming_generator (bool): whether or not return
streaming generator object to manipulate streaming chunks in downstream applications.
"""
if not return_streaming_generator:
self._acall(inputs={})
else:
return self._astreaming_generator(model_name=model_name)
@time_logger
def acall(self, *args, **kwargs):
raise NotImplementedError("This method has not been implemented yet.")
@time_logger
def _prep_messages(self):
"""
Helper function to prepare messages to be passed to a streaming generator.
"""
prompt = self.sales_conversation_utterance_chain.prep_prompts(
[
dict(
conversation_stage=self.current_conversation_stage,
conversation_history="\n".join(self.conversation_history),
salesperson_name=self.salesperson_name,
salesperson_role=self.salesperson_role,
company_name=self.company_name,
company_business=self.company_business,
company_values=self.company_values,
conversation_purpose=self.conversation_purpose,
conversation_type=self.conversation_type,
)
]
)
inception_messages = prompt[0][0].to_messages()
message_dict = {"role": "system", "content": inception_messages[0].content}
if self.sales_conversation_utterance_chain.verbose:
print("\033[92m" + inception_messages[0].content + "\033[0m")
return [message_dict]
@time_logger
def _streaming_generator(self, model_name="gpt-3.5-turbo-0613"):
"""
Sometimes, the sales agent wants to take an action before the full LLM output is available.
For instance, if we want to do text to speech on the partial LLM output.
This function returns a streaming generator which can manipulate partial output from an LLM
in-flight of the generation.
Example:
>> streaming_generator = self._streaming_generator()
# Now I can loop through the output in chunks:
>> for chunk in streaming_generator:
Out: Chunk 1, Chunk 2, ... etc.
See: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb
"""
messages = self._prep_messages()
return self.sales_conversation_utterance_chain.llm.completion_with_retry(
messages=messages,
stop="<END_OF_TURN>",
stream=True,
model=model_name,
)
async def acompletion_with_retry(self, llm: Any, **kwargs: Any) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
return await _completion_with_retry(**kwargs)
async def _astreaming_generator(self, model_name="gpt-3.5-turbo-0613"):
"""
Asynchronous generator to reduce I/O blocking when dealing with multiple
clients simultaneously.
Sometimes, the sales agent wants to take an action before the full LLM output is available.
For instance, if we want to do text to speech on the partial LLM output.
This function returns a streaming generator which can manipulate partial output from an LLM
in-flight of the generation.
Example:
>> streaming_generator = self._astreaming_generator()
# Now I can loop through the output in chunks:
>> async for chunk in streaming_generator:
await chunk ...
Out: Chunk 1, Chunk 2, ... etc.
See: https://github.com/openai/openai-cookbook/blob/main/examples/How_to_stream_completions.ipynb
"""
messages = self._prep_messages()
return await self.acompletion_with_retry(
llm=self.sales_conversation_utterance_chain.llm,
messages=messages,
stop="<END_OF_TURN>",
stream=True,
model=model_name,
)
def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
"""Run one step of the sales agent."""
# Generate agent's utterance
# if use tools
if self.use_tools:
ai_message = self.sales_agent_executor.run(
input="",
conversation_stage=self.current_conversation_stage,
conversation_history="\n".join(self.conversation_history),
salesperson_name=self.salesperson_name,
salesperson_role=self.salesperson_role,
company_name=self.company_name,
company_business=self.company_business,
company_values=self.company_values,
conversation_purpose=self.conversation_purpose,
conversation_type=self.conversation_type,
)
else:
# else
ai_message = self.sales_conversation_utterance_chain.run(
conversation_stage=self.current_conversation_stage,
conversation_history="\n".join(self.conversation_history),
salesperson_name=self.salesperson_name,
salesperson_role=self.salesperson_role,
company_name=self.company_name,
company_business=self.company_business,
company_values=self.company_values,
conversation_purpose=self.conversation_purpose,
conversation_type=self.conversation_type,
)
# Add agent's response to conversation history
agent_name = self.salesperson_name
ai_message = agent_name + ": " + ai_message
if "<END_OF_TURN>" not in ai_message:
ai_message += " <END_OF_TURN>"
self.conversation_history.append(ai_message)
print(ai_message.replace("<END_OF_TURN>", ""))
return {}
@classmethod
@time_logger
def from_llm(cls, llm: BaseLLM, verbose: bool = False, **kwargs) -> "SalesGPT":
"""Initialize the SalesGPT Controller."""
stage_analyzer_chain = StageAnalyzerChain.from_llm(llm, verbose=verbose)
if (
"use_custom_prompt" in kwargs.keys()
and kwargs["use_custom_prompt"] == "True"
):
use_custom_prompt = deepcopy(kwargs["use_custom_prompt"])
custom_prompt = deepcopy(kwargs["custom_prompt"])
# clean up
del kwargs["use_custom_prompt"]
del kwargs["custom_prompt"]
sales_conversation_utterance_chain = SalesConversationChain.from_llm(
llm,
verbose=verbose,
use_custom_prompt=use_custom_prompt,
custom_prompt=custom_prompt,
)
else:
sales_conversation_utterance_chain = SalesConversationChain.from_llm(
llm, verbose=verbose
)
if "use_tools" in kwargs.keys() and kwargs["use_tools"] == "True":
# set up agent with tools
product_catalog = kwargs["product_catalog"]
knowledge_base = setup_knowledge_base(product_catalog)
tools = get_tools(knowledge_base)
prompt = CustomPromptTemplateForTools(
template=SALES_AGENT_TOOLS_PROMPT,
tools_getter=lambda x: tools,
# This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
# This includes the `intermediate_steps` variable because that is needed
input_variables=[
"input",
"intermediate_steps",
"salesperson_name",
"salesperson_role",
"company_name",
"company_business",
"company_values",
"conversation_purpose",
"conversation_type",
"conversation_history",
],
)
llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
tool_names = [tool.name for tool in tools]
# WARNING: this output parser is NOT reliable yet
## It makes assumptions about output from LLM which can break and throw an error
output_parser = SalesConvoOutputParser(ai_prefix=kwargs["salesperson_name"])
sales_agent_with_tools = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nObservation:"],
allowed_tools=tool_names,
)
sales_agent_executor = AgentExecutor.from_agent_and_tools(
agent=sales_agent_with_tools, tools=tools, verbose=verbose
)
else:
sales_agent_executor = None
knowledge_base = None
return cls(
stage_analyzer_chain=stage_analyzer_chain,
sales_conversation_utterance_chain=sales_conversation_utterance_chain,
sales_agent_executor=sales_agent_executor,
knowledge_base=knowledge_base,
verbose=verbose,
**kwargs,
)
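# Minimal usage sketch (illustrative; the model name and temperature are assumptions):
# from langchain.chat_models import ChatOpenAI
# llm = ChatOpenAI(temperature=0.2, model_name="gpt-3.5-turbo-0613")
# sales_agent = SalesGPT.from_llm(llm, verbose=False)
# sales_agent.seed_agent()
# sales_agent.human_step("Hello, who is this?")
# sales_agent.determine_conversation_stage()
# sales_agent.step()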
| [
"\n",
"company_name",
"use_custom_prompt",
"company_values",
"conversation_history",
"company_business",
"conversation_purpose",
"input",
"conversation_type",
"salesperson_name",
"salesperson_role",
"custom_prompt",
"intermediate_steps"
] |
2024-01-10 | victordibia/llmx | llmx~generators~text~textgen.py | from ...utils import load_config
from .openai_textgen import OpenAITextGenerator
from .palm_textgen import PalmTextGenerator
from .cohere_textgen import CohereTextGenerator
import logging
logger = logging.getLogger("llmx")
def sanitize_provider(provider: str):
if provider.lower() == "openai" or provider.lower() == "default" or provider.lower(
) == "azureopenai" or provider.lower() == "azureoai":
return "openai"
elif provider.lower() == "palm" or provider.lower() == "google":
return "palm"
elif provider.lower() == "cohere":
return "cohere"
elif provider.lower() == "hf" or provider.lower() == "huggingface":
return "hf"
else:
raise ValueError(
f"Invalid provider '{provider}'. Supported providers are 'openai', 'hf', 'palm', and 'cohere'."
)
def llm(provider: str = None, **kwargs):
# Load config. This loads the default config from configs/config.default.yml
# if no config file path is specified in the environment variable LLMX_CONFIG_PATH.
config = load_config()
if provider is None:
# provider is not explicitly specified, use the default provider from the config file
provider = config["model"]["provider"] if "provider" in config["model"] else None
kwargs = config["model"]["parameters"] if "parameters" in config["model"] else {}
if provider is None:
logger.info("No provider specified. Defaulting to 'openai'.")
provider = "openai"
# sanitize provider
provider = sanitize_provider(provider)
# set the list of available models based on the config file
models = config["providers"][provider]["models"] if "providers" in config and provider in config["providers"] else {}
kwargs["provider"] = kwargs["provider"] if "provider" in kwargs else provider
kwargs["models"] = kwargs["models"] if "models" in kwargs else models
# print(kwargs)
if provider.lower() == "openai":
return OpenAITextGenerator(**kwargs)
elif provider.lower() == "palm":
return PalmTextGenerator(**kwargs)
elif provider.lower() == "cohere":
return CohereTextGenerator(**kwargs)
elif provider.lower() == "hf":
try:
import transformers
except ImportError:
raise ImportError(
"Please install the `transformers` package to use the HFTextGenerator class. pip install llmx[transformers]"
)
# Check if torch package is installed
try:
import torch
except ImportError:
raise ImportError(
"Please install the `torch` package to use the HFTextGenerator class. pip install llmx[transformers]"
)
from .hf_textgen import HFTextGenerator
return HFTextGenerator(**kwargs)
else:
raise ValueError(
f"Invalid provider '{provider}'. Supported providers are 'openai', 'hf', 'palm', and 'cohere'."
)
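# Minimal usage sketch (illustrative; assumes the relevant provider API key is configured):
# text_gen = llm(provider="openai")
# response = text_gen.generate(messages=[{"role": "user", "content": "Hello"}])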
| [] |
2024-01-10 | victordibia/llmx | llmx~generators~text~cohere_textgen.py | from typing import Dict, Union
import os
import cohere
from dataclasses import asdict
from .base_textgen import TextGenerator
from ...datamodel import TextGenerationConfig, TextGenerationResponse, Message
from ...utils import cache_request, get_models_maxtoken_dict, num_tokens_from_messages
from ..text.providers import providers
class CohereTextGenerator(TextGenerator):
def __init__(
self,
api_key: str = None,
provider: str = "cohere",
model: str = None,
models: Dict = None,
):
super().__init__(provider=provider)
api_key = api_key or os.environ.get("COHERE_API_KEY", None)
if api_key is None:
raise ValueError(
"Cohere API key is not set. Please set the COHERE_API_KEY environment variable."
)
self.client = cohere.Client(api_key)
self.model_max_token_dict = get_models_maxtoken_dict(models)
self.model_name = model or "command"
def format_messages(self, messages):
prompt = ""
for message in messages:
if message["role"] == "system":
prompt += message["content"] + "\n"
else:
prompt += message["role"] + ": " + message["content"] + "\n"
return prompt
def generate(
self,
messages: Union[list[dict], str],
config: TextGenerationConfig = TextGenerationConfig(),
**kwargs,
) -> TextGenerationResponse:
use_cache = config.use_cache
messages = self.format_messages(messages)
self.model_name = config.model or self.model_name
max_tokens = (
self.model_max_token_dict[self.model_name]
if self.model_name in self.model_max_token_dict else 1024)
cohere_config = {
"model": self.model_name,
"prompt": messages,
"max_tokens": config.max_tokens or max_tokens,
"temperature": config.temperature,
"k": config.top_k,
"p": config.top_p,
"num_generations": config.n,
"stop_sequences": config.stop,
"frequency_penalty": config.frequency_penalty,
"presence_penalty": config.presence_penalty,
}
# print("calling cohere ***************", config)
cache_key_params = cohere_config | {"messages": messages}
if use_cache:
response = cache_request(cache=self.cache, params=cache_key_params)
if response:
return TextGenerationResponse(**response)
co_response = self.client.generate(**cohere_config)
response_text = [
Message(
role="system",
content=x.text,
)
for x in co_response.generations
]
response = TextGenerationResponse(
text=response_text,
logprobs=[], # You may need to extract log probabilities from the response if needed
config=cohere_config,
usage={},
response=co_response,
)
cache_request(
cache=self.cache, params=cache_key_params, values=asdict(response)
)
return response
def count_tokens(self, text) -> int:
return num_tokens_from_messages(text)
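# Minimal usage sketch (illustrative; assumes COHERE_API_KEY is set in the environment;
# the generator is normally constructed via the llm() factory in textgen.py):
# generator = CohereTextGenerator()
# result = generator.generate(messages=[{"role": "user", "content": "Write a haiku about the sea."}])
# print(result.text[0].content)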
| [
"PLACEHOLDER\n",
"PLACEHOLDER: PLACEHOLDER\n"
] |
2024-01-10 | baolongbk29/llm-projects | Automated-Customer-Service~support.py | from langchain.schema import Document
from langchain.document_transformers import DoctranPropertyExtractor
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
class AICustomerSupport:
def __init__(self, openai_api_model):
self.openai_api_model = openai_api_model
self.properties = [
{
"name": "category",
"description": "The type of email this is.",
"type": "string",
"enum": [
"complaint",
"refund_request",
"product_feedback",
"customer_service",
"other",
],
"required": True,
},
{
"name": "mentioned_product",
"description": "The product mentioned in this email.",
"type": "string",
"required": True,
},
{
"name": "issue_description",
"description": "A brief explanation of the problem encountered with the product.",
"type": "string",
"required": True,
},
{
"name": "name",
"description": "Name of the person who wrote the email",
"type": "string",
"required": True,
},
]
def interpret_and_evaluate(self, extracted_properties):
template = f"""
You are an AI Customer Support that writes friendly emails back to customers. Adresse the user with his or her name {extracted_properties['name']} If no name was provided,
say 'Dear customer'.
The customer's email was categorized as {extracted_properties['category']}, and mentioned the product {extracted_properties['mentioned_product']}.
They described an issue: {extracted_properties['issue_description']}.
Please reply to this email in a friendly and helpful manner.
Write a response that includes an understanding of the problem, a proposed solution, and a polite sign-off.
Your sign-off name in the email is John Doe
"""
llm = ChatOpenAI(temperature=0)
prompt_template = PromptTemplate.from_template(template=template)
chain = LLMChain(llm=llm, prompt=prompt_template)
result = chain.predict(input="")
return result
def get_email_content(self, email_message):
maintype = email_message.get_content_maintype()
if maintype == "multipart":
for part in email_message.get_payload():
if part.get_content_maintype() == "text":
return part.get_payload()
elif maintype == "text":
return email_message.get_payload()
async def process_email(self, email_message):
email_content = self.get_email_content(email_message)
documents = [Document(page_content=email_content)]
property_extractor = DoctranPropertyExtractor(
properties=self.properties, openai_api_model=self.openai_api_model
)
extracted_document = await property_extractor.atransform_documents(
documents, properties=self.properties
)
extracted_properties = extracted_document[0].metadata["extracted_properties"]
evaluation_result = self.interpret_and_evaluate(extracted_properties)
return extracted_properties, evaluation_result
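# Minimal usage sketch (illustrative; assumes `email_message` is an email.message.Message
# instance, an OpenAI API key is configured, and the model name below is an assumption):
# import asyncio
# support = AICustomerSupport(openai_api_model="gpt-3.5-turbo")
# properties, reply = asyncio.run(support.process_email(email_message))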
| [
"Dear customer",
"\n You are an AI Customer Support that writes friendly emails back to customers. Adresse the user with his or her name PLACEHOLDER If no name was provided, \n say 'Dear customer'. \n The customer's email was categorized as PLACEHOLDER, and mentioned the product PLACEHOLDER. \n They described an issue: PLACEHOLDER. \n Please reply to this email in a friendly and helpful manner.\n\n Write a response that includes an understanding of the problem, a proposed solution, and a polite sign-off.\n Your sign-off name in the email is John Doe\n "
] |
2024-01-10 | baolongbk29/llm-projects | LangChain-EventDriven-Architecture~email_classifier~service.py | import logging
from confluent_kafka import Consumer, Producer
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import ResponseSchema
from langchain.output_parsers import StructuredOutputParser
from dotenv import load_dotenv
load_dotenv()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
category_schema = ResponseSchema(
name = "category",
description="Is the email an cancellation, review, or inquiry? Only provide these words.",
)
response_schemas = [category_schema]
parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = parser.get_format_instructions()
template = """
Interpret the text and evaluate it.
category: Is the email an cancellation, review, or inquiry? Only provide these words.
Return only the JSON, do not add ANYTHING, NO INTERPRETATION!
text: {input}
{format_instructions}
"""
prompt = ChatPromptTemplate.from_template(template=template)
chat = ChatOpenAI(temperature=0.0)
def delivery_report(err, msg):
if err is not None:
logger.error(f"Message delivery failed: {err}")
else:
logger.info(f"Message delivered to {msg.topic()}")
def classify_email(email):
messages = prompt.format_messages(
input=email,
format_instructions=format_instructions,
)
response = chat(messages)
output_dict = parser.parse(response.content)
return output_dict['category']
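# Illustrative check of the classifier on a raw email body (no Kafka required):
# category = classify_email("Hi, I would like to cancel my subscription.")
# print(category)  # expected to be one of: cancellation, review, inquiry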
c = Consumer({
'bootstrap.servers': 'kafka:29092',
'group.id': 'email-classifier',
'auto.offset.reset': 'earliest'
})
c.subscribe(['raw-emails'])
p = Producer({'bootstrap.servers': 'kafka:29092'})
while True:
msg = c.poll(1.0)
if msg is None:
continue
if msg.error():
logger.error(f"Consumer error: {msg.error()}")
continue
email = msg.value().decode('utf-8')
category = classify_email(email)
logger.info(f"Categorized as {category}")
logger.info(f"Classified email: {category}")
topic_map = {
"cancellation": "cancellation-emails",
"review": "review-emails",
"inquiry": "inquiry-emails",
}
p.produce(topic_map.get(category, 'unknown-category'), email, callback=delivery_report)
p.flush() | [
"\nInterpret the text and evaluate it.\ncategory: Is the email an cancellation, review, or inquiry? Only provide these words.\n\nReturn only the JSON, do not add ANYTHING, NO INTERPRETATION!\n\ntext: {input}\n\n{format_instructions}\n"
] |
2024-01-10 | Bluefin-Tuna/pdroid | pdroid~lib.py | """File for complete ML pipeline functions for easy and clean calling."""
from langchain_core.output_parsers import XMLOutputParser
from model import llm
from prompt import ProjectDroidPrompt
from utils import xml_to_md
def generate_ticket(title):
"""Function for creating a ticket given a title"""
prompt = ProjectDroidPrompt().create_prompt()
parser = XMLOutputParser()
chain = prompt | llm | parser
output = chain.invoke({"title": title})
return xml_to_md(output["xml"])
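# Minimal usage sketch (the ticket title below is hypothetical):
# ticket_md = generate_ticket("Add pagination to the issues list")
# print(ticket_md)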
| [] |
2024-01-10 | streamlit/llm-examples | app_test.py | import datetime
from unittest.mock import patch
from streamlit.testing.v1 import AppTest
from openai.types.chat import ChatCompletionMessage
from openai.types.chat.chat_completion import ChatCompletion, Choice
# See https://github.com/openai/openai-python/issues/715#issuecomment-1809203346
def create_chat_completion(response: str, role: str = "assistant") -> ChatCompletion:
return ChatCompletion(
id="foo",
model="gpt-3.5-turbo",
object="chat.completion",
choices=[
Choice(
finish_reason="stop",
index=0,
message=ChatCompletionMessage(
content=response,
role=role,
),
)
],
created=int(datetime.datetime.now().timestamp()),
)
@patch("openai.resources.chat.Completions.create")
def test_Chatbot(openai_create):
at = AppTest.from_file("Chatbot.py").run()
assert not at.exception
at.chat_input[0].set_value("Do you know any jokes?").run()
assert at.info[0].value == "Please add your OpenAI API key to continue."
JOKE = "Why did the chicken cross the road? To get to the other side."
openai_create.return_value = create_chat_completion(JOKE)
at.text_input(key="chatbot_api_key").set_value("sk-...")
at.chat_input[0].set_value("Do you know any jokes?").run()
print(at)
assert at.chat_message[1].markdown[0].value == "Do you know any jokes?"
assert at.chat_message[2].markdown[0].value == JOKE
assert at.chat_message[2].avatar == "assistant"
assert not at.exception
@patch("langchain.llms.OpenAI.__call__")
def test_Langchain_Quickstart(langchain_llm):
at = AppTest.from_file("pages/3_Langchain_Quickstart.py").run()
assert at.info[0].value == "Please add your OpenAI API key to continue."
RESPONSE = "1. The best way to learn how to code is by practicing..."
langchain_llm.return_value = RESPONSE
at.sidebar.text_input[0].set_value("sk-...")
at.button[0].set_value(True).run()
print(at)
assert at.info[0].value == RESPONSE
| [] |
2024-01-10 | streamlit/llm-examples | pages~1_File_Q%26A.py | import streamlit as st
import anthropic
with st.sidebar:
anthropic_api_key = st.text_input("Anthropic API Key", key="file_qa_api_key", type="password")
"[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/1_File_Q%26A.py)"
"[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
st.title("📝 File Q&A with Anthropic")
uploaded_file = st.file_uploader("Upload an article", type=("txt", "md"))
question = st.text_input(
"Ask something about the article",
placeholder="Can you give me a short summary?",
disabled=not uploaded_file,
)
if uploaded_file and question and not anthropic_api_key:
st.info("Please add your Anthropic API key to continue.")
if uploaded_file and question and anthropic_api_key:
article = uploaded_file.read().decode()
prompt = f"""{anthropic.HUMAN_PROMPT} Here's an article:\n\n<article>
{article}\n\n</article>\n\n{question}{anthropic.AI_PROMPT}"""
client = anthropic.Client(api_key=anthropic_api_key)
response = client.completions.create(
prompt=prompt,
stop_sequences=[anthropic.HUMAN_PROMPT],
model="claude-v1", #"claude-2" for Claude 2 model
max_tokens_to_sample=100,
)
st.write("### Answer")
st.write(response.completion)
| [
"f\"\"\"{anthropic.HUMAN_PROMPT} Here's an article:\\n\\n<article>\n {article}\\n\\n</article>\\n\\n{question}{anthropic.AI_PROMPT}"
] |
2024-01-10 | streamlit/llm-examples | pages~4_Langchain_PromptTemplate.py | import streamlit as st
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
st.title("🦜🔗 Langchain - Blog Outline Generator App")
openai_api_key = st.sidebar.text_input("OpenAI API Key", type="password")
def blog_outline(topic):
# Instantiate LLM model
llm = OpenAI(model_name="text-davinci-003", openai_api_key=openai_api_key)
# Prompt
template = "As an experienced data scientist and technical writer, generate an outline for a blog about {topic}."
prompt = PromptTemplate(input_variables=["topic"], template=template)
prompt_query = prompt.format(topic=topic)
# Run LLM model
response = llm(prompt_query)
# Print results
return st.info(response)
with st.form("myform"):
topic_text = st.text_input("Enter prompt:", "")
submitted = st.form_submit_button("Submit")
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
elif submitted:
blog_outline(topic_text)
| [
"As an experienced data scientist and technical writer, generate an outline for a blog about {topic}."
] |
2024-01-10 | streamlit/llm-examples | pages~5_Chat_with_user_feedback.py | from openai import OpenAI
import streamlit as st
from streamlit_feedback import streamlit_feedback
import trubrics
with st.sidebar:
openai_api_key = st.text_input("OpenAI API Key", key="feedback_api_key", type="password")
"[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
"[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/5_Chat_with_user_feedback.py)"
"[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
st.title("📝 Chat with feedback (Trubrics)")
"""
In this example, we're using [streamlit-feedback](https://github.com/trubrics/streamlit-feedback) and Trubrics to collect and store feedback
from the user about the LLM responses.
"""
if "messages" not in st.session_state:
st.session_state.messages = [
{"role": "assistant", "content": "How can I help you? Leave feedback to help me improve!"}
]
if "response" not in st.session_state:
st.session_state["response"] = None
messages = st.session_state.messages
for msg in messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input(placeholder="Tell me a joke about sharks"):
messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
client = OpenAI(api_key=openai_api_key)
response = client.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
st.session_state["response"] = response.choices[0].message.content
with st.chat_message("assistant"):
messages.append({"role": "assistant", "content": st.session_state["response"]})
st.write(st.session_state["response"])
if st.session_state["response"]:
feedback = streamlit_feedback(
feedback_type="thumbs",
optional_text_label="[Optional] Please provide an explanation",
key=f"feedback_{len(messages)}",
)
# This app is logging feedback to Trubrics backend, but you can send it anywhere.
# The return value of streamlit_feedback() is just a dict.
# Configure your own account at https://trubrics.streamlit.app/
if feedback and "TRUBRICS_EMAIL" in st.secrets:
config = trubrics.init(
email=st.secrets.TRUBRICS_EMAIL,
password=st.secrets.TRUBRICS_PASSWORD,
)
collection = trubrics.collect(
component_name="default",
model="gpt",
response=feedback,
metadata={"chat": messages},
)
trubrics.save(config, collection)
st.toast("Feedback recorded!", icon="📝")
| [
"How can I help you? Leave feedback to help me improve!",
"response"
] |
2024-01-10 | streamlit/llm-examples | Chatbot.py | from openai import OpenAI
import streamlit as st
with st.sidebar:
openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
"[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
"[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
"[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
st.title("💬 Chatbot")
st.caption("🚀 A streamlit chatbot powered by OpenAI LLM")
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input():
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
client = OpenAI(api_key=openai_api_key)
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
response = client.chat.completions.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
msg = response.choices[0].message.content
st.session_state.messages.append({"role": "assistant", "content": msg})
st.chat_message("assistant").write(msg)
| [
"How can I help you?"
] |
2024-01-10 | streamlit/llm-examples | pages~2_Chat_with_search.py | import streamlit as st
from langchain.agents import initialize_agent, AgentType
from langchain.callbacks import StreamlitCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.tools import DuckDuckGoSearchRun
with st.sidebar:
openai_api_key = st.text_input("OpenAI API Key", key="langchain_search_api_key_openai", type="password")
"[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
"[View the source code](https://github.com/streamlit/llm-examples/blob/main/pages/2_Chat_with_search.py)"
"[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
st.title("🔎 LangChain - Chat with search")
"""
In this example, we're using `StreamlitCallbackHandler` to display the thoughts and actions of an agent in an interactive Streamlit app.
Try more LangChain 🤝 Streamlit Agent examples at [github.com/langchain-ai/streamlit-agent](https://github.com/langchain-ai/streamlit-agent).
"""
if "messages" not in st.session_state:
st.session_state["messages"] = [
{"role": "assistant", "content": "Hi, I'm a chatbot who can search the web. How can I help you?"}
]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input(placeholder="Who won the Women's U.S. Open in 2018?"):
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key=openai_api_key, streaming=True)
search = DuckDuckGoSearchRun(name="Search")
search_agent = initialize_agent([search], llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True)
with st.chat_message("assistant"):
st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
response = search_agent.run(st.session_state.messages, callbacks=[st_cb])
st.session_state.messages.append({"role": "assistant", "content": response})
st.write(response)
| [
"Hi, I'm a chatbot who can search the web. How can I help you?"
] |
2024-01-10 | streamlit/llm-examples | pages~3_Langchain_Quickstart.py | import streamlit as st
from langchain.llms import OpenAI
st.title("🦜🔗 Langchain Quickstart App")
with st.sidebar:
openai_api_key = st.text_input("OpenAI API Key", type="password")
"[Get an OpenAI API key](https://platform.openai.com/account/api-keys)"
def generate_response(input_text):
llm = OpenAI(temperature=0.7, openai_api_key=openai_api_key)
st.info(llm(input_text))
with st.form("my_form"):
text = st.text_area("Enter text:", "What are 3 key advice for learning how to code?")
submitted = st.form_submit_button("Submit")
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
elif submitted:
generate_response(text)
| [] |
2024-01-10 | String-sg/ai-starter-kit | basecode~kb_module.py | import streamlit as st
import sqlite3
import streamlit_antd_components as sac
import pandas as pd
import os
import openai
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import LanceDB
from basecode.authenticate import return_api_key
from langchain.docstore.document import Document
import lancedb
import configparser
import ast
import json
class ConfigHandler:
def __init__(self):
self.config = configparser.ConfigParser()
self.config.read('config.ini')
def get_config_values(self, section, key):
value = self.config.get(section, key)
try:
# Try converting the string value to a Python data structure
return ast.literal_eval(value)
except (SyntaxError, ValueError):
# If not a data structure, return the plain string
return value
config_handler = ConfigHandler()
TCH = config_handler.get_config_values('constants', 'TCH')
STU = config_handler.get_config_values('constants', 'STU')
SA = config_handler.get_config_values('constants', 'SA')
AD = config_handler.get_config_values('constants', 'AD')
# Create or check for the 'database' directory in the current working directory
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")
if not os.path.exists(WORKING_DIRECTORY):
os.makedirs(WORKING_DIRECTORY)
if st.secrets["sql_ext_path"] == "None":
WORKING_DATABASE= os.path.join(WORKING_DIRECTORY , st.secrets["default_db"])
else:
WORKING_DATABASE= st.secrets["sql_ext_path"]
os.environ["OPENAI_API_KEY"] = return_api_key()
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
db = lancedb.connect(lancedb_path)
def fetch_vectorstores_with_usernames():
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
query = '''
SELECT
Vector_Stores.vs_id,
Subject.subject_name,
Topic.topic_name,
Vector_Stores.vectorstore_name,
Users.username,
Vector_Stores.sharing_enabled
FROM Vector_Stores
JOIN Users ON Vector_Stores.user_id = Users.user_id
LEFT JOIN Subject ON Vector_Stores.subject = Subject.id
LEFT JOIN Topic ON Vector_Stores.topic = Topic.id;
'''
cursor.execute(query)
data = cursor.fetchall()
conn.close()
return data
def display_vectorstores():
data = fetch_vectorstores_with_usernames()
df = pd.DataFrame(data, columns=["vs_id", "subject_name", "topic_name", "vectorstore_name", "username", "sharing_enabled"])
# Convert the 'sharing_enabled' values
df["sharing_enabled"] = df["sharing_enabled"].apply(lambda x: '✔' if x == 1 else '')
st.dataframe(
df,
use_container_width=True,
column_order=["vs_id", "subject_name", "topic_name", "vectorstore_name", "username", "sharing_enabled"]
)
def fetch_all_files():
"""
Fetch all files either shared or based on user type
"""
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Construct the SQL query with JOINs for Subject, Topic, and Users tables
if st.session_state.user['profile_id'] == 'SA':
cursor.execute('''
SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username
FROM Files
JOIN Subject ON Files.subject = Subject.id
JOIN Topic ON Files.topic = Topic.id
JOIN Users ON Files.user_id = Users.user_id
''')
else:
cursor.execute('''
SELECT Files.file_id, Files.file_name, Subject.subject_name, Topic.topic_name, Users.username
FROM Files
JOIN Subject ON Files.subject = Subject.id
JOIN Topic ON Files.topic = Topic.id
JOIN Users ON Files.user_id = Users.user_id
WHERE Files.sharing_enabled = 1
''')
files = cursor.fetchall()
formatted_files = [f"({file[0]}) {file[1]} ({file[4]})" for file in files]
conn.close()
return formatted_files
def fetch_file_data(file_id):
"""
Fetch file data given a file id
"""
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
cursor.execute("SELECT data, metadata FROM Files WHERE file_id = ?", (file_id,))
data = cursor.fetchone()
conn.close()
if data:
return data[0], data[1]
else:
return None, None
def insert_topic(org_id, topic_name):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
try:
cursor.execute('INSERT INTO Topic (org_id, topic_name) VALUES (?, ?);', (org_id, topic_name))
conn.commit()
return True # Indicates successful insertion
except sqlite3.IntegrityError:
# IntegrityError occurs if topic_name is not unique within the org
return False # Indicates topic_name is not unique within the org
finally:
conn.close()
def insert_subject(org_id, subject_name):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
try:
cursor.execute('INSERT INTO Subject (org_id, subject_name) VALUES (?, ?);', (org_id, subject_name))
conn.commit()
return True # Indicates successful insertion
except sqlite3.IntegrityError:
# IntegrityError occurs if subject_name is not unique within the org
return False # Indicates subject_name is not unique within the org
finally:
conn.close()
def select_organization():
with sqlite3.connect(WORKING_DATABASE) as conn:
cursor = conn.cursor()
# Org selection
org_query = "SELECT org_name FROM Organizations"
cursor.execute(org_query)
orgs = cursor.fetchall()
org_names = [org[0] for org in orgs]
# Use a Streamlit selectbox to choose an organization
selected_org_name = st.selectbox("Select an organization:", org_names)
# Retrieve the org_id for the selected organization
cursor.execute('SELECT org_id FROM Organizations WHERE org_name = ?;', (selected_org_name,))
result = cursor.fetchone()
if result:
org_id = result[0]
st.write(f"The org_id for {selected_org_name} is {org_id}.")
return org_id
else:
st.write(f"Organization '{selected_org_name}' not found in the database.")
return None
def fetch_subjects_by_org(org_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Check if the user is a super_admin (org_id is 0)
if org_id == 0:
cursor.execute('SELECT * FROM Subject;')
else:
cursor.execute('SELECT * FROM Subject WHERE org_id = ?;', (org_id,))
subjects = cursor.fetchall()
conn.close()
return subjects
def fetch_topics_by_org(org_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Check if the user is a super_admin (org_id is 0)
if org_id == 0:
cursor.execute('SELECT * FROM Topic;')
else:
cursor.execute('SELECT * FROM Topic WHERE org_id = ?;', (org_id,))
topics = cursor.fetchall()
conn.close()
return topics
def split_docs(file_path,meta):
#def split_meta_docs(file, source, tch_code):
loader = UnstructuredFileLoader(file_path)
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)
metadata = {"source": meta}
for doc in docs:
doc.metadata.update(metadata)
return docs
def create_lancedb_table(embeddings, meta, table_name):
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
# LanceDB connection
db = lancedb.connect(lancedb_path)
table = db.create_table(
f"{table_name}",
data=[
{
"vector": embeddings.embed_query("Query Unsuccessful"),
"text": "Query Unsuccessful",
"id": "1",
"source": f"{meta}"
}
],
mode="overwrite",
)
return table
def save_to_vectorstores(vs, vstore_input_name, subject, topic, username, share_resource=False):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Fetch the user's details
cursor.execute('SELECT user_id FROM Users WHERE username = ?', (username,))
user_details = cursor.fetchone()
if not user_details:
st.error("Error: User not found.")
return
user_id = user_details[0]
# If Vector_Store instance exists in session state, then serialize and save
# vs is the documents in json format and vstore_input_name is the name of the table and vectorstore
if vs:
try:
cursor.execute('SELECT 1 FROM Vector_Stores WHERE vectorstore_name LIKE ? AND user_id = ?', (f"%{vstore_input_name}%", user_id))
exists = cursor.fetchone()
if exists:
st.error("Error: An entry with the same vectorstore_name and user_id already exists.")
return
if subject is None:
st.error("Error: Subject is missing.")
return
if topic is None:
st.error("Error: Topic is missing.")
return
# Get the subject and topic IDs
cursor.execute('SELECT id FROM Subject WHERE subject_name = ?', (subject,))
subject_id = cursor.fetchone()[0]
cursor.execute('SELECT id FROM Topic WHERE topic_name = ?', (topic,))
topic_id = cursor.fetchone()[0]
# Insert the new row
cursor.execute('''
INSERT INTO Vector_Stores (vectorstore_name, documents, user_id, subject, topic, sharing_enabled)
VALUES (?, ?, ?, ?, ?, ?)
''', (vstore_input_name, vs, user_id, subject_id, topic_id, share_resource))
conn.commit()
conn.close()
except Exception as e:
st.error(f"Error in storing documents and vectorstore: {e}")
return
def document_to_dict(doc):
# Assuming 'doc' has 'page_content' and 'metadata' attributes
return {
'page_content': doc.page_content,
'metadata': doc.metadata
}
def dict_to_document(doc_dict):
# Create a Document object from the dictionary
# Adjust this according to how your Document class is defined
return Document(page_content=doc_dict['page_content'],metadata=doc_dict['metadata'])
def create_vectorstore():
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
full_docs = []
st.subheader("Enter the topic and subject for your knowledge base")
embeddings = OpenAIEmbeddings()
if st.session_state.user['profile_id'] == SA:
org_id = select_organization()
if org_id is None:
return
else:
org_id = st.session_state.user["org_id"]
# Fetch all available subjects
subjects = fetch_subjects_by_org(st.session_state.user["org_id"])
subject_names = [sub[2] for sub in subjects] # Assuming index 2 holds the subject_name
selected_subject = st.selectbox("Select an existing subject or type a new one:", options=subject_names + ['New Subject'])
if selected_subject == 'New Subject':
subject = st.text_input("Please enter the new subject name:", max_chars=30)
if subject:
insert_subject(org_id, subject)
else:
subject = selected_subject
# Fetch all available topics
topics = fetch_topics_by_org(st.session_state.user["org_id"])
topic_names = [topic[2] for topic in topics] # Assuming index 2 holds the topic_name
selected_topic = st.selectbox("Select an existing topic or type a new one:", options=topic_names + ['New Topic'])
if selected_topic == 'New Topic':
topic = st.text_input("Please enter the new topic name:", max_chars=30)
if topic:
insert_topic(org_id, topic)
else:
topic = selected_topic
vectorstore_input = st.text_input("Please type in a name for your knowledge base:", max_chars=20)
vs_name = vectorstore_input + f"_({st.session_state.user['username']})"
share_resource = st.checkbox("Share this resource", value=True) # <-- Added this line
# Show the current build of files for the latest database
st.subheader("Select one or more files to build your knowledge base")
files = fetch_all_files()
if files:
selected_files = sac.transfer(items=files, label=None, index=None, titles=['Uploaded files', 'Select files for KB'], format_func='title', width='100%', height=None, search=True, pagination=False, oneway=False, reload=True, disabled=False, return_index=False)
# Alert to confirm the creation of knowledge base
st.warning("Building your knowledge base will take some time. Please be patient.")
build = sac.buttons([
dict(label='Build VectorStore', icon='check-circle-fill', color = 'green'),
dict(label='Cancel', icon='x-circle-fill', color='red'),
], label=None, index=1, format_func='title', align='center', position='top', size='default', direction='horizontal', shape='round', type='default', compact=False, return_index=False)
if build == 'Build VectorStore' and selected_files:
for s_file in selected_files:
file_id = int(s_file.split("(", 1)[1].split(")", 1)[0])
file_data, meta = fetch_file_data(file_id)
docs = split_docs(file_data, meta)
full_docs.extend(docs)
#convert full_docs to json to store in sqlite
full_docs_dicts = [document_to_dict(doc) for doc in full_docs]
docs_json = json.dumps(full_docs_dicts)
#db = LanceDB.from_documents(full_docs, OpenAIEmbeddings(), connection=create_lancedb_table(embeddings, meta, vs_name))
#table = create_lancedb_table(embeddings, meta, vs_name)
# lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
# LanceDB connection
# db = lancedb.connect(lancedb_path)
# st.session_state.test1 = table
# st.write("full_docs",full_docs)
#full_docs_dicts = [document_to_dict(doc) for doc in full_docs]
#docs_json = json.dumps(full_docs_dicts)
# st.write("docs_json",docs_json)
#retrieved_docs_dicts = get_docs() # Assuming this returns the list of dictionaries
# retrieved_docs_dicts = json.loads(docs_json)
# retrieved_docs = [dict_to_document(doc_dict) for doc_dict in retrieved_docs_dicts]
# st.write("retrieved_docs",retrieved_docs)
#st.session_state.test2 = json.loads(docs_json)
# st.session_state.vs = LanceDB.from_documents(retrieved_docs , OpenAIEmbeddings(), connection= db.open_table("_(super_admin)"))
# st.session_state.current_model = "test1"
# st.write(st.session_state.test1)
#st.write(st.session_state.test2)
#st.write(type(db))
#st.session_state.vs = load_vectorstore(documents, table_name)
create_lancedb_table(embeddings, meta, vs_name)
save_to_vectorstores(docs_json, vs_name, subject, topic, st.session_state.user["username"], share_resource) # Passing the share_resource to the function
st.success("Knowledge Base loaded")
else:
st.write("No files found in the database.")
def load_vectorstore(documents, table_name):
retrieved_docs_dicts = json.loads(documents)
retrieved_docs = [dict_to_document(doc_dict) for doc_dict in retrieved_docs_dicts]
vs = LanceDB.from_documents(retrieved_docs , OpenAIEmbeddings(), connection= db.open_table(f"{table_name}"))
return vs
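# Minimal usage sketch (illustrative; assumes `docs_json` and the matching LanceDB table
# were created earlier via create_vectorstore()):
# vs = load_vectorstore(docs_json, "my_kb_(username)")
# hits = vs.similarity_search("photosynthesis")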
def delete_lancedb_table(table_name):
lancedb_path = os.path.join(WORKING_DIRECTORY, "lancedb")
# LanceDB connection
db = lancedb.connect(lancedb_path)
db.drop_table(f"{table_name}")
def fetch_vectorstores_by_user_id(user_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Fetch vectorstores based on user_id
cursor.execute('SELECT vectorstore_name FROM Vector_Stores WHERE user_id = ?;', (user_id,))
vectorstores = cursor.fetchall()
conn.close()
return vectorstores
def delete_vectorstores():
st.subheader("Delete VectorStores in Database:")
user_vectorstores = fetch_vectorstores_by_user_id(st.session_state.user["id"])
if user_vectorstores:
vectorstore_names = [vs[0] for vs in user_vectorstores]
selected_vectorstores = st.multiselect("Select vectorstores to delete:", options=vectorstore_names)
confirm_delete = st.checkbox("I understand that this action cannot be undone.", value=False)
if st.button("Delete VectorStore"):
if confirm_delete and selected_vectorstores:
delete_vectorstores_from_db(selected_vectorstores, st.session_state.user["id"], st.session_state.user["profile_id"])
st.success(f"Deleted {len(selected_vectorstores)} vectorstores.")
else:
st.warning("Please confirm the deletion action.")
else:
st.write("No vectorstores found in the database.")
def delete_vectorstores_from_db(vectorstore_names, user_id, profile):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
for vectorstore_name in vectorstore_names:
if profile in ['SA', 'AD']:
# Delete the corresponding LanceDB table
delete_lancedb_table(vectorstore_name)
# Delete vectorstore irrespective of the user_id associated with them
cursor.execute('DELETE FROM Vector_Stores WHERE vectorstore_name=?;', (vectorstore_name,))
else:
# Delete the corresponding LanceDB table
delete_lancedb_table(vectorstore_name)
# Delete only if the user_id matches
cursor.execute('DELETE FROM Vector_Stores WHERE vectorstore_name=? AND user_id=?;', (vectorstore_name, user_id))
# Check if the row was affected
if cursor.rowcount == 0:
st.error(f"Unable to delete vectorstore '{vectorstore_name}' that is not owned by you.")
conn.commit() # Commit the changes
conn.close() # Close the connection
| [] |
2024-01-10 | String-sg/ai-starter-kit | basecode~main_bot.py | import streamlit as st
import openai
from openai import OpenAI
import sqlite3
from basecode.authenticate import return_api_key
from datetime import datetime
from langchain.memory import ConversationSummaryBufferMemory
from langchain.memory import ConversationBufferWindowMemory
from langchain.chat_models import ChatOpenAI
import streamlit_antd_components as sac
import configparser
import os
from Markdown2docx import Markdown2docx
client = OpenAI(
# defaults to os.environ.get("OPENAI_API_KEY")
api_key=return_api_key(),
)
config = configparser.ConfigParser()
config.read('config.ini')
NEW_PLAN = config['constants']['NEW_PLAN']
FEEDBACK_PLAN = config['constants']['FEEDBACK_PLAN']
PERSONAL_PROMPT = config['constants']['PERSONAL_PROMPT']
DEFAULT_TEXT = config['constants']['DEFAULT_TEXT']
# Create or check for the 'database' directory in the current working directory
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")
if not os.path.exists(WORKING_DIRECTORY):
os.makedirs(WORKING_DIRECTORY)
if st.secrets["sql_ext_path"] == "None":
WORKING_DATABASE= os.path.join(WORKING_DIRECTORY , st.secrets["default_db"])
else:
WORKING_DATABASE= st.secrets["sql_ext_path"]
def set_chat_prompts(dict_buttons, key):
# Extract values from the dictionary and store in a list
button_labels = [dict_buttons.get(f"sent_{i+1}", "disabled") for i in range(5)]
# Create button items using the extracted labels
button_items = [sac.ButtonsItem(label=label) for label in button_labels]
str = sac.buttons(button_items, index=None, format_func='title', align='left', size='small', key=key)
if str:
return str
def response_download():
docx_name = "crp" + st.session_state.user['username'] + ".docx"
docx_path = os.path.join("chatbot_response", docx_name)
if os.path.exists(docx_path):
# Provide the docx for download via Streamlit
with open(docx_path, "rb") as docx_file:
docx_bytes = docx_file.read()
st.success("File is ready for downloading")
st.download_button(
label="Download document as DOCX",
data=docx_bytes,
file_name=docx_name,
mime='application/vnd.openxmlformats-officedocument.wordprocessingml.document',
)
os.remove(docx_path)
st.session_state.button_text = 'Reset'
else:
st.warning("There is no lesson plan available for download.")
def complete_my_lesson():
plan_action = sac.buttons([sac.ButtonsItem(label='Preview Responses', icon='eye', color='#00BFFF'),
sac.ButtonsItem(label='Download Responses', icon='file-earmark-arrow-down', color='#40826D'),
sac.ButtonsItem(label='Clear Responses', icon='file-earmark-arrow-down', color='#FF7F50')
], index=None, format_func='title', size='small', type='primary')
if plan_action == 'Preview Responses':
st.write(st.session_state.data_doc)
elif plan_action == 'Download Responses':
st.write("Downloading your lesson plan")
md_filename = "crp" + st.session_state.user['username'] + ".md"
md_filepath = os.path.join("chatbot_response", md_filename)
if not os.path.exists("chatbot_response"):
os.makedirs("chatbot_response")
with open(md_filepath, 'w', encoding='utf-8') as file:
file.write(st.session_state.data_doc)
# Convert the markdown file to a docx
base_filepath = os.path.join("chatbot_response", "crp" + st.session_state.user['username'])
project = Markdown2docx(base_filepath)
project.eat_soup()
project.save() # Assuming it saves the file with the same name but a .docx extension
response_download()
elif plan_action == 'Clear Responses':
if st.checkbox("Clear Responses"):
st.session_state.data_doc = ""
st.success("Responses cleared")
def add_response(response):
# add_response = sac.buttons([sac.ButtonsItem(label='Ignore Response', icon='plus-circle', color='#40826D'), [sac.ButtonsItem(label='Add Response', icon='plus-circle', color='#25C3B0')]
# ], index=None, format_func='title', size='small',type='primary')
opt = sac.buttons([sac.ButtonsItem(label='Save Response', color='#40826D')], format_func='title', index=None, size='small',type='primary')
# st.write(response)
if opt: # append the response only when the 'Save Response' button is clicked
st.session_state.data_doc = st.session_state.data_doc + "\n\n" + response
return opt
#response rating component
def rating_component():
rating_value = sac.rate(label='Response ratings:', position='left', clear=True, value=2.0, align='left', size=15, color='#25C3B0')
return rating_value
def insert_into_data_table(date, chatbot_ans,user_prompt, tokens, function_name, value=0):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Insert data into Data_Table using preloaded session state value
cursor.execute('''
INSERT INTO Data_Table (date, user_id, profile_id, chatbot_ans, user_prompt, function_name, tokens, response_rating)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
''', (date, st.session_state.data_profile["user_id"], st.session_state.data_profile["profile_id"], chatbot_ans, user_prompt, function_name, tokens, value))
conn.commit()
conn.close()
#clear messages and memory
def clear_session_states():
st.session_state.msg = []
if "memory" not in st.session_state:
pass
else:
del st.session_state["memory"]
#below ------------------------------ QA base bot , K=2 memory for short term memory---------------------------------------------
#using the query from lanceDB and vector store , combine with memory
def memory_buffer_qa_component(prompt):
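# Builds the system prompt for the QA bot: the closest vector-store chunk plus the rolling conversation window are injected into the chatbot template.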
#st.write(type(st.session_state.vs))
if st.session_state.vs:
docs = st.session_state.vs.similarity_search(prompt)
resource = docs[0].page_content
source = docs[0].metadata
if "memory" not in st.session_state:
st.session_state.memory = ConversationBufferWindowMemory(k=st.session_state.k_memory)
mem = st.session_state.memory.load_memory_variables({})
#st.write(resource)
prompt_template = st.session_state.chatbot + f"""
Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
Search Result:
{resource}
{source}
History of conversation:
{mem}
You must quote the source of the Search Result if you are using the search result as part of the answer"""
return prompt_template
#chat completion memory for streamlit using memory buffer
def chat_completion_qa_memory(prompt):
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
prompt_template = memory_buffer_qa_component(prompt)
response = client.chat.completions.create(
model=st.session_state.openai_model,
messages=[
{"role": "system", "content":prompt_template },
{"role": "user", "content": prompt},
],
temperature=st.session_state.temp, #settings option
presence_penalty=st.session_state.presence_penalty, #settings option
frequency_penalty=st.session_state.frequency_penalty, #settings option
stream=True #settings option
)
return response
#integration API call into streamlit chat components with memory and qa
def basebot_qa_memory(bot_name):
full_response = ""
greetings_str = f"Hi, I am {bot_name}"
help_str = "How can I help you today?"
# Check if st.session_state.msg exists, and if not, initialize with greeting and help messages
if 'msg' not in st.session_state:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
elif st.session_state.msg == []:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
#lesson collaborator
for message in st.session_state.msg:
with st.chat_message(message["role"]):
st.markdown(message["content"])
# #adding on response
# if st.session_state.download_response_flag == True:
# if st.session_state.msg: # Check if the list is not empty
# last_message = st.session_state.msg[-1] # Access the last message
# with st.chat_message(last_message["role"]): # Use the role from the last message
# st.markdown(last_message["content"]) # Display the content of the last message
# add_response(last_message["content"])
try:
if prompt := st.chat_input("Enter your query"):
st.session_state.msg.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in chat_completion_qa_memory(prompt):
full_response += (response.choices[0].delta.content or "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
#Response Rating
st.session_state.msg.append({"role": "assistant", "content": full_response})
st.session_state["memory"].save_context({"input": prompt},{"output": full_response})
# Insert data into the table
now = datetime.now() # timestamp; stored below via strftime("%d/%m/%Y %H:%M:%S")
num_tokens = len(full_response + prompt)*1.3
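# Rough size estimate only: character count scaled by 1.3, not an exact tokenizer count.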
#need to store num_tokens,full_response, prompt, bot_name, now and all in a dictionary
#if user press feedback it will look for the last entry in the database of the user and update the rating for this table
if st.session_state.rating == True:
feedback_value = rating_component()
else:
feedback_value = 0
#st.write(num_tokens)
insert_into_data_table(now.strftime("%d/%m/%Y %H:%M:%S"), full_response, prompt, num_tokens, bot_name)
if st.session_state.download_response_flag == True:
st.session_state.chat_response = add_response(full_response)
except Exception as e:
st.exception(e)
#below ------------------------------ base bot , K=2 memory for short term memory---------------------------------------------
#faster and more precise but no summary
def memory_buffer_component():
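# Same idea as the QA variant above, but without the vector-store lookup; only the conversation window is injected into the template.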
if "memory" not in st.session_state:
st.session_state.memory = ConversationBufferWindowMemory(k=st.session_state.k_memory)
#st.write("Messages ", messages)
mem = st.session_state.memory.load_memory_variables({})
#For more customisation, this can be in the config.ini file
prompt_template = st.session_state.chatbot + f"""
History of conversation:
{mem}"""
return prompt_template
#chat completion memory for streamlit using memory buffer
def chat_completion_memory(prompt):
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
prompt_template = memory_buffer_component()
#st.write("Prompt Template ", prompt_template)
response = client.chat.completions.create(
model=st.session_state.openai_model,
messages=[
{"role": "system", "content":prompt_template },
{"role": "user", "content": prompt},
],
temperature=st.session_state.temp, #settings option
presence_penalty=st.session_state.presence_penalty, #settings option
frequency_penalty=st.session_state.frequency_penalty, #settings option
stream=True #settings option
)
return response
#integration API call into streamlit chat components with memory
def basebot_memory(bot_name):
full_response = ""
greetings_str = f"Hi, I am {bot_name}"
help_str = "How can I help you today?"
# Check if st.session_state.msg exists, and if not, initialize with greeting and help messages
if 'msg' not in st.session_state:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
elif st.session_state.msg == []:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
for message in st.session_state.msg:
with st.chat_message(message["role"]):
st.markdown(message["content"])
#adding on response
# if st.session_state.download_response_flag == True:
# if st.session_state.msg: # Check if the list is not empty
# last_message = st.session_state.msg[-1] # Access the last message
# add_response(last_message["content"])
try:
if prompt := st.chat_input("What is up?"):
st.session_state.msg.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in chat_completion_memory(prompt):
full_response += (response.choices[0].delta.content or "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.msg.append({"role": "assistant", "content": full_response})
st.session_state["memory"].save_context({"input": prompt},{"output": full_response})
# Insert data into the table
now = datetime.now() # timestamp; stored below via strftime("%d/%m/%Y %H:%M:%S")
num_tokens = len(full_response + prompt)*1.3
if st.session_state.rating == True:
feedback_value = rating_component()
else:
feedback_value = 0
insert_into_data_table(now.strftime("%d/%m/%Y %H:%M:%S"), full_response, prompt, num_tokens, bot_name)
if st.session_state.download_response_flag == True:
st.session_state.chat_response = add_response(full_response)
except Exception as e:
st.error(e)
#below ------------------------------ Suitable for Q & A--------------------------------------------
#below ------------------------------ base bot , no memory ---------------------------------------------
#chat completion for streamlit function
def chat_completion(prompt):
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
response = client.chat.completions.create(
model=st.session_state.openai_model,
messages=[
{"role": "system", "content": st.session_state.chatbot},
{"role": "user", "content": prompt},
],
temperature=st.session_state.temp, #settings option
stream=True #settings option
)
return response
#integration API call into streamlit chat components
def basebot(bot_name):
full_response = ""
greetings_str = f"Hi, I am {bot_name}"
help_str = "How can I help you today?"
# Check if st.session_state.msg exists, and if not, initialize with greeting and help messages
if 'msg' not in st.session_state:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
elif st.session_state.msg == []:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
for message in st.session_state.msg:
with st.chat_message(message["role"]):
st.markdown(message["content"])
try:
if prompt := st.chat_input("What is up?"):
st.session_state.msg.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in chat_completion(prompt):
full_response += (response.choices[0].delta.content or "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
#Response Rating
if st.session_state.rating == True:
feedback_value = rating_component()
else:
feedback_value = 0
now = datetime.now() # timestamp; stored below via strftime("%d/%m/%Y %H:%M:%S")
num_tokens = len(full_response + prompt)*1.3
st.session_state.msg.append({"role": "assistant", "content": full_response})
insert_into_data_table(now.strftime("%d/%m/%Y %H:%M:%S"), full_response, prompt, num_tokens, bot_name)
# if st.session_state.visuals == True:
# metacognitive_prompter(full_response)
if st.session_state.download_response_flag == True:
st.session_state.chat_response = add_response(full_response)
except Exception as e:
st.error(e)
#below ------------------------------ base bot , with vectorstore ---------------------------------------------
def qa_component(prompt):
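# Retrieves the closest document chunk from the vector store and embeds it (with its source metadata) in the system prompt.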
#st.write(type(st.session_state.vs))
if st.session_state.vs:
docs = st.session_state.vs.similarity_search(prompt)
resource = docs[0].page_content
source = docs[0].metadata
#st.write(resource)
prompt_template = st.session_state.chatbot + f"""
Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
Search Result:
{resource}
{source}
You must quote the source of the Search Result if you are using the search result as part of the answer"""
return prompt_template
#chat completion with vectorstore for streamlit
def chat_completion_qa(prompt):
openai.api_key = return_api_key()
os.environ["OPENAI_API_KEY"] = return_api_key()
#show the qa component results in the prompt
prompt_template = qa_component(prompt)
response = client.chat.completions.create(
model=st.session_state.openai_model,
messages=[
{"role": "system", "content":prompt_template },
{"role": "user", "content": prompt},
],
temperature=st.session_state.temp, #settings option
presence_penalty=st.session_state.presence_penalty, #settings option
frequency_penalty=st.session_state.frequency_penalty, #settings option
stream=True #settings option
)
return response
#chat completion with vectorstore for streamlit
def basebot_qa(bot_name):
full_response = ""
greetings_str = f"Hi, I am {bot_name}"
help_str = "How can I help you today?"
# Check if st.session_state.msg exists, and if not, initialize with greeting and help messages
if 'msg' not in st.session_state:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
elif st.session_state.msg == []:
st.session_state.msg = [
{"role": "assistant", "content": greetings_str},
{"role": "assistant", "content": help_str}
]
for message in st.session_state.msg:
with st.chat_message(message["role"]):
st.markdown(message["content"])
try:
if prompt := st.chat_input("What is up?"):
st.session_state.msg.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in chat_completion_qa(prompt):
full_response += (response.choices[0].delta.content or "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
#Response Rating
if st.session_state.rating == True:
feedback_value = rating_component()
else:
feedback_value = 0
st.session_state.msg.append({"role": "assistant", "content": full_response})
# Insert data into the table
now = datetime.now() # timestamp; stored below via strftime("%d/%m/%Y %H:%M:%S")
num_tokens = len(full_response + prompt)*1.3
#st.write(num_tokens)
insert_into_data_table(now.strftime("%d/%m/%Y %H:%M:%S"), full_response, prompt, num_tokens, bot_name)
# if st.session_state.visuals == True:
# metacognitive_prompter(full_response)
#metacognitive_prompter(full_response)
if st.session_state.download_response_flag == True:
st.session_state.chat_response = add_response(full_response)
except Exception as e:
st.exception(e)
#----------------------------------return search results--------------------------------------------
def return_search_raw_results(prompt):
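# Returns the raw top vector-store match and its source directly, without any LLM call.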
if st.session_state.vs:
docs = st.session_state.vs.similarity_search(prompt)
ans = docs[0].page_content
source = docs[0].metadata.get('source', None)
return f"""{ans} \n\n Source: ({source})"""
def search_bot():
full_response = ""
for message in st.session_state.msg:
with st.chat_message(message["role"]):
st.markdown(message["content"])
try:
if prompt := st.chat_input("Enter your search query"):
st.session_state.msg.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
full_response = return_search_raw_results(prompt)
message_placeholder.markdown(full_response)
if st.session_state.rating == True:
feedback_value = rating_component()
else:
feedback_value = 0
#message_placeholder.markdown(source)
st.session_state.msg.append({"role": "assistant", "content": full_response})
# Insert data into the table
now = datetime.now() # timestamp; stored below via strftime("%d/%m/%Y %H:%M:%S")
num_tokens = len(full_response + prompt)*1.3
#st.write(num_tokens)
insert_into_data_table(now.strftime("%d/%m/%Y %H:%M:%S"), full_response, prompt, num_tokens, "search_bot") # function_name is a required argument; "search_bot" is used here as the label
# if st.session_state.visuals == True:
# metacognitive_prompter(full_response)
if st.session_state.download_response_flag == True:
st.session_state.chat_response = add_response(full_response)
except Exception as e:
st.error(e)
#below ------------------------------ base bot , summary memory for long conversation---------------------------------------------
#summary of conversation , requires another LLM call for every input, useful for feedback and summarising what was spoken
def memory_summary_component(prompt): #currently not in use
if "memory" not in st.session_state:
llm = ChatOpenAI(model_name=st.session_state.openai_model,temperature=st.session_state.temp)
st.session_state.memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=1000)
messages = st.session_state["memory"].chat_memory.messages
#st.write("Messages ", messages)
previous_summary = ""
mem = st.session_state["memory"].predict_new_summary(messages, previous_summary)
prompt_template = st.session_state.chatbot + f"""
Summary of current conversation:
{mem}"""
return prompt_template
| [
"constants",
"PERSONAL_PROMPT",
" \n\t\t\t\t\t\tHistory of conversation:\n\t\t\t\t\t\tPLACEHOLDER",
"\n\t\t\t\t\t\tUse the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. \n\t\t\t\t\t\tSearch Result:\n\t\t\t\t\t\tPLACEHOLDER\n\t\t\t\t\t\tPLACEHOLDER\n\t\t\t\t\t\tHistory of conversation:\n\t\t\t\t\t\tPLACEHOLDER\n\t\t\t\t\t\tYou must quote the source of the Search Result if you are using the search result as part of the answer",
"\n\t\t\t\t\t\tSummary of current conversation:\n\t\t\t\t\t\tPLACEHOLDER",
"\n\t\t\t\t\t\tUse the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. \n\t\t\t\t\t\tSearch Result:\n\t\t\t\t\t\tPLACEHOLDER\n\t\t\t\t\t\tPLACEHOLDER\n\t\t\t\t\t\tYou must quote the source of the Search Result if you are using the search result as part of the answer"
] |
2024-01-10 | String-sg/ai-starter-kit | basecode~files_module.py | import streamlit as st
import sqlite3
import pandas as pd
import os
import tempfile
#from langchain.embeddings.openai import OpenAIEmbeddings
import configparser
import ast
class ConfigHandler:
def __init__(self):
self.config = configparser.ConfigParser()
self.config.read('config.ini')
def get_config_values(self, section, key):
value = self.config.get(section, key)
try:
# Try converting the string value to a Python data structure
return ast.literal_eval(value)
except (SyntaxError, ValueError):
# If not a data structure, return the plain string
return value
config_handler = ConfigHandler()
TCH = config_handler.get_config_values('constants', 'TCH')
STU = config_handler.get_config_values('constants', 'STU')
SA = config_handler.get_config_values('constants', 'SA')
AD = config_handler.get_config_values('constants', 'AD')
# Create or check for the 'database' directory in the current working directory
cwd = os.getcwd()
WORKING_DIRECTORY = os.path.join(cwd, "database")
if not os.path.exists(WORKING_DIRECTORY):
os.makedirs(WORKING_DIRECTORY)
if st.secrets["sql_ext_path"] == "None":
WORKING_DATABASE= os.path.join(WORKING_DIRECTORY , st.secrets["default_db"])
else:
WORKING_DATABASE= st.secrets["sql_ext_path"]
def fetch_files_with_usernames():
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
query = '''
SELECT
Files.file_id,
Subject.subject_name,
Topic.topic_name,
Files.file_name,
Users.username,
Files.sharing_enabled
FROM Files
JOIN Users ON Files.user_id = Users.user_id
LEFT JOIN Subject ON Files.subject = Subject.id
LEFT JOIN Topic ON Files.topic = Topic.id;
'''
cursor.execute(query)
data = cursor.fetchall()
conn.close()
return data
def display_files():
data = fetch_files_with_usernames()
df = pd.DataFrame(data, columns=["file_id", "subject_name", "topic_name", "file_name", "username", "sharing_enabled"])
# Convert the 'sharing_enabled' values
df["sharing_enabled"] = df["sharing_enabled"].apply(lambda x: '✔' if x == 1 else '')
st.dataframe(
df,
use_container_width=True,
column_order=["file_id", "subject_name", "topic_name", "file_name", "username", "sharing_enabled"]
)
def get_file_extension(file_name):
return os.path.splitext(file_name)[1]
def save_file_to_db(org_id, user_id, file_name, file_content, metadata, subject, topic, sharing_enabled):
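# The upload is written to a temporary file and the temp file path (not the raw bytes) is stored in the Files table's data column.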
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
extension = get_file_extension(file_name)
with tempfile.NamedTemporaryFile(delete=False, suffix=extension) as temp_file:
temp_file.write(file_content)
temp_file.flush()
# Check if the subject already exists. If not, insert it.
cursor.execute('SELECT id FROM Subject WHERE subject_name = ?', (subject,))
subject_id_result = cursor.fetchone()
if not subject_id_result:
cursor.execute('INSERT INTO Subject (org_id, subject_name) VALUES (?, ?)', (org_id, subject))
subject_id = cursor.lastrowid
else:
subject_id = subject_id_result[0]
# Check if the topic already exists. If not, insert it.
cursor.execute('SELECT id FROM Topic WHERE topic_name = ?', (topic,))
topic_id_result = cursor.fetchone()
if not topic_id_result:
cursor.execute('INSERT INTO Topic (org_id, topic_name) VALUES (?, ?)', (org_id, topic))
topic_id = cursor.lastrowid
else:
topic_id = topic_id_result[0]
# Insert the file data and metadata into the Files table
cursor.execute(
'INSERT INTO Files (user_id, file_name, data, metadata, subject, topic, sharing_enabled) VALUES (?, ?, ?, ?, ?, ?, ?);',
(user_id, file_name, temp_file.name,metadata, subject_id, topic_id, int(sharing_enabled))
)
conn.commit()
conn.close()
def fetch_subjects_by_org(org_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Check if the user is a super_admin (org_id is 0)
if org_id == 0:
cursor.execute('SELECT * FROM Subject;')
else:
cursor.execute('SELECT * FROM Subject WHERE org_id = ?;', (org_id,))
subjects = cursor.fetchall()
conn.close()
return subjects
def fetch_topics_by_org(org_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Check if the user is a super_admin (org_id is 0)
if org_id == 0:
cursor.execute('SELECT * FROM Topic;')
else:
cursor.execute('SELECT * FROM Topic WHERE org_id = ?;', (org_id,))
topics = cursor.fetchall()
conn.close()
return topics
def select_organization():
with sqlite3.connect(WORKING_DATABASE) as conn:
cursor = conn.cursor()
# Org selection
org_query = "SELECT org_name FROM Organizations"
cursor.execute(org_query)
orgs = cursor.fetchall()
org_names = [org[0] for org in orgs]
# Use a Streamlit selectbox to choose an organization
selected_org_name = st.selectbox("Select an organization:", org_names)
# Retrieve the org_id for the selected organization
cursor.execute('SELECT org_id FROM Organizations WHERE org_name = ?;', (selected_org_name,))
result = cursor.fetchone()
if result:
org_id = result[0]
st.write(f"The org_id for {selected_org_name} is {org_id}.")
return org_id
else:
st.write(f"Organization '{selected_org_name}' not found in the database.")
return None
def docs_uploader():
st.subheader("Upload Files to build your knowledge base")
if st.session_state.user['profile_id'] == SA:
org_id = select_organization()
if org_id is None:
return
else:
org_id = st.session_state.user["org_id"]
# Upload the file using Streamlit
uploaded_file = st.file_uploader("Choose a file", type=['docx', 'txt', 'pdf'])
meta = st.text_input("Please enter your document source (Default is MOE):", max_chars=20)
# Fetch all available subjects
subjects = fetch_subjects_by_org(st.session_state.user["org_id"])
subject_names = [sub[2] for sub in subjects] # Assuming index 2 holds the subject_name
selected_subject = st.selectbox("Select an existing subject or type a new one:", options=subject_names + ['New Subject'])
if selected_subject == 'New Subject':
new_subject = st.text_input("Please enter the new subject name:", max_chars=30)
else:
new_subject = None
# Fetch all available topics
topics = fetch_topics_by_org(st.session_state.user["org_id"])
topic_names = [topic[2] for topic in topics] # Assuming index 2 holds the topic_name
selected_topic = st.selectbox("Select an existing topic or type a new one:", options=topic_names + ['New Topic'])
if selected_topic == 'New Topic':
new_topic = st.text_input("Please enter the new topic name:", max_chars=30)
else:
new_topic = None
share_resource = st.checkbox("Share this resource", value=True)
if uploaded_file:
st.write("File:", uploaded_file.name, "uploaded!")
if not meta:
meta = "MOE"
# Save to Database Button
if st.button("Save to Database"):
save_file_to_db(
org_id=org_id,
user_id=st.session_state.user["id"],
file_name=uploaded_file.name,
file_content=uploaded_file.read(),
metadata=meta,
subject=selected_subject if not new_subject else new_subject,
topic=selected_topic if not new_topic else new_topic,
sharing_enabled=share_resource
)
st.success("File saved to database!")
def fetch_files_by_user_id(user_id):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
# Fetch files based on user_id
cursor.execute('SELECT file_name FROM Files WHERE user_id = ?;', (user_id,))
files = cursor.fetchall()
conn.close()
return files
def delete_files():
st.subheader("Delete Files in Database:")
user_files = fetch_files_by_user_id(st.session_state.user["id"])
if user_files:
file_names = [file[0] for file in user_files]
selected_files = st.multiselect("Select files to delete:", options=file_names)
confirm_delete = st.checkbox("I understand that this action cannot be undone.", value=False)
if st.button("Delete"):
if confirm_delete and selected_files:
delete_files_from_db(selected_files, st.session_state.user["id"], st.session_state.user["profile_id"])
st.success(f"Deleted {len(selected_files)} files.")
else:
st.warning("Please confirm the deletion action.")
else:
st.write("No files found in the database.")
def delete_files_from_db(file_names, user_id, profile):
conn = sqlite3.connect(WORKING_DATABASE)
cursor = conn.cursor()
if profile in [SA, AD]:
# Delete files irrespective of the user_id associated with them
for file_name in file_names:
cursor.execute('DELETE FROM Files WHERE file_name=?;', (file_name,))
else:
for file_name in file_names:
cursor.execute('DELETE FROM Files WHERE file_name=? AND user_id=?;', (file_name, user_id))
# Check if the row was affected
if cursor.rowcount == 0:
st.error(f"Unable to delete file '{file_name}' that is not owned by you.")
conn.commit() # Commit the changes
conn.close() # Close the connection
| [] |
2024-01-10 | jbpayton/llm-auto-forge | TestDynamicToolCreationWithFullBrowser.py | from langchain.callbacks import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from prompts import BROWSER_TOOL_MAKER_PROMPT
from Agents import DialogueAgentWithTools
import util
from langchain.tools import Tool
from langchain.agents.agent_toolkits import FileManagementToolkit
from langchain.utilities import GoogleSearchAPIWrapper
from tools.ToolRegistrationTool import tool_registration_tool, query_available_modules
from tools.ToolQueryTool import tool_query_tool
from tools.LLMBrowsingTools import query_website, paged_web_browser
util.load_secrets()
# Define system prompts for our agent
system_prompt_scribe = BROWSER_TOOL_MAKER_PROMPT
# initialize file management tools
file_tools = FileManagementToolkit(
selected_tools=["read_file", "write_file", "list_directory", "copy_file", "move_file", "file_delete"]
).get_tools()
# initialize search API
search = GoogleSearchAPIWrapper()
def top10_results(query):
return search.results(query, 10)
GoogleSearchTool = Tool(
name="Google Search",
description="Search Google for recent results.",
func=top10_results,
)
tools = [GoogleSearchTool,
tool_query_tool,
tool_registration_tool,
query_available_modules,
paged_web_browser,
] + file_tools
# Initialize our agents with their respective roles and system prompts
tool_making_agent = DialogueAgentWithTools(name="ToolMaker",
system_message=system_prompt_scribe,
model=ChatOpenAI(
model_name='gpt-4',
streaming=True,
temperature=0.0,
callbacks=[StreamingStdOutCallbackHandler()]),
tools=tools)
tool_making_agent.receive("HumanUser", "Can you tell me what is in this image and add an in-picture caption to this "
"image? ./TestInput/mystery_image.jpg Write the captioned image to "
"./TestOutput/captioned_image.jpg")
tool_making_agent.send()
print("Done")
| [] |
2024-01-10 | jbpayton/llm-auto-forge | TestDynamicToolCreation.py | from langchain import WikipediaAPIWrapper
from langchain.callbacks import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from prompts import TOOL_MAKER_PROMPT
from Agents import DialogueAgentWithTools
import util
from langchain.tools import DuckDuckGoSearchRun, WikipediaQueryRun
from langchain.tools.file_management import WriteFileTool, ReadFileTool
from tools.ToolRegistrationTool import tool_registration_tool
from tools.ToolQueryTool import tool_query_tool
util.load_secrets()
# Define system prompts for our agent
system_prompt_scribe = TOOL_MAKER_PROMPT
tools = [ReadFileTool(),
WriteFileTool(),
WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper()),
DuckDuckGoSearchRun(),
tool_registration_tool,
tool_query_tool]
# Initialize our agents with their respective roles and system prompts
tool_making_agent = DialogueAgentWithTools(name="ToolMaker",
system_message=system_prompt_scribe,
model=ChatOpenAI(
model_name='gpt-4',
streaming=True,
temperature=0.9,
callbacks=[StreamingStdOutCallbackHandler()]),
tools=tools)
tool_making_agent.receive("HumanUser", "Write the first sentence of the gettysburg address to a file (create a tool to do this).")
tool_making_agent.send()
print("Done")
| [] |
2024-01-10 | jbpayton/llm-auto-forge | tools~ToolRegistrationTool.py | import sys
from langchain.tools import tool
from tools.ToolRegistry import register_tool
@tool("tool_registration_tool", return_direct=False)
def tool_registration_tool(tool_function: str, tool_filename: str, agent_name: str) -> str:
"""This tool allows an agent to load a tool for its own use given the tool name, tool filename (just the file
name, no directory), and the agent's name. """
try:
output = register_tool(tool_function, tool_filename, agent_name)
# After it is finished the tool should return a string that is the output of the tool.
return "Tool Registered successfully: " + output
except:
# print the exception
return str(sys.exc_info())
@tool("query_available_modules", return_direct=False)
def query_available_modules() -> str:
"""This tool allows an agent to query the available python modules on the system. """
try:
import pkgutil
return "The following libraries exist in your environment" + str([x[1] for x in list(pkgutil.iter_modules())])
except:
# print the exception
return str(sys.exc_info()) | [] |
2024-01-10 | jbpayton/llm-auto-forge | tools~ToolTemplate.py | import sys
from langchain.tools import tool
@tool("example_tool", return_direct=False)
def example_tool(query: str) -> str:
"""This example tool returns the query string back to the console."""
# note the above docstring in the triple quotes is the description. Each tool MUST have a description.
try:
# Put your code here. This is where you will write your tool.
print(query)
# Sometimes you will want to write debug statements to the console
print("This is a debug statement")
# After it is finished the tool should return a string that is the output of the tool.
return "Finished running tool."
except:
# If there is an error, print the error to the console.
return "Error: " + str(sys.exc_info())
| [] |
2024-01-10 | jbpayton/llm-auto-forge | tools~ToolQueryTool.py | import sys
from langchain.tools import tool
from tools.ToolRegistry import query_unregistered_tools
@tool("tool_query_tool", return_direct=False)
def tool_query_tool(tool_description: str, agent_name: str) -> str:
"""This tool allows an agent to query for available (unregistered) tools given a desired functional description
and the agent's name. """
try:
output = query_unregistered_tools(tool_description, agent_name)
# After it is finished the tool should return a string that is the output of the tool.
return output
except:
# print the exception
return str(sys.exc_info()) | [] |
2024-01-10 | jbpayton/llm-auto-forge | AgentTools~GoogleImageSearch.py | import sys, os
import requests
from langchain.tools import tool
@tool("google_image_search", return_direct=False)
def google_image_search(query: str) -> str:
"""This tool performs a Google Image Search using the Search API and returns a list of image URLs."""
try:
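# Requires GOOGLE_API_KEY and GOOGLE_CSE_ID to be set in the environment for the Custom Search API.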
API_KEY = os.environ['GOOGLE_API_KEY']
SEARCH_ENGINE_ID = os.environ['GOOGLE_CSE_ID']
response = requests.get('https://www.googleapis.com/customsearch/v1', params={'key': API_KEY, 'cx': SEARCH_ENGINE_ID, 'q': query, 'searchType': 'image'})
response.raise_for_status()
results = response.json()
image_urls = [item['link'] for item in results['items']]
return image_urls
except:
return 'Error: ' + str(sys.exc_info()) | [] |
2024-01-10 | jbpayton/llm-auto-forge | AgentTools~ImageDisplayer.py | import sys
from PIL import Image
from langchain.tools import tool
@tool("image_displayer", return_direct=False)
def image_displayer(image_path: str) -> str:
"""This tool displays an image from a given local file path."""
try:
img = Image.open(image_path)
img.show()
return "Image displayed successfully."
except:
return "Error: " + str(sys.exc_info()) | [] |
2024-01-10 | jbpayton/llm-auto-forge | Agents.py | import time
from typing import List, Dict, Optional, Any, Tuple
from langchain import PromptTemplate, LLMChain
from langchain.agents import StructuredChatAgent, AgentExecutor
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.memory import ConversationBufferMemory
from langchain.schema import SystemMessage, HumanMessage, AIMessage, AgentAction, AgentFinish
from langchain.tools import Tool
from langchain.utils import get_color_mapping
from tools.ToolRegistry import ToolRegistry
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage = None,
model = None,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
if self.model and self.system_message:
print(f"{self.name}: ")
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
else:
raise NotImplementedError
def receive(self, name: str, message: str) -> None:
self.message_history.append(f"{name}: {message}")
class SelfModifiableAgentExecutor(AgentExecutor):
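# Executor that re-reads this agent's tool list from ToolRegistry on every loop iteration, so tools registered mid-run become usable immediately.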
@property
def _chain_type(self) -> str:
pass
def _call(
self,
inputs: Dict[str, str],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run text through and get agent response."""
# Construct a mapping of tool name to tool for easy lookup
num_tools = len(self.tools)
name_to_tool_map = {tool.name: tool for tool in self.tools}
# We construct a mapping from each tool to a color, used for logging.
color_mapping = get_color_mapping(
[tool.name for tool in self.tools], excluded_colors=["green", "red"]
)
intermediate_steps: List[Tuple[AgentAction, str]] = []
# Let's start tracking the number of iterations and time elapsed
iterations = 0
time_elapsed = 0.0
start_time = time.time()
# We now enter the agent loop (until it returns something).
while self._should_continue(iterations, time_elapsed):
if num_tools != len(ToolRegistry().get_tools(self._lc_kwargs.get("name", None))):
# If the number of tools has changed, update the mapping
self.tools = ToolRegistry().get_tools(self._lc_kwargs.get("name", None))
name_to_tool_map = {tool.name: tool for tool in self.tools}
# We construct a mapping from each tool to a color, used for logging.
color_mapping = get_color_mapping(
[tool.name for tool in self.tools], excluded_colors=["green", "red"]
)
num_tools = len(self.tools)
next_step_output = self._take_next_step(
name_to_tool_map,
color_mapping,
inputs,
intermediate_steps,
run_manager=run_manager,
)
if isinstance(next_step_output, AgentFinish):
return self._return(
next_step_output, intermediate_steps, run_manager=run_manager
)
intermediate_steps.extend(next_step_output)
if len(next_step_output) == 1:
next_step_action = next_step_output[0]
# See if tool should return directly
tool_return = self._get_tool_return(next_step_action)
if tool_return is not None:
return self._return(
tool_return, intermediate_steps, run_manager=run_manager
)
iterations += 1
time_elapsed = time.time() - start_time
output = self.agent.return_stopped_response(
self.early_stopping_method, intermediate_steps, **inputs
)
return self._return(output, intermediate_steps, run_manager=run_manager)
class DialogueAgentWithTools(DialogueAgent):
def __init__(
self,
name: str,
system_message: SystemMessage,
model,
tools: List,
) -> None:
super().__init__(name, system_message, model)
self.tools = tools
ToolRegistry().set_tools(name, self.tools)
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
todo_prompt = PromptTemplate.from_template(
"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}"
)
todo_chain = LLMChain(llm=self.model, prompt=todo_prompt)
todo_tool = Tool(
name="TODO",
func=todo_chain.run,
description="useful for when you need to come up with todo lists. Input: an objective to create a todo list for. Output: a todo list for that objective. Fully describe your task in detail!",
)
ToolRegistry().add_tool(self.name, todo_tool)
agent_chain = SelfModifiableAgentExecutor.from_agent_and_tools(
agent=StructuredChatAgent.from_llm_and_tools(llm=self.model,
tools=self.tools),
tools=self.tools,
max_iterations=99,
verbose=True,
memory=ConversationBufferMemory(
memory_key="chat_history", return_messages=True
),
name=self.name
)
message = AIMessage(
content=agent_chain.run(
input="\n".join(
[self.system_message.content] + self.message_history + [self.prefix]
)
)
)
return message.content
class UserAgent(DialogueAgent):
def send(self) -> str:
message = input(f"\n{self.prefix}")
return message | [
"You are a planner who is an expert at coming up with a todo list for a given objective. Come up with a todo list for this objective: {objective}",
"\n"
] |
2024-01-10 | jbpayton/llm-auto-forge | ExampleOutput~ExampleAgentTools~DirectoryCreator.py | import os
import sys
from langchain.tools import tool
@tool("create_directory", return_direct=False)
def create_directory(dir_path: str) -> str:
"""This tool creates a directory."""
try:
# Create the directory
os.makedirs(dir_path, exist_ok=True)
return 'Finished running tool.'
except:
# If there is an error, print the error to the console.
return 'Error: ' + str(sys.exc_info()) | [] |
2024-01-10 | jbpayton/llm-auto-forge | tools~ToolRegistry.py | from langchain.schema import Document
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
import importlib.util
import os
import re
import util
class ToolRegistry:
_instance = None
_tool_registry = {}
_tool_data_store = None
def __new__(cls):
if cls._instance is None:
cls._instance = super(ToolRegistry, cls).__new__(cls)
return cls._instance
def set_tools(self, agent_name, tools):
self._tool_registry[agent_name] = tools
def get_tools(self, agent_name):
return self._tool_registry.get(agent_name, [])
def add_tool(self, agent_name, tool):
if agent_name not in self._tool_registry:
self._tool_registry[agent_name] = []
self._tool_registry[agent_name].append(tool)
def query_unregistered_tools_by_description(self, description, agent_name, tool_path="./AgentTools"):
if self._tool_data_store is None:
self._tool_data_store = ToolDataStore(tool_path)
registered_tools = self._tool_registry.get(agent_name, [])
registered_tool_names = [tool.name for tool in registered_tools]
tools = self._tool_data_store.query_description(description)
unregistered_tools = str(["'Description':" + tool.page_content + "," + str(tool.metadata) for tool in tools if
tool.metadata["tool_name"] not in registed_tool_names])
return unregistered_tools
def register_tool(tool_function, tool_filename, agent_name, tool_path="./AgentTools"):
# Parse the module name from the filename
module_name = os.path.splitext(os.path.basename(tool_filename))[0]
# Load the module
spec = importlib.util.spec_from_file_location(module_name, tool_path + "/" + tool_filename)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
# Get a reference to the function
loaded_tool = getattr(module, tool_function)
# register tool to agent
ToolRegistry().add_tool(agent_name, loaded_tool)
# return the description of the tool
output = loaded_tool.description
return output
def query_unregistered_tools(description, agent_name, tool_path="./AgentTools"):
unregistered_tools = ToolRegistry().query_unregistered_tools_by_description(description, agent_name, tool_path)
tool_message = "The following tools may be helpful, they have not yet been registered and can be registered with " \
"the tool_registration_tool:\n" + unregistered_tools
return tool_message
class ToolDataStore:
def __init__(self, tool_path="./AgentTools"):
# Get all the files in the directory
files = os.listdir(tool_path)
# initialize the tool list
tool_list = []
# Load the tools
for file in files:
if file.endswith(".py") and file != "__init__.py" and file != "ToolTemplate.py":
tool_details = ToolDataStore.parse_py_file(tool_path, file)
tool_doc = Document(page_content=tool_details["description"],
metadata={"tool_name": tool_details["tool_name"],
"tool_filename": tool_details["tool_filename"]})
tool_list.append(tool_doc)
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
self.vectorstore = Chroma.from_documents(tool_list, embeddings)
@staticmethod
def parse_py_file(tool_path, filename):
with open(os.path.join(tool_path, filename), 'r') as f:
content = f.read()
# Extracting function decorated with @tool
tool_match = re.search(r'@tool\("([^"]+)"', content)
if not tool_match:
raise ValueError("Function is not decorated with @tool")
tool_function = tool_match.group(1)
# Extracting docstring
docstring_match = re.search(r'"""\s*([\s\S]*?)\s*"""', content)
if not docstring_match:
raise ValueError("Function does not have a description")
function_description = docstring_match.group(1).strip()
return {
"tool_filename": filename,
"description": function_description,
"tool_name": tool_function
}
def query_description(self, query):
return self.vectorstore.similarity_search(query)
if (__name__ == "__main__"):
register_tool("image_downloader", "ImageDownloader.py", "agent1", tool_path="../AgentTools")
print(query_unregistered_tools("Make dank memes", "agent1", tool_path="../AgentTools"))
| [] |
2024-01-10 | jbpayton/llm-auto-forge | ExampleOutput~ExampleAgentTools~ImageRecognitionAndCaptioning.py | import sys
from PIL import Image, ImageDraw
from transformers import pipeline
from langchain.tools import tool
@tool("image_recognition_and_captioning", return_direct=False)
def image_recognition_and_captioning(image_path: str, output_path: str) -> str:
"""This tool recognizes the content of an image and adds a caption to it."""
try:
# Create a pipeline for image recognition
image_recognition = pipeline('image-classification')
# Open the image file
img = Image.open(image_path)
# Perform image recognition
result = image_recognition(img)
# Get the label of the image
label = result[0]['label']
# Create a draw object
draw = ImageDraw.Draw(img)
# Add the caption to the image
draw.text((10, 10), label, fill='white')
# Save the captioned image
img.save(output_path)
return 'Finished running tool.'
except:
# If there is an error, print the error to the console.
return 'Error: ' + str(sys.exc_info()) | [] |
2024-01-10 | jbpayton/llm-auto-forge | tools~LLMBrowsingTools.py | import re
import requests
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.tools import tool
from langchain.vectorstores import Chroma
import markdownify
class WebScrapingCache:
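# Singleton cache: scraped pages are embedded once and reused from a shared Chroma vector store across queries.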
_instance = None
_initialised = False
def __new__(cls):
if cls._instance is None:
cls._instance = super(WebScrapingCache, cls).__new__(cls)
return cls._instance
def __init__(self):
if WebScrapingCache._initialised:
return
WebScrapingCache._initialised = True
self._embeddings = None
self._vector_store = None
self._url_list = []
def add_documents(self, docs):
if self._embeddings is None:
self._embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
if self._vector_store is None:
self._vector_store = Chroma.from_documents(docs, self._embeddings)
else:
self._vector_store.add_documents(docs)
def query_website(self, url: str, query: str, keep_links: bool = False):
self.scrape_website(url, keep_links=keep_links)
filter_dict = dict()
filter_dict["url"] = url
results = self._vector_store.max_marginal_relevance_search(query, 3, filter=filter_dict)
return results
def paged_read(self, url: str, page: int, keep_links: bool = False):
docs = self.scrape_website(url, keep_links=keep_links, chunk_size=2000, chunk_overlap=0, cache=False)
if docs is None:
return "Error scraping website"
if page > len(docs):
return "Page not found"
return str(docs[page]) + "\n\n" + f" = Page {page} of {len(docs)-1}"
def scrape_website(self, url: str, keep_links=False, chunk_size=1024, chunk_overlap=128, cache=True):
link_suffix = "(Keep links)" if keep_links else ""
if url + link_suffix in self._url_list and cache:
print("Site in cache, skipping...")
return
print("Scraping website...")
# Make the request
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'}
response = requests.get(url, headers=headers)
# Check the response status code
if response.status_code == 200:
if keep_links:
tags_to_strip = []
else:
tags_to_strip = ['a']
# Remove script and style tags (and meta tags)
stripped_text = re.sub(r'<script.*?</script>', '', str(response.content))
stripped_text = re.sub(r'<style.*?</style>', '', str(stripped_text))
stripped_text = re.sub(r'<meta.*?</meta>', '', str(stripped_text))
text = markdownify.markdownify(stripped_text, strip=tags_to_strip)
# Removing \n and \t
text = re.sub(r'\\n|\\t', '', text)
# Removing emoji sequences (unicode escape sequences)
text = re.sub(r'\\x[0-9a-f]{2}', '', text)
# split the text into chunks
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size,
chunk_overlap=chunk_overlap)
docs = text_splitter.create_documents([text], metadatas=[{"url": url}])
if cache:
self.add_documents(docs)
self._url_list.append(url + link_suffix)
return docs
else:
print(f"HTTP request failed with status code {response.status_code}")
return f"HTTP request failed with status code {response.status_code}"
@tool("query_website", return_direct=False)
def query_website(website_url: str, query: str, keep_links: bool = False) -> str:
"""useful when you need to get data from a website url, passing both url and the query to the function; DO NOT
make up any url, the url should only be from the search results. Links can be enabled or disabled as needed. """
return str(WebScrapingCache().query_website(website_url, query, keep_links=keep_links))
@tool("paged_web_browser", return_direct=False)
def paged_web_browser(website_url: str, page: int) -> str:
"""useful when you need to read data from a website without overflowing context, passing both url and the page number (zero indexed) to the function; DO NOT
make up any url, the url should only be from the search results. Links can be enabled or disabled as needed. """
return str(WebScrapingCache().paged_read( website_url, page))
if __name__ == "__main__":
query = "What does Rachel Alucard look like?"
print(query)
results = WebScrapingCache().query_website('https://blazblue.fandom.com/wiki/Rachel_Alucard', query)
print(str(results))
query = "Rachel Alucard and bell peppers?"
print(query)
results = WebScrapingCache().query_website('https://blazblue.fandom.com/wiki/Rachel_Alucard', query)
print(str(results))
doc = WebScrapingCache().paged_read('https://www.deeplearning.ai/resources/natural-language-processing/', 5)
print(doc)
| [] |