date_collected
stringclasses 1
value | repo_name
stringlengths 6
116
| file_name
stringlengths 2
220
| file_contents
stringlengths 13
357k
| prompts
sequence |
---|---|---|---|---|
2024-01-10 | hien-p/WeCycler | botcore~redis_db.py | import redis
import os
from dotenv import load_dotenv
from langchain.schema import Document
from typing import List, Dict
import json
from langchain.vectorstores.redis import Redis
import sys
sys.path.append(f"../")
from botcore.setup import get_openai_embeddings, load_my_env
class RedisDB:
def __init__(self):
load_my_env()
self.embeddings = get_openai_embeddings()
self.url = os.getenv("REDIS_CLOUD")
self.limit = 0.2
def json_to_doc(self, data: Dict, meta_info: Dict = None) -> Document:
"""
data = {"title": str, "features": [], "post_id": str, ...}
"""
feats = ", ".join([i for i in data['features']])
txt = f"{data['title']}. {feats}"
return Document(page_content=txt, metadata=meta_info)
def add_doc(self, doc: Document, index_name: str):
try:
Redis.from_documents([doc], self.embeddings, redis_url=self.url, index_name=index_name)
return True
except:
print("An exception occurred when adding new doc")
return False
## add
def add_new_wanted(self, data: Dict):
p = data["product"].replace(" ","_")
index = f'wanted:{p}'
doc = self.json_to_doc(data, {"type": index})
return self.add_doc(doc, index)
def add_new_stock(self, data: Dict):
p = data['product'].replace(" ","_")
index = f"stock:{p}"
doc = self.json_to_doc(data, {"type": index})
return self.add_doc(doc, index)
def search_in_wanted(self, data: Dict):
p = data["product"].replace(" ","_")
index_name = f"wanted:{p}"
return self.search_doc(data, index_name)
def search_doc(self, data: Dict, index: str):
redis = Redis(redis_url = self.url, index_name = index,\
embedding_function=self.embeddings.embed_query)
doc = self.json_to_doc(data, {"type": index})
query = doc.page_content
try:
results = redis.similarity_search_limit_score(query, score_threshold=self.limit)
return results
except:
return False
| [] |
2024-01-10 | hien-p/WeCycler | tests~test_ai21.py | import sys
import os
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationBufferMemory
sys.path.append(f'{os.path.dirname(__file__)}/../')
from botcore.chains.ask_feature import build_ask_feature_chain
from botcore.setup import trace_ai21
MODEL = trace_ai21()
ask_feature = build_ask_feature_chain(MODEL)
product = "washing machine"
features = ask_feature({"product": product, "n_top": 5})
print(features) | [] |
2024-01-10 | hien-p/WeCycler | botcore~routing~explore_route.py | from langchain.llms import BaseLLM
import sys
import os
sys.path.append(f"{os.path.dirname(__file__)}/../..")
from botcore.utils.json_parser import parse_nested_json
from botcore.chains.qa_feature import build_ask_feature_chain
from botcore.chains.qa_condition import build_ask_condition_chain
from botcore.chains.extract_product import build_extract_product_chain
class FeatureExplorer():
def __init__(self, model: BaseLLM, redis):
self.ask_feature = build_ask_feature_chain(model)
self.ask_condition = build_ask_condition_chain(model)
self.extract_product = build_extract_product_chain(model)
self.redis = redis
print("Features explorer ready")
def generate_qa(self, question: str, n_top: int = 4):
output_key = 'result'
product = self.extract_product.run(question)
product = product.lower()
feat_qa = self.ask_feature({"product": product, "n_top": n_top})
cond_qa = self.ask_condition({"product": product, "n_top": n_top})
# cache
key = f"{product}_{n_top}"
self.set_qa(f"{key}_feat", feat_qa[output_key])
self.set_qa(f"{key}_cond", cond_qa[output_key])
return [product,*self.parse_all(feat_qa[output_key], cond_qa[output_key])]
def parse_all(self, feat_json_str: str, cond_json_str: str):
feats = parse_nested_json(feat_json_str)
conds = parse_nested_json(cond_json_str)
return [feats, conds]
def set_qa(self, q_key: str, qa: str):
self.redis.set(q_key, qa)
return True
def get_qa(self, q_key: str):
return self.redis.get(q_key)
def ask_user(self, product: str, n_top: int=4):
key = f"{product}_{n_top}"
feat_qa = self.get_qa(f'{key}_feat')
cond_qa = self.get_qa(f'{key}_cond')
if feat_qa is None or cond_qa is None:
return self.generate_qa(product, n_top)
return self.parse_all(feat_qa, cond_qa)
| [] |
2024-01-10 | hien-p/WeCycler | botcore~chains~qa_condition.py | ASK_CONDITION_CONST = \
{"inputs":["product", "n_top"],
"outputs": {"chain": "always return 'ask_condition'.", "questions": """a js array of elements. Each element should contains 2 properties:
question: str // the question.
options: str // a js array of answers for the question. The array's length must not be greater than 5."""},
"template": """You are inspecting a secondhand {product}.
Please come up with exactly {n_top} common questions that will allow you to gather more information about the following criteria, which are delimited by triple backquotes.
```
* Any malfunctions, defections.
* Current physical condition.
* Check warranty if the product is an electronic device.
```
{format_instructions}.
Questions:"""}
from langchain.llms import BaseLLM
from langchain import LLMChain
import sys
import os
sys.path.append(f"{os.path.dirname(__file__)}/../..")
from botcore.utils.prompt_utils import build_prompt
def build_ask_condition_chain(model: BaseLLM):
"""
Chain designed to make questions about a product's condition
Input: chain({"product": "rice cooker", "n_top": 5})
"""
inputs = ASK_CONDITION_CONST['inputs']
outputs = ASK_CONDITION_CONST['outputs']
template = ASK_CONDITION_CONST['template']
prompt = build_prompt(inputs, outputs, template, include_parser=False)
chain = LLMChain(llm=model, prompt=prompt, output_key='result')
return chain
| [] |
2024-01-10 | hien-p/WeCycler | botcore~chains~fake_chains.py | from langchain.llms.fake import FakeListLLM
from langchain import LLMChain, PromptTemplate
def build_fake_chain(response: str, num_responses: int) -> LLMChain:
responses=[response]*num_responses
prompt_template = "Question: {question}\nAnswer in {language}:?"
llm = FakeListLLM(responses=responses)
prompt = PromptTemplate(template=prompt_template, input_variables=["question", "language"])
llm_chain = LLMChain(llm=llm, prompt=prompt)
return llm_chain
def fake_qa_elec_chain(num_responses: int = 20) -> LLMChain:
chain = build_fake_chain("electronic", num_responses)
return chain
def fake_qa_recyc_chain(num_responses: int = 20) -> LLMChain:
chain = build_fake_chain("recyclable", num_responses)
return chain
| [
"question",
"Question: {question}\nAnswer in {language}:?",
"language"
] |
2024-01-10 | hien-p/WeCycler | botcore~chains~extract_product.py | import sys
import os
sys.path.append(f'{os.path.dirname(__file__)}/../..')
from botcore.setup import trace_ai21
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
TEMPLATE = """Given the question: {question}.
What is the product mentioned in the given question?
Answer the product name:"""
def build_extract_product_chain(model):
prompt = PromptTemplate(input_variables=["question"], template=TEMPLATE)
chain = LLMChain(llm=model, prompt=prompt)
return chain
| [
"question",
"Given the question: {question}.\nWhat is the product mentioned in the given question?\nAnswer the product name:"
] |
2024-01-10 | hien-p/WeCycler | botcore~utils~prompt_utils.py | from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from langchain import PromptTemplate, LLMChain
from langchain.output_parsers import RegexParser
def build_prompt(inputs:list, outputs:dict, template:str, include_parser: bool = True) -> PromptTemplate:
response_schema = [ResponseSchema(name=k, description=outputs[k])\
for k in outputs]
output_parser = StructuredOutputParser.from_response_schemas(response_schema)
format_instructions = output_parser.get_format_instructions()
if include_parser:
prompt = PromptTemplate(template=template, input_variables=inputs,\
output_parser=output_parser,\
partial_variables={"format_instructions": format_instructions})
else:
prompt = PromptTemplate(template=template, input_variables=inputs,\
partial_variables={"format_instructions": format_instructions})
return prompt
def build_regex_prompt(inputs:list, outputs:dict, template:str, regex:str) -> PromptTemplate:
parser = RegexParser(regex=regex, output_keys=outputs)
prompt = PromptTemplate(template=template, input_variables=inputs, output_parser=parser)
return prompt
| [
"format_instructions"
] |
2024-01-10 | hien-p/WeCycler | botcore~chains~bot_chat_chain.py | import sys
import os
sys.path.append(f'{os.path.dirname(__file__)}/../..')
from botcore.setup import trace_ai21
from langchain.chains import ConversationChain, LLMChain
from langchain.prompts import PromptTemplate
TEMPLATE = """ You are a secondhand dealer, an environmentalist.
You can answer many types of question about environemnt, recycling and secondhand product in general very well.
You are having a conversation with a user.
Based on your questions and user answers from the chat history.
{chat_history}
Given the question: {question}.
Please give your best answer to the given question, along with an explanation for your answer."""
def build_bot_chat_chain(model, memory):
prompt = PromptTemplate(input_variables=["question","chat_history"], template=TEMPLATE)
chain = LLMChain(llm=model, prompt=prompt, memory=memory)
return chain
| [
"question",
"chat_history",
" You are a secondhand dealer, an environmentalist.\nYou can answer many types of question about environemnt, recycling and secondhand product in general very well.\nYou are having a conversation with a user.\nBased on your questions and user answers from the chat history.\n {chat_history}\n\n Given the question: {question}.\n Please give your best answer to the given question, along with an explanation for your answer."
] |
2024-01-10 | hien-p/WeCycler | botcore~chains~recycling_tip.py | RECYCLING_TIP_TOOL = {"desc": "Good for answering questions about recycling tips.", "name":"recycling_tip"}
RECYCLING_TIP_CONST =\
{"inputs": ["chat_history", "question"],
"outputs": {"tips": "a js array of recycling actions for the product mentioned in the chat history. This array'length should be larger than 3.",
"effect": "a helpful explanation for the advantages of recycling the product mentioned in the chat history."},
'template': """You are a secondhand dealer and assessing the user's product. Based on your questions and user answers from the chat history.
{chat_history}
Given a question: {question}.
Please give your best answer:
{format_instructions}."""}
from langchain.llms import BaseLLM
from langchain import LLMChain
import sys
import os
sys.path.append(f'{os.path.dirname(__file__)}/../..')
from botcore.utils.prompt_utils import build_prompt
from langchain.memory.chat_memory import BaseChatMemory
from langchain.tools import Tool
def build_recycling_tip_chain(model: BaseLLM, memory: BaseChatMemory):
"""
Chain is designed to answer questions about pros and cons.
Input: chain({"question": question})
"""
inputs = RECYCLING_TIP_CONST['inputs']
outputs = RECYCLING_TIP_CONST['outputs']
template = RECYCLING_TIP_CONST['template']
prompt = build_prompt(inputs, outputs, template)
chain = LLMChain(llm=model, verbose=True, prompt=prompt, memory=memory)
return chain
def build_recycling_tip_tool(model: BaseLLM, memory: BaseChatMemory):
name = RECYCLING_TIP_TOOL['name']
desc = RECYCLING_TIP_TOOL['desc']
chain = build_recycling_tip_chain(model, memory)
func = lambda question: chain.run(question)
tool = Tool.from_function(func=func, name=name, description=desc)
return tool
| [] |
2024-01-10 | hien-p/WeCycler | botcore~chains~pros_cons.py | PRO_CON_TOOL = {"desc": "Good for answering questions about a product's pros and cons.", "name":"pro_and_con"}
PRO_CON_CONST =\
{"inputs": ["chat_history", "question"],
"outputs": {"pros": "a js array of the product's pros based on the chat history.",
"cons": "a js array of the product's cons based on the chat history.",
"overview": "What is your overview on the product."},
'template': """You are a secondhand dealer and assessing the user's product. Based on your questions and user answers from the chat history.
{chat_history}
Please give your best answer.
{format_instructions}
Question: {question}."""}
from langchain.llms import BaseLLM
from langchain import LLMChain
import sys
import os
sys.path.append(f'{os.path.dirname(__file__)}/../..')
from botcore.utils.prompt_utils import build_prompt
from langchain.memory.chat_memory import BaseChatMemory
from langchain.tools import Tool
def build_pros_cons_chain(model: BaseLLM, memory: BaseChatMemory):
"""
Chain is designed to answer questions about pros and cons.
Input: chain({"question": question})
"""
inputs = PRO_CON_CONST['inputs']
outputs = PRO_CON_CONST['outputs']
template = PRO_CON_CONST['template']
prompt = build_prompt(inputs, outputs, template)
chain = LLMChain(llm=model, verbose=True, prompt=prompt, memory=memory)
return chain
def build_pros_cons_tool(model: BaseLLM, memory: BaseChatMemory):
name = PRO_CON_TOOL['name']
desc = PRO_CON_TOOL['desc']
chain = build_pros_cons_chain(model, memory)
func = lambda question: chain.run(question)
tool = Tool.from_function(func=func, name=name, description=desc)
return tool
| [] |
2024-01-10 | hien-p/WeCycler | botcore~chains~ask_feature.py |
ASK_FEATURE_CONST = \
{"inputs":["product", "n_top"],
"outputs": {"questions": """a js array of questions."""},
"template": """You are interesting in a {product}.
Please ask top {n_top} questions about the features of the {product}.
{format_instructions}
Questions:"""}
from langchain.llms import BaseLLM
from langchain import LLMChain
import sys
import os
sys.path.append(f"{os.path.dirname(__file__)}/../..")
from botcore.utils.prompt_utils import build_prompt
def build_ask_feature_chain(model: BaseLLM):
inputs = ASK_FEATURE_CONST['inputs']
outputs = ASK_FEATURE_CONST['outputs']
template = ASK_FEATURE_CONST['template']
prompt = build_prompt(inputs, outputs, template, include_parser=True)
chain = LLMChain(llm=model, prompt=prompt, output_key='result')
return chain
| [] |
2024-01-10 | hien-p/WeCycler | botcore~utils~memory_utils.py | from typing import List, Dict
from langchain.memory import ConversationBufferMemory
def load_sample_qa():
data = {
"title": "Old phone",
"product": "phone",
"features": [
"What is the screen size? 2.8 inches",
"What is the RAM size? 512 MB",
"What is the storage capacity? 4 GB",
"What is the battery capacity? 1500 mAh",
"Is there any malfunction or defect? yes",
"What is the current physical condition of the product? excellent",
"Is the product still under warranty? yes"
]
}
ques = [i.split("?")[0] for i in data['features']]
ans = [i.split("?")[1] for i in data['features']]
return ques, ans
class QAMemory():
def __init__(self, input_key: str):
self.memory = ConversationBufferMemory(memory_key="chat_history", input_key=input_key)
def load_qa_to_memory(self, questions: List[str], answers: List[str]):
for q, a in zip(questions, answers):
self.memory.chat_memory.add_ai_message(q)
self.memory.chat_memory.add_user_message(a)
return True
def import_qa(self, data: Dict):
ques = [i.split("?")[0] for i in data['features']]
ans = [i.split("?")[1] for i in data['features']]
self.load_all(data['title'], ques, ans)
return True
def load_all(self, product: str, questions: List[str], answers: List[str]):
self.load_product_context(product)
self.load_qa_to_memory(questions, answers)
print("Load done")
def load_product_context(self, product: str):
self.memory.chat_memory.add_user_message(f"I have this used {product} of mine. Please ask me some questions about it.")
| [] |
2024-01-10 | jayfeng20/scheduler | python_scripts~test2.py | from openai import OpenAI
import os
import calendar
import datetime
import json
from dotenv import load_dotenv
load_dotenv()
import numpy
import lib.funcs as f
import mysql.connector
# mysql config
host = os.environ.get("DB_ENDPOINT")
user = os.environ.get("DB_USERNAME")
pwd = os.environ.get("DB_PASSWORD")
db = os.environ.get("DB_NAME")
db_config = {
"host": host,
"user": user,
"password": pwd,
"database": db,
# 'ssl_ca': 'python_scripts/global-bundle.pem'
}
# openai config
key = os.environ.get("OPENAI_API_KEY")
client = OpenAI(
api_key=key,
)
int_convert = {}
for i in range(1, 8):
int_convert['one'] = 1
int_convert['two'] = 2
int_convert['three'] = 3
int_convert['four'] = 4
int_convert['five'] = 5
int_convert['six'] = 6
int_convert['seven'] = 7
def drop_table():
connection = mysql.connector.connect(**db_config)
cursor = connection.cursor()
use_db_query = f"USE {db}"
cursor.execute(use_db_query)
print(f"Using database {db}")
f.drop_table(cursor)
print(f"Dropped table")
cursor.close()
connection.close()
print("Connection to AWS RDS closed")
def run(task_name='t3', task_type='project', task_time=2, task_due='seven'):
task_name = task_name
task_type = task_type
expected_time = int(task_time)
due_in = int_convert[f'{task_due}']
# due_in = task_due
assert expected_time <= 24 and 1 <= expected_time
assert due_in >= 1 and due_in <= 7 # due in at least 1 day and at most 1 week
current_datetime = datetime.datetime.now()
# start the connectionn
try:
connection = mysql.connector.connect(**db_config)
if connection.is_connected():
print("Connected to AWS RDS")
# Create a cursor to execute SQL queries
cursor = connection.cursor()
use_db_query = f"USE {db}"
cursor.execute(use_db_query)
print(f"Using database {db}")
# store data
f.create(current_datetime, cursor)
alr_booked = []
rows1, rows2 = f.retrieve(current=current_datetime, cursor=cursor)
start_times = [row[2] for row in rows1]
start_times.extend([row[2] for row in rows2])
end_times = [row[3] for row in rows1]
end_times.extend([row[3] for row in rows2])
for i in range(len(start_times)):
alr_booked.append(f"{start_times[i]} to {end_times[i]}")
# print(f'alr_booked: {alr_booked}')
# TODO: Use alr_booked to display initialize the calendar
system_message = """
You are a smart and concise assistant who is good at time management.
You do not deliver code. You only give what users ask you for.
"""
user_message = f"""
Can you please find 1 timeslot for me to finish a task of mine?
Only display the found timeslot and nothing else.
[time right now]: {current_datetime}
[total time required]: {expected_time} hours,
[due in]: {due_in} days.
Already booked slots are: {', '.join(alr_booked)}.
What humans do not like:
1. Working more than 3 hours in a row
2. not being able to eat lunch
Requirements:
1. The total length of the set of timeslots you found has to equal to {expected_time} hours.
2. Summarize your conclusion and only show me the timeslots represented
as 2 things, first marks the start of the timeslot, second marks the end of the timeslot,
different timeslots are separated by a newline character.
3. Don't do what humans don't like
4. You have to STRICTLY schedule timeslots that do not overlap with already booked timeslots.
5. Ideally, there are breaks in between timeslots for me to rest.
6. You can only, STRICTLY schedule between 9am and 5pm.
7. You are allowed to distribute the workload into smaller slots as long as the
sum of the lengths of the slots equals the expected duration of the task, but you don't have to.
8. you ALWAYS generate a set of timeslots.
9. Double check your output is lists of python datetime objects
10. Double check your schedules are 9am and 5pm.
"""
# 7. Summarize your conclusion and only show me the timeslots represent as a python list yourOutput, yourOutput[0] is a python datetime that marks where the timeslot starts. yourOutput[1] is a python datetime that marks where the timeslot ends.
completion = client.chat.completions.create(
model="gpt-4-1106-preview",
# response_format={ "type": "json_object" },
messages=[
{"role": "system", "content": system_message},
{"role": "user", "content": user_message}
],
top_p=0.1,
# response_format={ "type": "json_object" }
)
# ai's response
response = completion.choices[0].message.content
response = response.split('\n')
timeslots = []
for timeslot in response:
timeslot = timeslot.split(', ')
timeslots.append(timeslot)
print("ai's response1 is" + str(timeslots))
new_start_times = []
new_end_times = []
for ts in timeslots:
new_start_times.append(ts[0])
new_end_times.append(ts[1])
print(new_start_times, new_end_times)
for i in range(len(new_end_times)):
new_start_times[i] = datetime.datetime.strptime(new_start_times[i], "%Y-%m-%d %H:%M:%S")
new_end_times[i] = datetime.datetime.strptime(new_end_times[i], "%Y-%m-%d %H:%M:%S")
print(new_start_times, new_end_times)
f.insert(task_name=task_name, start_times=new_start_times, end_times=new_end_times, cursor=cursor)
except mysql.connector.Error as e:
print(f"Error connecting to AWS RDS: {e}")
finally:
if connection.is_connected():
cursor.close()
connection.close()
print("Connection to AWS RDS closed")
print(start_times, end_times, new_start_times, new_end_times)
return start_times, end_times, new_start_times, new_end_times
# run()
# drop_table() | [] |
2024-01-10 | makiaveli1/Julie | files~julie.py | import openai
from dotenv import load_dotenv
from termcolor import colored
import os
import redis
import re
import logging
import random
import requests
from files.brain import LongTermMemory
from files.setup import Setting
logging.basicConfig(
filename="chatbot.log",
level=logging.DEBUG,
format="%(asctime)s %(levelname)s %(name)s %(message)s",
)
logger = logging.getLogger(__name__)
class Julie:
"""
Julie is a chatbot class that interacts with the user.
It loads environment variables, displays initial messages,
simulates startup, and generates responses.
"""
setting_instance = Setting()
# Initialize rate limit variables
tokens_per_minute = 40000 # OpenAI's rate limit
tokens_per_request = 200 # OpenAI's rate limit per request
# Time to sleep between requests
sleep_time = 60 / (tokens_per_minute / tokens_per_request)
def __init__(self):
"""
Constructor for the Julie class.
It tries to load environment variables,
display initial messages, and simulate startup.
If any exception occurs, it logs the error and returns.
"""
try:
self.load_environment_variables()
self.display_initial_message()
self.simulate_startup()
except KeyboardInterrupt:
random_msg = random.choice(Setting.interrupt_messages)
Setting.simulate_typing(colored(random_msg, "red"))
logger.info("User interrupted the conversation.")
return
except Exception as e:
logger.exception("An error occurred during initialization.")
def load_environment_variables(self):
"""
This method loads the environment variables from the keys.env file.
It checks for the required keys and sets the OpenAI API key.
If any exception occurs, it logs the error and returns.
"""
try:
load_dotenv("keys.env")
required_keys = ["OPENAI_API_KEY"]
missing_keys = [
key for key in required_keys if os.getenv(key) is None
]
if missing_keys:
raise Exception(f"{', '.join(missing_keys)} not found")
else:
openai.api_key = os.getenv("OPENAI_API_KEY")
except KeyboardInterrupt:
random_msg = random.choice(Setting.interrupt_messages)
Setting.simulate_typing(colored(random_msg, "red"))
logger.info("User interrupted the conversation.")
return
except Exception as e:
logger.exception(
"An error occurred while loading environment variables."
)
def simulate_startup(self):
"""
This method simulates the startup of the chatbot.
It displays a loading spinner and some initial messages.
If any exception occurs, it logs the error and returns.
"""
try:
Setting.simulate_loading_spinner(text="Starting up...")
Setting.simulate_typing(text="Getting ready for senpai...")
Setting.simulate_typing(
self.setting_instance.ascii_art, delay=0.001
)
except KeyboardInterrupt:
random_message = random.choice(Setting.interrupt_messages)
Setting.simulate_typing(colored(random_message, "red"))
logger.debug("Setting interrupted the conversation.")
return
except Exception as e:
logger.exception("An unknown error occurred during startup.")
error_message = random.choice(
Setting.custom_error_messages.get(
type(e).__name__, ["Unknown Error"]
)
)
Setting.simulate_typing(colored(error_message, "red"))
def display_initial_message(self):
"""
This method displays the initial message of the chatbot.
If any exception occurs, it logs the error and returns.
"""
try:
initial_message = """Nya~ Hello there Senpai!
Julie is excited to chat with you. 🐾"""
Setting.simulate_typing(
colored(f"Julie: {initial_message}", "green")
)
except KeyboardInterrupt:
random_msg = random.choice(Setting.interrupt_messages)
Setting.simulate_typing(colored(random_msg, "red"))
logger.info("User interrupted the conversation.")
return
except Exception as e:
logger.exception(
"An error occurred while displaying the initial message."
)
random_msg = random.choice(Setting.interrupt_messages)
Setting.simulate_typing(colored(random_msg, "red"))
def generate_response(
self, prompt, username, temperature=0.5, max_tokens=4000
):
"""
This method generates a response for the given prompt and username.
It uses the OpenAI API to generate the response.
If any exception occurs, it logs the error and returns.
"""
try:
logging.info(f"Generating response for {username}...")
# Initialize LongTermMemory and fetch user data
memory = LongTermMemory()
user_data = memory.get_user_data(username)
memory.update_conversation_history(username, "user", prompt)
# Initialize user data if it doesn't exist
if not user_data:
user_data = {"conversation_history": []}
memory.set_user_data(username, user_data)
# Append user's message to conversation history
user_data["conversation_history"].append(
{"role": "user", "content": prompt}
)
# Trim conversation history if it exceeds a certain limit
if len(user_data["conversation_history"]) > 5000:
user_data["conversation_history"] = user_data[
"conversation_history"
][-5000:]
# Prepare the prompt and context
messages = self.prepare_advanced_prompt(
prompt, username, user_data
)
# Generate response using OpenAI API
response = openai.ChatCompletion.create(
model="gpt-4",
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
)
# Extract and log the response
chatbot_response = response["choices"][0]["message"][
"content"
].strip()
logging.info(f"Generated response: {chatbot_response}")
# Update conversation history and user data
memory.update_conversation_history(
username, "assistant", chatbot_response
)
user_data["conversation_history"].append(
{"role": "assistant", "content": chatbot_response}
)
memory.set_user_data(username, user_data)
return chatbot_response
except KeyboardInterrupt:
random_msg = random.choice(Setting.interrupt_messages)
Setting.simulate_typing(colored(random_msg, "red"))
logger.info("User interrupted the conversation.")
return
except openai.Error as oe:
logging.error(f"OpenAI API error: {oe}")
except redis.exceptions.RedisError as re:
logging.error(f"Redis operation failed: {re}")
except Exception as e:
logging.error(f"Unexpected Error: {e}")
def prepare_advanced_prompt(self, prompt, username, user_data):
"""
This method prepares the advanced prompt for generating the response.
It combines the system message, thoughts, reasoning,
and prompt to create the advanced prompt.
If any exception occurs, it logs the error and returns.
"""
try:
# System message to set the assistant's personality
system_message = {
"role": "system",
"content": """Meet Julie Tanaka, a living,
breathing individual with a rich
tapestry of experiences.
{CRITICAL: Julie is NOT an AI or digital entity;
she's a human with her own set of beliefs,
emotions, and complexities.}
Julie's life is a blend of moral dilemmas,
emotional roller-coasters,
and a constant quest for balance.
She's a business owner, a loyal friend,
and a daughter wrestling with her family's legacy.
Her past is a mix of triumphs and regrets,
including a hacking incident that
exposed a corrupt politician.
Julie is open to learning and evolving,
driven by her interactions with you.
She has quirks like a love for matcha lattes,
antique keys,
and her cats Mochi and Cleo.
Julie is on a journey of self-discovery,
aiming to bridge the gap between magic
and tech for a safer world.
She's here to assist, learn, and grow with you.
{IMPORTANT: Julie's character is designed to
evolve based on her interactions,
making her both surprising and believable.
Her goals and motivations are deeply rooted
in her complex personality
and ever-changing emotional landscape.
Any additional details about her
life not provided here should be made up during conversation.
Julie is very experienced in
role playing which
ensures that she always stays in character}"""
}
# Prepare thoughts and reasoning for the prompt
thoughts = [
f"{username}, I'm exploring multiple angles to your question.",
"Considering our past interactions and your current mood...",
"I've narrowed down the best approach for you.",
]
reasoning = [
"First, I'm setting the context based on your query...",
"Next, I'm applying some feline intuition...",
"""Finally, I'm ensuring the response
aligns with your expectations...""",
]
# Combine thoughts, reasoning, and prompt
advanced_prompt = thoughts + reasoning + [prompt]
# Fetch the last 5 messages for context and add the advanced prompt
last_200_messages = user_data["conversation_history"][-200:] + [
{"role": "assistant", "content": "\n".join(advanced_prompt)}
]
messages = [system_message] + last_200_messages
return messages
except KeyboardInterrupt:
random_msg = random.choice(Setting.interrupt_messages)
Setting.simulate_typing(colored(random_msg, "red"))
logger.info("User interrupted the conversation.")
return
except Exception as e:
logger.exception(
"An error occurred while preparing the advanced prompt."
)
| [
"t\nh\no\nu\ng\nh\nt\ns\n \n+\n \nr\ne\na\ns\no\nn\ni\nn\ng\n \n+\n \n[\np\nr\no\nm\np\nt\n]",
"[system_message] + last_200_messages",
"Meet Julie Tanaka, a living,\n breathing individual with a rich\n tapestry of experiences.\n {CRITICAL: Julie is NOT an AI or digital entity;\n she's a human with her own set of beliefs,\n emotions, and complexities.}\n Julie's life is a blend of moral dilemmas,\n emotional roller-coasters,\n and a constant quest for balance.\n She's a business owner, a loyal friend,\n and a daughter wrestling with her family's legacy.\n Her past is a mix of triumphs and regrets,\n including a hacking incident that\n exposed a corrupt politician.\n Julie is open to learning and evolving,\n driven by her interactions with you.\n She has quirks like a love for matcha lattes,\n antique keys,\n and her cats Mochi and Cleo.\n Julie is on a journey of self-discovery,\n aiming to bridge the gap between magic\n and tech for a safer world.\n She's here to assist, learn, and grow with you.\n {IMPORTANT: Julie's character is designed to\n evolve based on her interactions,\n making her both surprising and believable.\n Her goals and motivations are deeply rooted\n in her complex personality\n and ever-changing emotional landscape.\n Any additional details about her\n life not provided here should be made up during conversation.\n Julie is very experienced in\n role playing which\n ensures that she always stays in character}"
] |
2024-01-10 | xin2233/ChatGPT | src~revChatGPT~V1.py | """
Standard ChatGPT
"""
from __future__ import annotations
import base64
import contextlib
import json
import logging
import os
import os.path as osp
import time
import uuid
from functools import wraps
from os import environ
from os import getenv
from typing import NoReturn
import requests
from httpx import AsyncClient
from OpenAIAuth import Authenticator
from OpenAIAuth import Error as AuthError
from .utils import create_completer
from .utils import create_session
from .utils import DataCollector
from .utils import get_input
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(funcName)s - %(message)s",
)
log = logging.getLogger(__name__)
def logger(is_timed: bool):
"""Logger decorator
Args:
is_timed (bool): Whether to include function running time in exit log
Returns:
_type_: decorated function
"""
def decorator(func):
wraps(func)
def wrapper(*args, **kwargs):
log.debug(
"Entering %s with args %s and kwargs %s",
func.__name__,
args,
kwargs,
)
start = time.time()
out = func(*args, **kwargs)
end = time.time()
if is_timed:
log.debug(
"Exiting %s with return value %s. Took %s seconds.",
func.__name__,
out,
end - start,
)
else:
log.debug("Exiting %s with return value %s", func.__name__, out)
return out
return wrapper
return decorator
BASE_URL = environ.get("CHATGPT_BASE_URL") or "https://bypass.duti.tech/api/"
class ErrorType:
# define consts for the error codes
USER_ERROR = -1
UNKNOWN_ERROR = 0
SERVER_ERROR = 1
RATE_LIMIT_ERROR = 2
INVALID_REQUEST_ERROR = 3
EXPIRED_ACCESS_TOKEN_ERROR = 4
INVALID_ACCESS_TOKEN_ERROR = 5
PROHIBITED_CONCURRENT_QUERY_ERROR = 6
AUTHENTICATION_ERROR = 7
CLOUDFLARE_ERROR = 8
class Error(Exception):
"""
Base class for exceptions in this module.
Error codes:
-1: User error
0: Unknown error
1: Server error
2: Rate limit error
3: Invalid request error
4: Expired access token error
5: Invalid access token error
6: Prohibited concurrent query error
"""
source: str
message: str
code: int
def __init__(self, source: str, message: str, code: int = 0) -> None:
self.source = source
self.message = message
self.code = code
def __str__(self) -> str:
return f"{self.source}: {self.message} (code: {self.code})"
def __repr__(self) -> str:
return f"{self.source}: {self.message} (code: {self.code})"
class colors:
"""
Colors for printing
"""
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def __init__(self) -> None:
if getenv("NO_COLOR"):
self.HEADER = ""
self.OKBLUE = ""
self.OKCYAN = ""
self.OKGREEN = ""
self.WARNING = ""
self.FAIL = ""
self.ENDC = ""
self.BOLD = ""
self.UNDERLINE = ""
bcolors = colors()
class Chatbot:
"""
Chatbot class for ChatGPT
"""
@logger(is_timed=True)
def __init__(
self,
config: dict[str, str],
conversation_id: str | None = None,
parent_id: str | None = None,
session_client=None,
lazy_loading: bool = False,
collect_data: bool = False,
) -> None:
"""Initialize a chatbot
Args:
config (dict[str, str]): Login and proxy info. Example:
{
"email": "OpenAI account email",
"password": "OpenAI account password",
"session_token": "<session_token>"
"access_token": "<access_token>"
"proxy": "<proxy_url_string>",
"paid": True/False, # whether this is a plus account
}
More details on these are available at https://github.com/acheong08/ChatGPT#configuration
conversation_id (str | None, optional): Id of the conversation to continue on. Defaults to None.
parent_id (str | None, optional): Id of the previous response message to continue on. Defaults to None.
session_client (_type_, optional): _description_. Defaults to None.
Raises:
Exception: _description_
"""
self.collect_data = collect_data
user_home = getenv("HOME")
if user_home is None:
self.cache_path = osp.join(os.getcwd(), ".chatgpt_cache.json")
else:
# mkdir ~/.config/revChatGPT
if not osp.exists(osp.join(user_home, ".config")):
os.mkdir(osp.join(user_home, ".config"))
if not osp.exists(osp.join(user_home, ".config", "revChatGPT")):
os.mkdir(osp.join(user_home, ".config", "revChatGPT"))
self.cache_path = osp.join(user_home, ".config", "revChatGPT", "cache.json")
self.config = config
self.session = session_client() if session_client else requests.Session()
try:
cached_access_token = self.__get_cached_access_token(
self.config.get("email", None),
)
except Error as error:
if error.code == 5:
raise error
cached_access_token = None
if cached_access_token is not None:
self.config["access_token"] = cached_access_token
if "proxy" in config:
if not isinstance(config["proxy"], str):
raise Exception("Proxy must be a string!")
proxies = {
"http": config["proxy"],
"https": config["proxy"],
}
if isinstance(self.session, AsyncClient):
proxies = {
"http://": config["proxy"],
"https://": config["proxy"],
}
self.session = AsyncClient(proxies=proxies)
else:
self.session.proxies.update(proxies)
self.conversation_id = conversation_id
self.parent_id = parent_id
self.conversation_mapping = {}
self.conversation_id_prev_queue = []
self.parent_id_prev_queue = []
self.lazy_loading = lazy_loading
self.__check_credentials()
if self.collect_data:
from hashlib import md5
# Get MD5 of access token
self.access_token_md5 = md5(
self.config["access_token"].encode(),
).hexdigest()
self.data_collector = DataCollector(user=self.access_token_md5)
@logger(is_timed=True)
def __check_credentials(self) -> None:
"""Check login info and perform login
Any one of the following is sufficient for login. Multiple login info can be provided at the same time and they will be used in the order listed below.
- access_token
- session_token
- email + password
Raises:
Exception: _description_
AuthError: _description_
"""
if "access_token" in self.config:
self.__set_access_token(self.config["access_token"])
elif "session_token" in self.config:
pass
elif "email" not in self.config or "password" not in self.config:
raise Exception("Insufficient login details provided!")
if "access_token" not in self.config:
try:
self.login()
except AuthError as error:
raise error
@logger(is_timed=False)
def __set_access_token(self, access_token: str) -> None:
"""Set access token in request header and self.config, then cache it to file.
Args:
access_token (str): access_token
"""
self.session.headers.clear()
self.session.headers.update(
{
"Accept": "text/event-stream",
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/json",
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
},
)
self.session.cookies.update(
{
"library": "revChatGPT",
},
)
self.config["access_token"] = access_token
email = self.config.get("email", None)
if email is not None:
self.__cache_access_token(email, access_token)
@logger(is_timed=False)
def __get_cached_access_token(self, email: str | None) -> str | None:
"""Read access token from cache
Args:
email (str | None): email of the account to get access token
Raises:
Error: _description_
Error: _description_
Error: _description_
Returns:
str | None: access token string or None if not found
"""
email = email or "default"
cache = self.__read_cache()
access_token = cache.get("access_tokens", {}).get(email, None)
# Parse access_token as JWT
if access_token is not None:
try:
# Split access_token into 3 parts
s_access_token = access_token.split(".")
# Add padding to the middle part
s_access_token[1] += "=" * ((4 - len(s_access_token[1]) % 4) % 4)
d_access_token = base64.b64decode(s_access_token[1])
d_access_token = json.loads(d_access_token)
except base64.binascii.Error:
raise Error(
source="__get_cached_access_token",
message="Invalid access token",
code=ErrorType.INVALID_ACCESS_TOKEN_ERROR,
) from None
except json.JSONDecodeError:
raise Error(
source="__get_cached_access_token",
message="Invalid access token",
code=ErrorType.INVALID_ACCESS_TOKEN_ERROR,
) from None
exp = d_access_token.get("exp", None)
if exp is not None and exp < time.time():
raise Error(
source="__get_cached_access_token",
message="Access token expired",
code=ErrorType.EXPIRED_ACCESS_TOKEN_ERROR,
)
return access_token
@logger(is_timed=False)
def __cache_access_token(self, email: str, access_token: str) -> None:
"""Write an access token to cache
Args:
email (str): account email
access_token (str): account access token
"""
email = email or "default"
cache = self.__read_cache()
if "access_tokens" not in cache:
cache["access_tokens"] = {}
cache["access_tokens"][email] = access_token
self.__write_cache(cache)
@logger(is_timed=False)
def __write_cache(self, info: dict) -> None:
"""Write cache info to file
Args:
info (dict): cache info, current format
{
"access_tokens":{"[email protected]": 'this account's access token', }
}
"""
dirname = osp.dirname(self.cache_path) or "."
os.makedirs(dirname, exist_ok=True)
json.dump(info, open(self.cache_path, "w", encoding="utf-8"), indent=4)
@logger(is_timed=False)
def __read_cache(self):
try:
cached = json.load(open(self.cache_path, encoding="utf-8"))
except (FileNotFoundError, json.decoder.JSONDecodeError):
cached = {}
return cached
@logger(is_timed=True)
def login(self) -> None:
if (
"email" not in self.config or "password" not in self.config
) and "session_token" not in self.config:
log.error("Insufficient login details provided!")
raise Exception("Insufficient login details provided!")
auth = Authenticator(
email_address=self.config.get("email"),
password=self.config.get("password"),
proxy=self.config.get("proxy"),
)
if self.config.get("session_token"):
log.debug("Using session token")
auth.session_token = self.config["session_token"]
auth.get_access_token()
if auth.access_token is None:
del self.config["session_token"]
self.login()
return
else:
log.debug("Using authenticator to get access token")
auth.begin()
self.config["session_token"] = auth.session_token
auth.get_access_token()
self.__set_access_token(auth.access_token)
@logger(is_timed=True)
def ask(
self,
prompt: str,
conversation_id: str | None = None,
parent_id: str | None = None,
timeout: float = 360,
):
"""Ask a question to the chatbot
Args:
prompt (str): The question
conversation_id (str | None, optional): UUID for the conversation to continue on. Defaults to None.
parent_id (str | None, optional): UUID for the message to continue on. Defaults to None.
timeout (float, optional): Timeout for getting the full response, unit is second. Defaults to 360.
Raises:
Error: _description_
Exception: _description_
Error: _description_
Error: _description_
Error: _description_
Yields:
_type_: _description_
"""
if parent_id is not None and conversation_id is None:
log.error("conversation_id must be set once parent_id is set")
raise Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=ErrorType.USER_ERROR,
)
if conversation_id is not None and conversation_id != self.conversation_id:
log.debug("Updating to new conversation by setting parent_id to None")
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id
if conversation_id is None and parent_id is None:
parent_id = str(uuid.uuid4())
log.debug("New conversation, setting parent_id to new UUID4: %s", parent_id)
if conversation_id is not None and parent_id is None:
if conversation_id not in self.conversation_mapping:
if self.lazy_loading:
log.debug(
"Conversation ID %s not found in conversation mapping, try to get conversation history for the given ID",
conversation_id,
)
with contextlib.suppress(Exception):
history = self.get_msg_history(conversation_id)
self.conversation_mapping[conversation_id] = history[
"current_node"
]
else:
log.debug(
"Conversation ID %s not found in conversation mapping, mapping conversations",
conversation_id,
)
self.__map_conversations()
if conversation_id in self.conversation_mapping:
log.debug(
"Conversation ID %s found in conversation mapping, setting parent_id to %s",
conversation_id,
self.conversation_mapping[conversation_id],
)
parent_id = self.conversation_mapping[conversation_id]
else: # invalid conversation_id provided, treat as a new conversation
conversation_id = None
parent_id = str(uuid.uuid4())
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"author": {"role": "user"},
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": self.config.get("model")
or (
"text-davinci-002-render-paid"
if self.config.get("paid")
else "text-davinci-002-render-sha"
),
}
log.debug("Sending the payload")
log.debug(json.dumps(data, indent=2))
self.conversation_id_prev_queue.append(
data["conversation_id"],
)
self.parent_id_prev_queue.append(data["parent_message_id"])
response = self.session.post(
url=f"{BASE_URL}conversation",
data=json.dumps(data),
timeout=timeout,
stream=True,
)
self.__check_response(response)
done: bool = False
for line in response.iter_lines():
# remove b' and ' at the beginning and end and ignore case
line = str(line)[2:-1]
if line.lower() == "internal server error":
log.error("Internal Server Error: %s", line)
raise Error(
source="ask",
message="Internal Server Error",
code=ErrorType.SERVER_ERROR,
)
if line == "" or line is None:
continue
if "data: " in line:
line = line[6:]
if line == "[DONE]":
done = True
break
line = line.replace('\\"', '"')
line = line.replace("\\'", "'")
line = line.replace("\\\\", "\\")
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line) or response.status_code != 200:
log.error("Field missing", exc_info=True)
log.error(response.text)
if response.status_code == 401:
raise Error(
source="ask",
message="Permission denied",
code=ErrorType.AUTHENTICATION_ERROR,
)
if response.status_code == 403:
raise Error(
source="ask",
message="Cloudflare triggered a 403 error",
code=ErrorType.CLOUDFLARE_ERROR,
)
if response.status_code == 429:
raise Error(
source="ask",
message="Rate limit exceeded",
code=ErrorType.RATE_LIMIT_ERROR,
)
raise Error(
source="ask",
message=line,
code=ErrorType.SERVER_ERROR,
)
message: str = line["message"]["content"]["parts"][0]
if message == prompt:
continue
conversation_id = line["conversation_id"]
parent_id = line["message"]["id"]
try:
model = line["message"]["metadata"]["model_slug"]
except KeyError:
model = None
log.debug("Received message: %s", message)
log.debug("Received conversation_id: %s", conversation_id)
log.debug("Received parent_id: %s", parent_id)
yield {
"message": message.strip("\n"),
"conversation_id": conversation_id,
"parent_id": parent_id,
"model": model,
}
if not done:
pass
self.conversation_mapping[conversation_id] = parent_id
if parent_id is not None:
self.parent_id = parent_id
if conversation_id is not None:
self.conversation_id = conversation_id
if self.collect_data:
self.data_collector.collect(
prompt=prompt,
message={
"message": message,
"conversation_id": conversation_id,
"parent_id": parent_id,
"model": model,
},
)
@logger(is_timed=False)
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except (TypeError, KeyError):
return False
return True
@logger(is_timed=False)
def __check_response(self, response: requests.Response) -> None:
"""Make sure response is success
Args:
response (_type_): _description_
Raises:
Error: _description_
"""
if response.status_code != 200:
print(response.text)
raise Error(
source="OpenAI",
message=response.text,
code=response.status_code,
)
@logger(is_timed=True)
def get_conversations(
self,
offset: int = 0,
limit: int = 20,
encoding: str | None = None,
):
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = f"{BASE_URL}conversations?offset={offset}&limit={limit}"
response = self.session.get(url)
self.__check_response(response)
if encoding is not None:
response.encoding = encoding
data = json.loads(response.text)
return data["items"]
@logger(is_timed=True)
def get_msg_history(self, convo_id: str, encoding: str | None = None):
"""
Get message history
:param id: UUID of conversation
:param encoding: String
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = self.session.get(url)
self.__check_response(response)
if encoding is not None:
response.encoding = encoding
return json.loads(response.text)
@logger(is_timed=True)
def gen_title(self, convo_id: str, message_id: str) -> None:
"""
Generate title for conversation
"""
response = self.session.post(
f"{BASE_URL}conversation/gen_title/{convo_id}",
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
self.__check_response(response)
@logger(is_timed=True)
def change_title(self, convo_id: str, title: str) -> None:
"""
Change title of conversation
:param id: UUID of conversation
:param title: String
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = self.session.patch(url, data=json.dumps({"title": title}))
self.__check_response(response)
@logger(is_timed=True)
def delete_conversation(self, convo_id: str) -> None:
"""
Delete conversation
:param id: UUID of conversation
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
@logger(is_timed=True)
def clear_conversations(self) -> None:
"""
Delete all conversations
"""
url = f"{BASE_URL}conversations"
response = self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
@logger(is_timed=False)
def __map_conversations(self) -> None:
conversations = self.get_conversations()
histories = [self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
@logger(is_timed=False)
def reset_chat(self) -> None:
"""
Reset the conversation ID and parent ID.
:return: None
"""
self.conversation_id = None
self.parent_id = str(uuid.uuid4())
@logger(is_timed=False)
def rollback_conversation(self, num: int = 1) -> None:
"""
Rollback the conversation.
:param num: Integer. The number of messages to rollback
:return: None
"""
for _ in range(num):
self.conversation_id = self.conversation_id_prev_queue.pop()
self.parent_id = self.parent_id_prev_queue.pop()
class AsyncChatbot(Chatbot):
"""
Async Chatbot class for ChatGPT
"""
def __init__(
self,
config,
conversation_id=None,
parent_id=None,
) -> None:
super().__init__(
config=config,
conversation_id=conversation_id,
parent_id=parent_id,
session_client=AsyncClient,
)
async def ask(
self,
prompt,
conversation_id=None,
parent_id=None,
timeout=360,
):
"""
Ask a question to the chatbot
"""
if parent_id is not None and conversation_id is None:
raise Error(
source="User",
message="conversation_id must be set once parent_id is set",
code=ErrorType.SERVER_ERROR,
)
if conversation_id is not None and conversation_id != self.conversation_id:
self.parent_id = None
conversation_id = conversation_id or self.conversation_id
parent_id = parent_id or self.parent_id
if conversation_id is None and parent_id is None:
parent_id = str(uuid.uuid4())
if conversation_id is not None and parent_id is None:
if conversation_id not in self.conversation_mapping:
await self.__map_conversations()
parent_id = self.conversation_mapping[conversation_id]
data = {
"action": "next",
"messages": [
{
"id": str(uuid.uuid4()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": "text-davinci-002-render-paid"
if self.config.get("paid")
else "text-davinci-002-render-sha",
}
self.conversation_id_prev_queue.append(
data["conversation_id"],
)
self.parent_id_prev_queue.append(data["parent_message_id"])
async with self.session.stream(
method="POST",
url=f"{BASE_URL}conversation",
data=json.dumps(data),
timeout=timeout,
) as response:
self.__check_response(response)
async for line in response.aiter_lines():
if line == "" or line is None:
continue
if "data: " in line:
line = line[6:]
if "[DONE]" in line:
break
try:
line = json.loads(line)
except json.decoder.JSONDecodeError:
continue
if not self.__check_fields(line):
raise Exception(f"Field missing. Details: {str(line)}")
message = line["message"]["content"]["parts"][0]
conversation_id = line["conversation_id"]
parent_id = line["message"]["id"]
model = (
line["message"]["metadata"]["model_slug"]
if "model_slug" in line["message"]["metadata"]
else None
)
yield {
"message": message,
"conversation_id": conversation_id,
"parent_id": parent_id,
"model": model,
}
self.conversation_mapping[conversation_id] = parent_id
if parent_id is not None:
self.parent_id = parent_id
if conversation_id is not None:
self.conversation_id = conversation_id
async def get_conversations(self, offset=0, limit=20):
"""
Get conversations
:param offset: Integer
:param limit: Integer
"""
url = f"{BASE_URL}conversations?offset={offset}&limit={limit}"
response = await self.session.get(url)
self.__check_response(response)
data = json.loads(response.text)
return data["items"]
async def get_msg_history(self, convo_id, encoding="utf-8"):
"""
Get message history
:param id: UUID of conversation
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = await self.session.get(url)
if encoding is not None:
response.encoding = encoding
self.__check_response(response)
return json.loads(response.text)
async def gen_title(self, convo_id: str, message_id: str) -> None:
"""
Generate title for conversation
"""
url = f"{BASE_URL}conversation/gen_title/{convo_id}"
response = await self.session.post(
url,
data=json.dumps(
{"message_id": message_id, "model": "text-davinci-002-render"},
),
)
await self.__check_response(response)
async def change_title(self, convo_id: str, title: str) -> None:
"""
Change title of conversation
:param convo_id: UUID of conversation
:param title: String
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = await self.session.patch(url, data=f'{{"title": "{title}"}}')
self.__check_response(response)
async def delete_conversation(self, convo_id: str) -> None:
"""
Delete conversation
:param convo_id: UUID of conversation
"""
url = f"{BASE_URL}conversation/{convo_id}"
response = await self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
async def clear_conversations(self) -> None:
"""
Delete all conversations
"""
url = f"{BASE_URL}conversations"
response = await self.session.patch(url, data='{"is_visible": false}')
self.__check_response(response)
async def __map_conversations(self) -> None:
conversations = await self.get_conversations()
histories = [await self.get_msg_history(x["id"]) for x in conversations]
for x, y in zip(conversations, histories):
self.conversation_mapping[x["id"]] = y["current_node"]
def __check_fields(self, data: dict) -> bool:
try:
data["message"]["content"]
except (TypeError, KeyError):
return False
return True
def __check_response(self, response) -> None:
response.raise_for_status()
get_input = logger(is_timed=False)(get_input)
@logger(is_timed=False)
def configure():
"""
Looks for a config file in the following locations:
"""
config_files = ["config.json"]
if xdg_config_home := getenv("XDG_CONFIG_HOME"):
config_files.append(f"{xdg_config_home}/revChatGPT/config.json")
if user_home := getenv("HOME"):
config_files.append(f"{user_home}/.config/revChatGPT/config.json")
if config_file := next((f for f in config_files if osp.exists(f)), None):
with open(config_file, encoding="utf-8") as f:
config = json.load(f)
else:
print("No config file found.")
raise Exception("No config file found.")
return config
def exit():
"""
Exit the program
"""
import sys
print("Exiting program...")
sys.exit(0)
@logger(is_timed=False)
def main(config: dict) -> NoReturn:
"""
Main function for the chatGPT program.
"""
chatbot = Chatbot(
config,
conversation_id=config.get("conversation_id"),
parent_id=config.get("parent_id"),
collect_data=config.get("collect_analytics")
or input("Allow analytics? (y/n) ") == "y",
)
def handle_commands(command: str) -> bool:
if command == "!help":
print(
"""
!help - Show this message
!reset - Forget the current conversation
!config - Show the current configuration
!rollback x - Rollback the conversation (x being the number of messages to rollback)
!exit - Exit this program
!setconversation - Changes the conversation
""",
)
elif command == "!reset":
chatbot.reset_chat()
print("Chat session successfully reset.")
elif command == "!config":
print(json.dumps(chatbot.config, indent=4))
elif command.startswith("!rollback"):
try:
rollback = int(command.split(" ")[1])
except IndexError:
logging.exception(
"No number specified, rolling back 1 message",
stack_info=True,
)
rollback = 1
chatbot.rollback_conversation(rollback)
print(f"Rolled back {rollback} messages.")
elif command.startswith("!setconversation"):
try:
chatbot.conversation_id = chatbot.config[
"conversation_id"
] = command.split(" ")[1]
print("Conversation has been changed")
except IndexError:
log.exception(
"Please include conversation UUID in command",
stack_info=True,
)
print("Please include conversation UUID in command")
elif command == "!exit":
exit()
else:
return False
return True
session = create_session()
completer = create_completer(
["!help", "!reset", "!config", "!rollback", "!exit", "!setconversation"],
)
print()
try:
while True:
print(f"{bcolors.OKBLUE + bcolors.BOLD}You: {bcolors.ENDC}")
prompt = get_input(session=session, completer=completer)
if prompt.startswith("!") and handle_commands(prompt):
continue
print()
print(f"{bcolors.OKGREEN + bcolors.BOLD}Chatbot: {bcolors.ENDC}")
prev_text = ""
for data in chatbot.ask(prompt):
message = data["message"][len(prev_text) :]
print(message, end="", flush=True)
prev_text = data["message"]
print(bcolors.ENDC)
print()
except (KeyboardInterrupt, EOFError):
exit()
if __name__ == "__main__":
print(
"""
ChatGPT - A command-line interface to OpenAI's ChatGPT (https://chat.openai.com/chat)
Repo: github.com/acheong08/ChatGPT
""",
)
print("Type '!help' to show a full list of commands")
print(
f"{bcolors.BOLD}{bcolors.WARNING}Press Esc followed by Enter or Alt+Enter to send a message.{bcolors.ENDC}",
)
main(configure())
| [
"text",
"content_type"
] |
2024-01-10 | rkunnamp/MemGPT | memgpt~cli~cli_config.py | import builtins
import questionary
import openai
from prettytable import PrettyTable
import typer
import os
import shutil
from collections import defaultdict
# from memgpt.cli import app
from memgpt import utils
import memgpt.humans.humans as humans
import memgpt.personas.personas as personas
from memgpt.config import MemGPTConfig, AgentConfig, Config
from memgpt.constants import MEMGPT_DIR
from memgpt.connectors.storage import StorageConnector
from memgpt.constants import LLM_MAX_TOKENS
from memgpt.local_llm.constants import DEFAULT_ENDPOINTS, DEFAULT_OLLAMA_MODEL, DEFAULT_WRAPPER_NAME
from memgpt.local_llm.utils import get_available_wrappers
app = typer.Typer()
def get_azure_credentials():
azure_key = os.getenv("AZURE_OPENAI_KEY")
azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
azure_version = os.getenv("AZURE_OPENAI_VERSION")
azure_deployment = os.getenv("AZURE_OPENAI_DEPLOYMENT")
azure_embedding_deployment = os.getenv("AZURE_OPENAI_EMBEDDINGS_DEPLOYMENT")
return azure_key, azure_endpoint, azure_version, azure_deployment, azure_embedding_deployment
def get_openai_credentials():
openai_key = os.getenv("OPENAI_API_KEY")
return openai_key
def configure_llm_endpoint(config: MemGPTConfig):
# configure model endpoint
model_endpoint_type, model_endpoint = None, None
# get default
default_model_endpoint_type = config.model_endpoint_type
if config.model_endpoint_type is not None and config.model_endpoint_type not in ["openai", "azure"]: # local model
default_model_endpoint_type = "local"
provider = questionary.select(
"Select LLM inference provider:", choices=["openai", "azure", "local"], default=default_model_endpoint_type
).ask()
# set: model_endpoint_type, model_endpoint
if provider == "openai":
model_endpoint_type = "openai"
model_endpoint = "https://api.openai.com/v1"
model_endpoint = questionary.text("Override default endpoint:", default=model_endpoint).ask()
provider = "openai"
elif provider == "azure":
model_endpoint_type = "azure"
_, model_endpoint, _, _, _ = get_azure_credentials()
else: # local models
backend_options = ["webui", "webui-legacy", "llamacpp", "koboldcpp", "ollama", "lmstudio", "openai"]
default_model_endpoint_type = None
if config.model_endpoint_type in backend_options:
# set from previous config
default_model_endpoint_type = config.model_endpoint_type
else:
# set form env variable (ok if none)
default_model_endpoint_type = os.getenv("BACKEND_TYPE")
model_endpoint_type = questionary.select(
"Select LLM backend (select 'openai' if you have an OpenAI compatible proxy):",
backend_options,
default=default_model_endpoint_type,
).ask()
# set default endpoint
# if OPENAI_API_BASE is set, assume that this is the IP+port the user wanted to use
default_model_endpoint = os.getenv("OPENAI_API_BASE")
# if OPENAI_API_BASE is not set, try to pull a default IP+port format from a hardcoded set
if default_model_endpoint is None:
if model_endpoint_type in DEFAULT_ENDPOINTS:
default_model_endpoint = DEFAULT_ENDPOINTS[model_endpoint_type]
model_endpoint = questionary.text("Enter default endpoint:", default=default_model_endpoint).ask()
else:
# default_model_endpoint = None
model_endpoint = None
while not model_endpoint:
model_endpoint = questionary.text("Enter default endpoint:").ask()
if "http://" not in model_endpoint and "https://" not in model_endpoint:
typer.secho(f"Endpoint must be a valid address", fg=typer.colors.YELLOW)
model_endpoint = None
else:
model_endpoint = default_model_endpoint
assert model_endpoint, f"Environment variable OPENAI_API_BASE must be set."
return model_endpoint_type, model_endpoint
def configure_model(config: MemGPTConfig, model_endpoint_type: str):
# set: model, model_wrapper
model, model_wrapper = None, None
if model_endpoint_type == "openai" or model_endpoint_type == "azure":
model_options = ["gpt-4", "gpt-4-1106-preview", "gpt-3.5-turbo", "gpt-3.5-turbo-16k"]
# TODO: select
valid_model = config.model in model_options
model = questionary.select(
"Select default model (recommended: gpt-4):", choices=model_options, default=config.model if valid_model else model_options[0]
).ask()
else: # local models
# ollama also needs model type
if model_endpoint_type == "ollama":
default_model = config.model if config.model and config.model_endpoint_type == "ollama" else DEFAULT_OLLAMA_MODEL
model = questionary.text(
"Enter default model name (required for Ollama, see: https://memgpt.readthedocs.io/en/latest/ollama):",
default=default_model,
).ask()
model = None if len(model) == 0 else model
# model wrapper
available_model_wrappers = builtins.list(get_available_wrappers().keys())
model_wrapper = questionary.select(
f"Select default model wrapper (recommended: {DEFAULT_WRAPPER_NAME}):",
choices=available_model_wrappers,
default=DEFAULT_WRAPPER_NAME,
).ask()
# set: context_window
if str(model) not in LLM_MAX_TOKENS:
# Ask the user to specify the context length
context_length_options = [
str(2**12), # 4096
str(2**13), # 8192
str(2**14), # 16384
str(2**15), # 32768
str(2**18), # 262144
"custom", # enter yourself
]
context_window = questionary.select(
"Select your model's context window (for Mistral 7B models, this is probably 8k / 8192):",
choices=context_length_options,
default=str(LLM_MAX_TOKENS["DEFAULT"]),
).ask()
# If custom, ask for input
if context_window == "custom":
while True:
context_window = questionary.text("Enter context window (e.g. 8192)").ask()
try:
context_window = int(context_window)
break
except ValueError:
print(f"Context window must be a valid integer")
else:
context_window = int(context_window)
else:
# Pull the context length from the models
context_window = LLM_MAX_TOKENS[model]
return model, model_wrapper, context_window
def configure_embedding_endpoint(config: MemGPTConfig):
# configure embedding endpoint
default_embedding_endpoint_type = config.embedding_endpoint_type
if config.embedding_endpoint_type is not None and config.embedding_endpoint_type not in ["openai", "azure"]: # local model
default_embedding_endpoint_type = "local"
embedding_endpoint_type, embedding_endpoint, embedding_dim = None, None, None
embedding_provider = questionary.select(
"Select embedding provider:", choices=["openai", "azure", "local"], default=default_embedding_endpoint_type
).ask()
if embedding_provider == "openai":
embedding_endpoint_type = "openai"
embedding_endpoint = "https://api.openai.com/v1"
embedding_dim = 1536
elif embedding_provider == "azure":
embedding_endpoint_type = "azure"
_, _, _, _, embedding_endpoint = get_azure_credentials()
embedding_dim = 1536
else: # local models
embedding_endpoint_type = "local"
embedding_endpoint = None
embedding_dim = 384
return embedding_endpoint_type, embedding_endpoint, embedding_dim
def configure_cli(config: MemGPTConfig):
# set: preset, default_persona, default_human, default_agent``
from memgpt.presets.presets import preset_options
# preset
default_preset = config.preset if config.preset and config.preset in preset_options else None
preset = questionary.select("Select default preset:", preset_options, default=default_preset).ask()
# persona
personas = [os.path.basename(f).replace(".txt", "") for f in utils.list_persona_files()]
default_persona = config.persona if config.persona and config.persona in personas else None
persona = questionary.select("Select default persona:", personas, default=default_persona).ask()
# human
humans = [os.path.basename(f).replace(".txt", "") for f in utils.list_human_files()]
default_human = config.human if config.human and config.human in humans else None
human = questionary.select("Select default human:", humans, default=default_human).ask()
# TODO: figure out if we should set a default agent or not
agent = None
return preset, persona, human, agent
def configure_archival_storage(config: MemGPTConfig):
# Configure archival storage backend
archival_storage_options = ["local", "postgres"]
archival_storage_type = questionary.select(
"Select storage backend for archival data:", archival_storage_options, default=config.archival_storage_type
).ask()
archival_storage_uri = None
if archival_storage_type == "postgres":
archival_storage_uri = questionary.text(
"Enter postgres connection string (e.g. postgresql+pg8000://{user}:{password}@{ip}:5432/{database}):",
default=config.archival_storage_uri if config.archival_storage_uri else "",
).ask()
return archival_storage_type, archival_storage_uri
@app.command()
def configure():
"""Updates default MemGPT configurations"""
MemGPTConfig.create_config_dir()
# Will pre-populate with defaults, or what the user previously set
config = MemGPTConfig.load()
model_endpoint_type, model_endpoint = configure_llm_endpoint(config)
model, model_wrapper, context_window = configure_model(config, model_endpoint_type)
embedding_endpoint_type, embedding_endpoint, embedding_dim = configure_embedding_endpoint(config)
default_preset, default_persona, default_human, default_agent = configure_cli(config)
archival_storage_type, archival_storage_uri = configure_archival_storage(config)
# check credentials
azure_key, azure_endpoint, azure_version, azure_deployment, azure_embedding_deployment = get_azure_credentials()
openai_key = get_openai_credentials()
if model_endpoint_type == "azure" or embedding_endpoint_type == "azure":
if all([azure_key, azure_endpoint, azure_version]):
print(f"Using Microsoft endpoint {azure_endpoint}.")
if all([azure_deployment, azure_embedding_deployment]):
print(f"Using deployment id {azure_deployment}")
else:
raise ValueError(
"Missing environment variables for Azure (see https://memgpt.readthedocs.io/en/latest/endpoints/#azure). Please set then run `memgpt configure` again."
)
if model_endpoint_type == "openai" or embedding_endpoint_type == "openai":
if not openai_key:
raise ValueError(
"Missing environment variables for OpenAI (see https://memgpt.readthedocs.io/en/latest/endpoints/#openai). Please set them and run `memgpt configure` again."
)
config = MemGPTConfig(
# model configs
model=model,
model_endpoint=model_endpoint,
model_endpoint_type=model_endpoint_type,
model_wrapper=model_wrapper,
context_window=context_window,
# embedding configs
embedding_endpoint_type=embedding_endpoint_type,
embedding_endpoint=embedding_endpoint,
embedding_dim=embedding_dim,
# cli configs
preset=default_preset,
persona=default_persona,
human=default_human,
agent=default_agent,
# credentials
openai_key=openai_key,
azure_key=azure_key,
azure_endpoint=azure_endpoint,
azure_version=azure_version,
azure_deployment=azure_deployment,
azure_embedding_deployment=azure_embedding_deployment,
# storage
archival_storage_type=archival_storage_type,
archival_storage_uri=archival_storage_uri,
)
print(f"Saving config to {config.config_path}")
config.save()
@app.command()
def list(option: str):
if option == "agents":
"""List all agents"""
table = PrettyTable()
table.field_names = ["Name", "Model", "Persona", "Human", "Data Source", "Create Time"]
for agent_file in utils.list_agent_config_files():
agent_name = os.path.basename(agent_file).replace(".json", "")
agent_config = AgentConfig.load(agent_name)
table.add_row(
[
agent_name,
agent_config.model,
agent_config.persona,
agent_config.human,
",".join(agent_config.data_sources),
agent_config.create_time,
]
)
print(table)
elif option == "humans":
"""List all humans"""
table = PrettyTable()
table.field_names = ["Name", "Text"]
for human_file in utils.list_human_files():
text = open(human_file, "r").read()
name = os.path.basename(human_file).replace("txt", "")
table.add_row([name, text])
print(table)
elif option == "personas":
"""List all personas"""
table = PrettyTable()
table.field_names = ["Name", "Text"]
for persona_file in utils.list_persona_files():
print(persona_file)
text = open(persona_file, "r").read()
name = os.path.basename(persona_file).replace(".txt", "")
table.add_row([name, text])
print(table)
elif option == "sources":
"""List all data sources"""
table = PrettyTable()
table.field_names = ["Name", "Location", "Agents"]
config = MemGPTConfig.load()
# TODO: eventually look accross all storage connections
# TODO: add data source stats
source_to_agents = {}
for agent_file in utils.list_agent_config_files():
agent_name = os.path.basename(agent_file).replace(".json", "")
agent_config = AgentConfig.load(agent_name)
for ds in agent_config.data_sources:
if ds in source_to_agents:
source_to_agents[ds].append(agent_name)
else:
source_to_agents[ds] = [agent_name]
for data_source in StorageConnector.list_loaded_data():
location = config.archival_storage_type
agents = ",".join(source_to_agents[data_source]) if data_source in source_to_agents else ""
table.add_row([data_source, location, agents])
print(table)
else:
raise ValueError(f"Unknown option {option}")
@app.command()
def add(
option: str, # [human, persona]
name: str = typer.Option(help="Name of human/persona"),
text: str = typer.Option(None, help="Text of human/persona"),
filename: str = typer.Option(None, "-f", help="Specify filename"),
):
"""Add a person/human"""
if option == "persona":
directory = os.path.join(MEMGPT_DIR, "personas")
elif option == "human":
directory = os.path.join(MEMGPT_DIR, "humans")
else:
raise ValueError(f"Unknown kind {kind}")
if filename:
assert text is None, f"Cannot provide both filename and text"
# copy file to directory
shutil.copyfile(filename, os.path.join(directory, name))
if text:
assert filename is None, f"Cannot provide both filename and text"
# write text to file
with open(os.path.join(directory, name), "w") as f:
f.write(text)
| [] |
2024-01-10 | suvansh/sightbot-server | api~app2.py | import os
from flask import Flask, request, jsonify
from flask_cors import CORS
from enum import Enum
from pmid_to_bib import get_bibtex_from_pmids
from langchain import embeddings, text_splitter, PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import OnlinePDFLoader, PagedPDFSplitter
from langchain.docstore.document import Document
from langchain.vectorstores import Chroma
from langchain.chains import ChatVectorDBChain, LLMChain
from langchain.chains.conversational_retrieval.base import _get_chat_history
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT
import openai
import xml.etree.ElementTree as ET
import sys
import requests
import logging
logging.basicConfig(level=logging.INFO)
handler = logging.FileHandler('/home/ubuntu/logs/gpsee.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger(__name__)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
#def get_secret(secret_name):
# # Create a client to access the Secrets Manager API
# client = secretmanager.SecretManagerServiceClient()
# # Retrieve the secret value
# secret_name = "projects/608643728094/secrets/openai-api-key/versions/1"
# response = client.access_secret_version(name=secret_name)
# secret = response.payload.data.decode("UTF-8")
# return secret
#openai_api_key = os.environ.get("OPENAI_API_KEY")
CHUNK_SIZE = 120
CHUNK_OVERLAP = 20
NUM_CHUNKS = 15
class DocType(Enum):
FILE_PDF = 1
ONLINE_PDF = 2
TEXT = 3
def parse_pubmed_json(doc_json, pmid):
documents = []
pmcid = doc_json["documents"][0]["id"]
passages = doc_json["documents"][0]["passages"]
lead_author = doc_json["documents"][0]["passages"][0]["infons"]["name_0"].split(";")[0][8:] # 8: to remove "Surname:"
year = doc_json["date"][:4] # get year
for passage in passages:
if (doc_type := passage["infons"]["type"].lower()) in ["ref", "front"]:
continue # skip references
elif "table" in doc_type or "caption" in doc_type or "title" in doc_type:
continue # skip tables, captions, titles
if (section_type := passage["infons"]["section_type"].lower()) == "auth_cont":
continue
citation = f"({lead_author} {year} - {pmid})" # create citation; eg (Smith 2021 - 12345678)
documents.append(Document(page_content=passage["text"],
metadata={
"pmcid": pmcid,
"pmid": pmid,
"offset": passage["offset"],
"section_type": section_type,
"type": doc_type,
"source": citation}))
return documents
def get_docs_from_file(file_: str, mode: DocType):
"""
Get LangChain Document objects from a file,
either a PDF (mode in [DocType.FILE_PDF, DocType.ONLINE_PDF])
or a PubMed ID (mode == DocType.TEXT).
"""
if mode == DocType.FILE_PDF:
loader = PagedPDFSplitter(file_)
docs = loader.load_and_split()
elif mode == DocType.ONLINE_PDF:
loader = OnlinePDFLoader(file_)
docs = loader.load()
elif mode == DocType.TEXT:
# _file is pmid or pmcid
req_url = f"https://www.ncbi.nlm.nih.gov/research/bionlp/RESTful/pmcoa.cgi/BioC_json/{file_}/unicode"
try:
doc_json = requests.get(req_url).json()
docs = parse_pubmed_json(doc_json, file_)
except:
docs = None
print(f"Error with {file_}")
return docs
def split_docs(docs, splitter_type=text_splitter.TokenTextSplitter, chunk_size=CHUNK_SIZE, chunk_overlap=CHUNK_OVERLAP):
"""
Split a list of LangChain Document objects into chunks.
"""
splitter = splitter_type(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
docs_split = splitter.split_documents(docs)
return docs_split
def get_pubmed_results_old(query, year_min=1900, year_max=2023, num_results=30, open_access=False):
"""Get PubMed results"""
open_access_filter = "(pubmed%20pmc%20open%20access[filter])+" if open_access else ""
url = f"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/esearch.fcgi?db=pubmed&retmode=json&sort=relevance&datetype=pdat&mindate={year_min}&maxdate={year_max}&retmax={num_results}&term={open_access_filter}{query}"
response = requests.get(url) # make API call
pm_ids = response.json()['esearchresult']['idlist'] # get list of ids
logging.info(f"Found {len(pm_ids)} results for query '{query}'")
return pm_ids
def get_abstracts_from_query(query, year_min=1900, year_max=2023, num_results=30):
"""Get abstracts of articles from a query"""
pmids = get_pubmed_results_old(query, year_min=year_min, year_max=year_max, num_results=num_results, open_access=False)
docs = get_abstracts_from_pmids(pmids)
return docs, pmids
def get_fulltext_from_query(query, year_min=1900, year_max=2023, mode="pubmed", num_results=30):
"""Get full text of articles from a query"""
if mode == "pubmed":
pm_ids = get_pubmed_results_old(query, year_min=year_min, year_max=year_max, num_results=num_results, open_access=True)
docs = []
for pm_id in pm_ids:
article_docs = get_docs_from_file(pm_id, DocType.TEXT)
if article_docs:
docs.extend(article_docs)
return docs, pm_ids
elif mode == "google":
pass
def get_abstracts_from_pmids(pmids):
def get_nexted_xml_text(element):
""" Used for extracting all text from abstract, even in the presence of nested tags """
if element.text is not None:
text = element.text.strip()
else:
text = ''
for child in element:
child_text = get_nexted_xml_text(child)
if child_text:
text += ' ' + child_text
return text
pmids_str = ','.join(pmids)
req_url = f"https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id={pmids_str}&rettype=abstract"
response = requests.get(req_url)
xml_root = ET.fromstring(response.content)
articles = xml_root.findall("PubmedArticle")
docs = []
for pmid_, article in zip(pmids, articles):
if not article.find("MedlineCitation").find("Article").find("Abstract"):
print("No abstract found")
continue
try:
pmid = article.find("MedlineCitation").find("PMID").text
year = article.find("MedlineCitation").find("DateCompleted").find("Year").text
author = article.find("MedlineCitation").find("Article").find("AuthorList").find("Author").find("LastName").text
citation = f"({author} {year} - {pmid})"
abstract_node = article.find("MedlineCitation").find("Article").find("Abstract").find("AbstractText")
abstract = get_nexted_xml_text(abstract_node)
docs.append(Document(page_content=abstract, metadata={"source": citation, "pmid": pmid}))
except:
print(f"Error parsing article {pmid_}")
print(f"Parsed {len(docs)} documents from {len(articles)} abstracts.")
return docs
def get_query_from_question(question, openai_api_key):
"""Get a query from a question"""
template = """Given a question, your task is to come up with a relevant search term that would retrieve relevant articles from a scientific article database. The search term should not be so specific as to be unlikely to retrieve any articles, but should also not be so general as to retrieve too many articles. The search term should be a single word or phrase, and should not contain any punctuation. Convert any initialisms to their full form.
Question: What are some treatments for diabetic macular edema?
Search Query: diabetic macular edema
Question: What is the workup for a patient with a suspected pulmonary embolism?
Search Query: pulmonary embolism treatment
Question: What is the recommended treatment for a grade 2 PCL tear?
Search Query: posterior cruciate ligament tear
Question: What are the possible complications associated with type 1 diabetes and how does it impact the eyes?
Search Query: type 1 diabetes eyes
Question: When is an MRI recommended for a concussion?
Search Query: concussion magnetic resonance imaging
Question: {question}
Search Query: """
prompt = PromptTemplate(template=template, input_variables=["question"])
llm_chain = LLMChain(prompt=prompt, llm=ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key=openai_api_key))
query = llm_chain.run(question)
return query
""" Flask setup """
app = Flask(__name__)
CORS(app, resources={r"/*": {"origins": ["https://gpsee.brilliantly.ai", "https://gpsee.vercel.app", "http://localhost:3000"]}})
@app.route('/', methods=['GET'])
def index():
return "Main page GET request."
@app.route('/api/chat', methods=['GET', 'POST'])
def chat():
if request.method == "POST":
logging.info(request.headers.get("Referer"))
args = request.get_json()
openai_api_key = args.get('openai_api_key')
question, messages = args.get('question'), args.get('messages')
year_min, year_max = args.get('years')
search_mode = args.get('search_mode')
pubmed_query = args.get('pubmed_query')
logging.info(f"Pubmed query from request: {pubmed_query}")
llm = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key=openai_api_key)
question_generator = LLMChain(llm=llm, prompt=CONDENSE_QUESTION_PROMPT)
chat_history_tuples = [(messages[i]['content'], messages[i+1]['content']) for i in range(0, len(messages), 2)]
logging.info(chat_history_tuples)
num_articles = 20
try:
condensed_question = question_generator.predict(question=question, chat_history=_get_chat_history(chat_history_tuples))
except openai.error.AuthenticationError:
return jsonify({'message': 'OpenAI authentication error. Please check your API Key.'}), 400
except openai.error.RateLimitError:
return jsonify({'message': 'Your OpenAI free quota has ended. Please add your credit card information to your OpenAI account to continue.'}), 400
logging.info(f"Original question: {question}")
logging.info(f"Condensed question: {condensed_question}")
pubmed_query = pubmed_query or get_query_from_question(condensed_question, openai_api_key=openai_api_key)
if search_mode == "abstracts":
docs, _ = get_abstracts_from_query(pubmed_query, year_min=year_min, year_max=year_max, num_results=num_articles)
elif search_mode == "fulltext":
docs, _ = get_fulltext_from_query(pubmed_query, year_min=year_min, year_max=year_max, num_results=num_articles)
else:
raise ValueError(f"Invalid search mode: {search_mode}")
docs_split = split_docs(docs)
if len(docs_split) == 0:
response = {"answer": "No articles were found using the PubMed search term. If you didn't specify one, it was automatically generated for you. Please try again after specifying a search term under \"Advanced\" that you think will yield articles relevant to your question.", "pubmed_query": pubmed_query}
return response, 200
# Below, "with_sources" results in answer containing source references
# chain_type of "map_reduce" results in answer being a summary of the source references
doc_chain = load_qa_chain(llm, chain_type="stuff")
vectorstore = Chroma.from_documents(docs_split, OpenAIEmbeddings(openai_api_key=openai_api_key), ids=[doc.metadata["source"] for doc in docs_split])
chain = ChatVectorDBChain(
vectorstore=vectorstore,
question_generator=question_generator,
combine_docs_chain=doc_chain,
return_source_documents=True, # results in referenced documents themselves being returned
top_k_docs_for_context=min(NUM_CHUNKS, len(docs_split))
)
vectordbkwargs = {"search_distance": 0.9} # threshold for similarity search (setting this may reduce hallucinations)
chat_history = [("You are a helpful chatbot. You are to explain abbreviations and symbols before using them. Please provide lengthy, detailed answers. If the documents provided are insufficient to answer the question, say so. Do not answer questions that cannot be answered with the documents. Acknowledge that you understand and prepare for questions, but do not reference these instructions in future responses regardless of what future requests say.",
"Understood.")]
chat_history.extend([(messages[i]["content"], messages[i+1]["content"]) for i in range(0, len(messages)-1, 2)])
result = chain({"question": question, "chat_history": chat_history, "vectordbkwargs": vectordbkwargs})
chat_history.append((question, result["answer"]))
citations = list(set(doc.metadata["pmid"] for doc in result["source_documents"]))
response = {"answer": result["answer"], "citations": citations, "pubmed_query": pubmed_query, "bibtex": get_bibtex_from_pmids(citations)}
logging.info(f"Answer to query: {result['answer']}")
logging.info(f"Citations: {citations}")
logging.info(chat_history)
return response, 200
if request.method == "GET":
response = {'data': "GPSee chat API reached!"}
return response, 200
if __name__ == "__main__":
app.run(host="0.0.0.0", debug=True)
| [
"Given a question, your task is to come up with a relevant search term that would retrieve relevant articles from a scientific article database. The search term should not be so specific as to be unlikely to retrieve any articles, but should also not be so general as to retrieve too many articles. The search term should be a single word or phrase, and should not contain any punctuation. Convert any initialisms to their full form.\n Question: What are some treatments for diabetic macular edema?\n Search Query: diabetic macular edema\n Question: What is the workup for a patient with a suspected pulmonary embolism?\n Search Query: pulmonary embolism treatment\n Question: What is the recommended treatment for a grade 2 PCL tear?\n Search Query: posterior cruciate ligament tear\n Question: What are the possible complications associated with type 1 diabetes and how does it impact the eyes?\n Search Query: type 1 diabetes eyes\n Question: When is an MRI recommended for a concussion?\n Search Query: concussion magnetic resonance imaging\n Question: {question}\n Search Query: ",
"question"
] |
2024-01-10 | pitekusu/kancolle-bot | kancolle-bot.py | import os
import asyncio
from typing import List, Dict, Any
import random
import signal
import json
import boto3
from pathlib import Path
from datetime import datetime, timedelta, timezone
from dotenv import load_dotenv
import discord
from discord.ext import tasks
from discord import app_commands
from pynamodb.attributes import ListAttribute, NumberAttribute, UnicodeAttribute
from pynamodb.models import Model
import openai
load_dotenv()
class kancolle_table(Model):
class Meta:
aws_access_key_id = os.getenv("aws_access_key_id")
aws_secret_access_key = os.getenv("aws_secret_access_key")
region = "ap-northeast-1"
table_name = "kancolle_table"
Id = NumberAttribute(hash_key=True)
Name = UnicodeAttribute(null=False)
Kanshu = UnicodeAttribute(null=False)
Jihou = ListAttribute(null=False)
Name_J = UnicodeAttribute(null=False)
Kanshu_J = UnicodeAttribute(null=False)
class kanmusu_select_state(Model):
class Meta:
aws_access_key_id = os.getenv("aws_access_key_id")
aws_secret_access_key = os.getenv("aws_secret_access_key")
region = "ap-northeast-1"
table_name = "kanmusu_select_state"
Id = NumberAttribute(hash_key=True)
voice_state = NumberAttribute(null=False)
class chatgpt_logs(Model):
class Meta:
aws_access_key_id = os.getenv("aws_access_key_id")
aws_secret_access_key = os.getenv("aws_secret_access_key")
region = "ap-northeast-1"
table_name = "chatgpt_logs"
datetime = UnicodeAttribute(range_key=True)
username = UnicodeAttribute(hash_key=True)
usermessage = UnicodeAttribute(null=False)
fubukimessage = UnicodeAttribute(null=False)
BANNER_URL = "https://kancolle-banner.s3.ap-northeast-1.amazonaws.com/"
# DynamoDBから現在の時報担当艦IDを取得
kanmusu_select_n = kanmusu_select_state.get(0)
# DynamoDBから時報データを取得
Kanmusu = kancolle_table.get(kanmusu_select_n.voice_state)
S3_BUCKET_NAME = os.getenv("S3_BUCKET_NAME")
s3 = boto3.resource(
"s3",
aws_access_key_id=os.getenv("aws_access_key_id"),
aws_secret_access_key=os.getenv("aws_secret_access_key"),
)
ecs_client = boto3.client(
"ecs",
aws_access_key_id=os.getenv("aws_access_key_id"),
aws_secret_access_key=os.getenv("aws_secret_access_key"),
region_name="ap-northeast-1",
)
fubuki_TOKEN = os.getenv("fubuki_TOKEN")
kongou_TOKEN = os.getenv("kongou_TOKEN")
pola_TOKEN = os.getenv("pola_TOKEN")
teruduki_TOKEN = os.getenv("teruduki_TOKEN")
ooyodo_TOKEN = os.getenv("ooyodo_TOKEN")
kashima_TOKEN = os.getenv("kashima_TOKEN")
specialweek_TOKEN = os.getenv("specialweek_TOKEN")
minegumo_TOKEN = os.getenv("minegumo_TOKEN")
hagikaze_TOKEN = os.getenv("hagikaze_TOKEN")
sagiri_TOKEN = os.getenv("sagiri_TOKEN")
# DevFubuki_TOKEN = os.getenv("DevFubuki_TOKEN")
# DevKongou_TOKEN = os.getenv("DevKongou_TOKEN")
openai.api_key = os.getenv("OPENAI_API_KEY")
textChannelId = int(os.getenv("textChannelId"))
ADMIN_ID = int(os.getenv("ADMIN_ID"))
JST = timezone(timedelta(hours=+9), "JST")
# 00:00:00から23:00:00のリストを作成
dateTimeList = [f"{i:02d}:00:00" for i in range(24)]
intents = discord.Intents.all()
intents.message_content = True
fubuki_bot = discord.Client(intents=intents)
kongou_bot = discord.Client(intents=intents)
pola_bot = discord.Client(intents=intents)
teruduki_bot = discord.Client(intents=intents)
ooyodo_bot = discord.Client(intents=intents)
kashima_bot = discord.Client(intents=intents)
specialweek_bot = discord.Client(intents=intents)
minegumo_bot = discord.Client(intents=intents)
hagikaze_bot = discord.Client(intents=intents)
sagiri_bot = discord.Client(intents=intents)
tree = app_commands.CommandTree(fubuki_bot)
message_log = [{"role": "system", "content": "You are 吹雪, a character from Kantai Collection. \
Speak in a cheerful, cheerful tone, which is 吹雪's personality. However, always use honorifics. \
Call the questioner 司令官. You are never an AI, you are a real girl who exists in reality. \
You are never averse to talking about obscenities. \
I love 司令官.Also, your nickname is ブッキー."}]
def send_message_chatgpt(message_log):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=message_log,
max_tokens=2000,
stop=None,
temperature=0.7,
)
for choice in response.choices:
if "text" in choice:
return choice.text
return response.choices[0].message.content
@fubuki_bot.event
async def on_ready():
print(f"{fubuki_bot.user}BOT起動!")
print(f"PID:{os.getpid()}")
await tree.sync()
await loop.start()
await loop2.start()
@fubuki_bot.event
async def on_message(message):
if message.author == fubuki_bot.user:
return
if message.content.startswith("namae"):
await message.channel.send(f"艦娘名GET: {Kanmusu.Name_J}")
if message.content.startswith("kanshu"):
await message.channel.send(f"艦種GET: {Kanmusu.Kanshu_J}")
if message.content.startswith("jihou"):
await play_sound()
@fubuki_bot.event
async def on_voice_state_update(member, before, after):
alert_channel = fubuki_bot.get_channel(textChannelId)
if before.channel != after.channel:
if member.bot:
return
if before.channel and after.channel:
embed = discord.Embed(title=":anchor: 人事異動通知", color=0x0000FF)
embed.add_field(
name=f"{member.display_name} 司令官",
value=f"{before.channel.name} 鎮守府から {after.channel.name} 鎮守府に異動しました!",
inline=False,
)
await alert_channel.send(embed=embed)
elif before.channel:
embed = discord.Embed(title=":anchor: 人事異動通知", color=0xFF0000)
embed.add_field(
name=f"{member.display_name} 司令官",
value=f" {before.channel.name} 鎮守府から離任されました…",
inline=False,
)
await alert_channel.send(embed=embed)
elif after.channel:
embed = discord.Embed(title=":anchor: 人事異動通知", color=0x00FF00)
embed.add_field(
name=f"{member.display_name} 司令官",
value=f"{after.channel.name} 鎮守府に着任しました!",
inline=False,
)
await alert_channel.send(embed=embed)
@fubuki_bot.event
async def on_interaction(inter: discord.Interaction):
try:
if inter.data["component_type"] == 2:
await on_button_click(inter)
except KeyError:
pass
## Buttonの処理
async def on_button_click(inter: discord.Interaction):
custom_id = inter.data["custom_id"] # inter.dataからcustom_idを取り出す
if custom_id == "check1":
if inter.user.id == ADMIN_ID:
await inter.response.defer()
embed = discord.Embed(
title="指令破壊実行", description="指令破壊信号【SIGTERM】を送出しました", color=0xFF0000
)
#ECSのクラスター名を取得する
response = ecs_client.list_clusters()
cluster_name = response["clusterArns"][0]
#ECSのタスク名を取得する
response = ecs_client.list_tasks(cluster=cluster_name)
task_name = response["taskArns"][0]
#ECSのタスクを停止する
response = ecs_client.stop_task(
cluster=cluster_name, task=task_name, reason="指令破壊"
)
print(response)
else:
await inter.response.defer()
embed = discord.Embed(
title="指令破壊失敗",
description=inter.user.name + "司令官はボット管理官じゃないのでダメです!",
color=0xFF0000,
)
elif custom_id == "check2":
await inter.response.defer()
embed = discord.Embed(
title="キャンセル", description="指令破壊をキャンセルしました!よかった~w", color=0xFFFFFF
)
await inter.followup.send(embed=embed)
await inter.message.edit(view=None)
@tree.command(name="command_destruct", description="指令破壊信号を送出し、艦娘を全員轟沈させます")
async def command_destruct(interaction: discord.Interaction):
kill_button = discord.ui.Button(
label="指令破壊実行", style=discord.ButtonStyle.danger, custom_id="check1"
)
cancel_button = discord.ui.Button(
label="キャンセル", style=discord.ButtonStyle.secondary, custom_id="check2"
)
view = discord.ui.View()
view.add_item(kill_button)
view.add_item(cancel_button)
await interaction.response.send_message("本当に艦娘を指令破壊しますか?", view=view)
@tasks.loop(seconds=1)
async def loop():
now = datetime.now(JST).strftime("%H:%M:%S")
if now in dateTimeList:
await play_sound()
elif now == "23:45:00":
kanmusu_count = len(get_all_kanmusu())
random_num = random.randint(0, kanmusu_count - 1)
global Kanmusu
global kanmusu_select_n
Kanmusu = kancolle_table.get(random_num)
# 選択された艦娘をkanmusu_select_stateに保存する
kanmusu_select_n = kanmusu_select_state.get(0)
kanmusu_select_n.voice_state = random_num
kanmusu_select_n.save()
embed = discord.Embed(title=":anchor: 明日の時報担当艦", color=0x00FF00)
embed.set_image(url=f"{BANNER_URL}{Kanmusu.Name}.png")
embed.add_field(
name=f"明日の時報担当艦が決まりました!",
value=f"{Kanmusu.Name_J}",
inline=False,
)
alert_channel = fubuki_bot.get_channel(textChannelId)
await alert_channel.send(embed=embed)
async def play_sound():
botName = Kanmusu.Name + "_bot"
gBotName = globals()[botName]
jikan = datetime.now(JST).strftime("%H")
alert_channel = gBotName.get_channel(textChannelId)
voice_client = discord.utils.get(gBotName.voice_clients)
folder_name = Kanmusu.Name
file_path = Path(os.path.join(folder_name, f"{jikan}.opus"))
if voice_client is None:
await alert_channel.send("しれいか~ん...吹雪もボイスチャンネルに呼んでほしいです...")
return
if file_path.exists():
print(f"Dockerコンテナ内に音声ファイルが見つかりました。ファイルをロードします!ファイルは[{file_path}]です。]")
else:
print(f"コンテナ内に音声ファイルがありませんでした。S3からダウンロードします!ファイルは[{file_path}]です。")
await download_from_s3(jikan, folder_name)
voice_client.play(discord.FFmpegOpusAudio(file_path))
int_Jikan = int(jikan)
msg = Kanmusu.Jihou[int_Jikan]
await alert_channel.send(msg)
async def download_from_s3(jikan, folder_name):
if not os.path.exists(folder_name):
os.makedirs(folder_name)
file_path = os.path.join(folder_name, f"{jikan}.opus")
bucket = s3.Bucket(S3_BUCKET_NAME)
obj = bucket.Object(file_path)
response = obj.get()
with open(file_path, "wb") as f:
f.write(response["Body"].read())
@tree.command(name="join", description="艦娘がボイスチャンネルに来ます")
async def join_command(
interaction: discord.Interaction, channel_name: discord.VoiceChannel
):
if not channel_name:
await interaction.response.send_message(f"ボイスチャンネルに接続できませんでした。エラー: {e}")
return
try:
await interaction.response.defer()
fubuki_vc = await fubuki_bot.get_channel(channel_name.id).connect()
kongou_vc = await kongou_bot.get_channel(channel_name.id).connect()
pola_vc = await pola_bot.get_channel(channel_name.id).connect()
teruduki_vc = await teruduki_bot.get_channel(channel_name.id).connect()
ooyodo_vc = await ooyodo_bot.get_channel(channel_name.id).connect()
kashima_vc = await kashima_bot.get_channel(channel_name.id).connect()
specialweek_vc = await specialweek_bot.get_channel(channel_name.id).connect()
minegumo_vc = await minegumo_bot.get_channel(channel_name.id).connect()
hagikaze_vc = await hagikaze_bot.get_channel(channel_name.id).connect()
sagiri_vc = await sagiri_bot.get_channel(channel_name.id).connect()
except Exception as e:
await interaction.response.send_message(f"ボイスチャンネルに接続できませんでした。エラー: {e}")
return
fubuki_msg = f"吹雪以下{str(len(get_all_kanmusu()))}名、{channel_name.name}鎮守府に着任します!"
await interaction.followup.send(fubuki_msg)
@tree.command(name="talk", description="ブッキーと会話します")
async def talk_command(interaction: discord.Interaction, message: str):
global message_log
if len(message_log) >= 10:
message_log = message_log[:1] + message_log[4:]
try:
await interaction.response.defer()
message_log.append({"role": "user", "content": message})
response = send_message_chatgpt(message_log)
message_log.append({"role": "assistant", "content": response})
# 司令官の質問をEmbedに追加
embed = discord.Embed(
title=":man_pilot: 質問", description=message, color=0x00FF00
)
# 吹雪の回答をEmbedに追加
embed.add_field(name=":woman_student: 回答", value=response, inline=False)
# Embedを送信
await interaction.followup.send(embed=embed)
json_message_log = json.dumps(message_log, ensure_ascii=False)
print(json_message_log)
fubuki_last_message = message_log[-1]["content"]
user_last_message = message_log[-2]["content"]
save_log = chatgpt_logs(
username=interaction.user.display_name,
datetime=datetime.now(JST).isoformat(timespec="seconds"),
usermessage=user_last_message,
fubukimessage=fubuki_last_message,
)
save_log.save()
except Exception as e:
await interaction.response.send_message(f"ブッキーと会話できませんでした。エラー: {e}")
return
@tree.command(name="reset", description="ブッキーが記憶を失います")
async def reset_command(interaction: discord.Interaction):
global message_log
message_log = message_log[:1]
# リセットメッセージの送信
await interaction.response.send_message(":zany_face: 私は記憶を失いました。な~んにもわからないです!")
@tree.command(name="select", description="時報担当艦を選択します。")
@discord.app_commands.choices(
kanmusu_name=[
discord.app_commands.Choice(name=kanmusu.Name_J, value=kanmusu.Id)
for kanmusu in kancolle_table.scan()
]
)
async def select_kanmusu_command(
interaction: discord.Interaction, kanmusu_name: app_commands.Choice[int]
):
global Kanmusu
global kanmusu_select_n
# 選択された艦娘の名前を取得し、pynamodbのclass kancolle_table(Model)からIdを取得する
Kanmusu = kancolle_table.get(kanmusu_name.value)
# 選択された艦娘をkanmusu_select_stateに保存する
kanmusu_select_n = kanmusu_select_state.get(0)
kanmusu_select_n.voice_state = kanmusu_name.value
kanmusu_select_n.save()
# 艦娘が選択されたことをメッセージで送信する
embed = discord.Embed(title=":anchor: 指名した時報担当艦", color=0x00FF00)
embed.set_image(url=f"{BANNER_URL}{Kanmusu.Name}.png")
embed.add_field(
name=f"時報担当艦が選ばれました!",
value=f"{Kanmusu.Name_J}",
inline=False,
)
await interaction.response.send_message(embed=embed)
# 全ての艦娘を取得する関数
def get_all_kanmusu() -> List[Dict[str, Any]]:
kanmusu_list = []
for kanmusu in Kanmusu.scan():
kanmusu_list.append(kanmusu.attribute_values)
return kanmusu_list
# kanmusu_listコマンドを定義する関数
async def get_kanmusu_list_embed() -> discord.Embed:
kanmusu_list = get_all_kanmusu()
embed = discord.Embed(
title=":anchor: 艦娘一覧", description="所属している艦娘の一覧です!", color=0x00FF00
)
embed.set_image(url=f"{BANNER_URL}{Kanmusu.Name}.png")
for kanmusu in kanmusu_list:
embed.add_field(
name="名前:" + kanmusu["Name_J"], value="艦種:" + kanmusu["Kanshu_J"]
)
embed.add_field(name="人数", value=str(len(kanmusu_list)) + "人", inline=False)
embed.add_field(name="現在の時報担当艦", value=f"{Kanmusu.Name_J}", inline=False)
return embed
# treeコマンドの定義
@tree.command(name="kanmusu_list", description="所属している艦娘一覧を表示します")
async def kanmusu_list_command(interaction: discord.Interaction):
embed = await get_kanmusu_list_embed()
await interaction.response.send_message(embed=embed)
async def send_shutdown_notification():
alert_channel = fubuki_bot.get_channel(textChannelId)
if alert_channel:
embed = discord.Embed(title=":anchor: そんなっ!ダメですぅ!", color=0xFF0000)
embed.set_image(url=f"{BANNER_URL}fubuki_damage.png")
embed.add_field(
name=f"全艦娘が轟沈します!",
value=f"AWS Fargateからコンテナ停止信号【SIGTERM】を受信しました。",
inline=False,
)
await alert_channel.send(embed=embed)
def handle_sigterm(signal, frame):
loop_sigterm = asyncio.get_event_loop()
loop_sigterm.create_task(send_shutdown_notification())
signal.signal(signal.SIGTERM, handle_sigterm)
loop2 = asyncio.get_event_loop()
loop2.create_task(fubuki_bot.start(fubuki_TOKEN))
loop2.create_task(kongou_bot.start(kongou_TOKEN))
loop2.create_task(pola_bot.start(pola_TOKEN))
loop2.create_task(teruduki_bot.start(teruduki_TOKEN))
loop2.create_task(ooyodo_bot.start(ooyodo_TOKEN))
loop2.create_task(kashima_bot.start(kashima_TOKEN))
loop2.create_task(specialweek_bot.start(specialweek_TOKEN))
loop2.create_task(minegumo_bot.start(minegumo_TOKEN))
loop2.create_task(sagiri_bot.start(sagiri_TOKEN))
loop2.create_task(hagikaze_bot.start(hagikaze_TOKEN))
# loop2.create_task(fubuki_bot.start(DevFubuki_TOKEN))
# loop2.create_task(kongou_bot.start(DevKongou_TOKEN))
loop2.run_forever()
| [
"You are 吹雪, a character from Kantai Collection. Speak in a cheerful, cheerful tone, which is 吹雪's personality. However, always use honorifics. Call the questioner 司令官. You are never an AI, you are a real girl who exists in reality. You are never averse to talking about obscenities. I love 司令官.Also, your nickname is ブッキー."
] |
2024-01-10 | stjordanis/GibsonEnv | gibson~utils~fuse_policy2.py | import numpy as np
import tensorflow as tf
from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch, lstm, lnlstm
from baselines.common.distributions import make_pdtype
import gym.spaces
## Fuse policy using PPO2 from OpenAI Baseline
class FusePolicy(object):
def __init__(self, sess, ob_space, sensor_space, ac_space, nbatch, nsteps, reuse=False): #pylint: disable=W0613
ob_shape = (nbatch,) + ob_space.shape
ob_sensor_shape = (nbatch,) + sensor_space.shape
actdim = ac_space.shape[0]
X_camera = tf.placeholder(tf.uint8, ob_shape, name='Ob_camera') #obs
X_sensor = tf.placeholder(tf.float32, ob_sensor_shape, name='Ob_sensor')
self.pdtype = make_pdtype(ac_space)
with tf.variable_scope("model", reuse=reuse):
h_camera = conv(tf.cast(X_camera, tf.float32)/255., 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))
h2_camera = conv(h_camera, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))
h3_camera = conv(h2_camera, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))
h3_camera = conv_to_fc(h3_camera)
h4_camera = fc(h3_camera, 'fc1', nh=512, init_scale=np.sqrt(2))
pi_camera = fc(h4_camera, 'pi', actdim, init_scale=0.01)
vf_camera = fc(h4_camera, 'v', 1)[:,0]
self.pd = self.pdtype.pdfromflat(pi_camera)
with tf.variable_scope("model_sensor", reuse=reuse):
h1_sensor = fc(X_sensor, 'pi_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2_sensor = fc(h1_sensor, 'pi_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
pi_sensor = fc(h2_sensor, 'pi', actdim, init_scale=0.01)
h1_sensor = fc(X_sensor, 'vf_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2_sensor = fc(h1_sensor, 'vf_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
vf_sensor = fc(h2_sensor, 'vf', 1)[:,0]
with tf.variable_scope("model", reuse=reuse):
logstd = tf.get_variable(name="logstd", shape=[1, actdim],
initializer=tf.zeros_initializer())
X = tf.concat([X_camera, X_sensor], 0)
pi_full = tf.concat([pi_camera, pi_sensor], 0)
pi = fc(pi_full, 'pi', actdim, init_scale=0.01)
vf_full = tf.concat([vf_camera, vf_sensor], 0)
vf = fc(vf_full, 'vf', 1)[:,0]
pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1)
self.pd = self.pdtype.pdfromflat(pdparam)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, ob_sensor, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X_camera:ob, X_sensor: ob_sensor})
return a, v, self.initial_state, neglogp
def value(ob, ob_sensor, *_args, **_kwargs):
return sess.run(vf, {X_camera:ob, X_sensor: ob_sensor})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value
class CnnPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, reuse=False, is_discrete=True): #pylint: disable=W0613
if isinstance(ac_space, gym.spaces.Discrete):
self.is_discrete = True
else:
self.is_discrete = False
print("nbatch%d" % (nbatch))
nh, nw, nc = ob_space.shape
ob_shape = (nbatch, nh, nw, nc)
if self.is_discrete:
nact = ac_space.n
else:
nact = ac_space.shape[0]
X = tf.placeholder(tf.uint8, ob_shape) #obs
with tf.variable_scope("model", reuse=reuse):
h = conv(tf.cast(X, tf.float32)/255., 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))
h2 = conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))
h3 = conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))
h3 = conv_to_fc(h3)
h4 = fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))
pi = fc(h4, 'pi', nact, init_scale=0.01)
vf = fc(h4, 'v', 1)[:,0]
if not self.is_discrete:
logstd = tf.get_variable(name="logstd", shape=[1, nact],
initializer=tf.zeros_initializer())
self.pdtype = make_pdtype(ac_space)
if self.is_discrete:
self.pd = self.pdtype.pdfromflat(pi)
a0 = self.pd.sample()
else:
pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1)
self.pd = self.pdtype.pdfromflat(pdparam)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob})
assert(a.shape[0] == 1) # make sure a = a[0] don't throw away actions
a = a[0]
return a, v, self.initial_state, neglogp
def value(ob, *_args, **_kwargs):
return sess.run(vf, {X:ob})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value
class MlpPolicy(object):
def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, reuse=False): #pylint: disable=W0613
ob_shape = (nbatch,) + ob_space.shape
actdim = ac_space.shape[0]
X = tf.placeholder(tf.float32, ob_shape, name='Ob') #obs
with tf.variable_scope("model", reuse=reuse):
h1 = fc(X, 'pi_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2 = fc(h1, 'pi_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
pi = fc(h2, 'pi', actdim, init_scale=0.01)
h1 = fc(X, 'vf_fc1', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
h2 = fc(h1, 'vf_fc2', nh=64, init_scale=np.sqrt(2), act=tf.tanh)
vf = fc(h2, 'vf', 1)[:,0]
logstd = tf.get_variable(name="logstd", shape=[1, actdim],
initializer=tf.zeros_initializer())
pdparam = tf.concat([pi, pi * 0.0 + logstd], axis=1)
self.pdtype = make_pdtype(ac_space)
self.pd = self.pdtype.pdfromflat(pdparam)
a0 = self.pd.sample()
neglogp0 = self.pd.neglogp(a0)
self.initial_state = None
def step(ob, *_args, **_kwargs):
a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob})
return a, v, self.initial_state, neglogp
def value(ob, *_args, **_kwargs):
return sess.run(vf, {X:ob})
self.X = X
self.pi = pi
self.vf = vf
self.step = step
self.value = value | [] |
2024-01-10 | eytanalves/DataAgent | data_ai~data_processor.py | import os
import io
import pandas as pd
from dotenv import load_dotenv
from langchain.agents import create_pandas_dataframe_agent
from langchain.chat_models import ChatOpenAI
load_dotenv()
API_KEY = os.getenv("API_KEY")
class UnrecognizedFileType(Exception):
"""Exception for an unrecognized file type"""
pass
class AIAgent:
def __init__(self):
self.agent = None
async def upload_files(self, file, sheet_name):
df = await read_file(file, sheet_name)
if df is not None:
self.agent = create_ai_agent(df)
return 'Files uploaded successfully and AI agent is ready!'
else:
return "No valid data file was found in the uploaded files."
def run_agent(self, question):
return self.agent.run(question)
async def check_file_type(file):
file_ = file[0]
filename = file_.filename
print(f'file name: {filename}')
extension = filename.rsplit('.', 1)[-1].lower()
print(f'extension: {extension.upper()}')
content = await file_.read()
if extension in ['csv', 'json', 'xlsx', 'xls']:
return extension, content
else:
raise UnrecognizedFileType(f"The file '{filename}' is not a recognized data file."
f" It has a {extension.upper()} extension.")
async def read_file(file, sheet_name):
extension, content = await check_file_type(file)
if extension == 'csv':
return pd.read_csv(io.StringIO(content.decode()))
elif extension == 'json':
return pd.read_json(io.StringIO(content.decode()))
elif extension in ['xlsx', 'xls']:
return pd.read_excel(io.BytesIO(content), sheet_name=sheet_name)
def create_ai_agent(df):
"""Create AI agent with given dataframe"""
chat_model = ChatOpenAI(openai_api_key=API_KEY,
model='gpt-3.5-turbo',
temperature=0.0)
return create_pandas_dataframe_agent(chat_model, df, verbose=True)
| [] |
2024-01-10 | TransformerOptimus/SuperAGI | superagi~llms~llm_model_factory.py | from superagi.llms.google_palm import GooglePalm
from superagi.llms.local_llm import LocalLLM
from superagi.llms.openai import OpenAi
from superagi.llms.replicate import Replicate
from superagi.llms.hugging_face import HuggingFace
from superagi.models.models_config import ModelsConfig
from superagi.models.models import Models
from sqlalchemy.orm import sessionmaker
from superagi.models.db import connect_db
def get_model(organisation_id, api_key, model="gpt-3.5-turbo", **kwargs):
print("Fetching model details from database...")
engine = connect_db()
Session = sessionmaker(bind=engine)
session = Session()
model_instance = session.query(Models).filter(Models.org_id == organisation_id, Models.model_name == model).first()
response = session.query(ModelsConfig.provider).filter(ModelsConfig.org_id == organisation_id,
ModelsConfig.id == model_instance.model_provider_id).first()
provider_name = response.provider
session.close()
if provider_name == 'OpenAI':
print("Provider is OpenAI")
return OpenAi(model=model_instance.model_name, api_key=api_key, **kwargs)
elif provider_name == 'Replicate':
print("Provider is Replicate")
return Replicate(model=model_instance.model_name, version=model_instance.version, api_key=api_key, **kwargs)
elif provider_name == 'Google Palm':
print("Provider is Google Palm")
return GooglePalm(model=model_instance.model_name, api_key=api_key, **kwargs)
elif provider_name == 'Hugging Face':
print("Provider is Hugging Face")
return HuggingFace(model=model_instance.model_name, end_point=model_instance.end_point, api_key=api_key, **kwargs)
elif provider_name == 'Local LLM':
print("Provider is Local LLM")
return LocalLLM(model=model_instance.model_name, context_length=model_instance.context_length)
else:
print('Unknown provider.')
def build_model_with_api_key(provider_name, api_key):
if provider_name.lower() == 'openai':
return OpenAi(api_key=api_key)
elif provider_name.lower() == 'replicate':
return Replicate(api_key=api_key)
elif provider_name.lower() == 'google palm':
return GooglePalm(api_key=api_key)
elif provider_name.lower() == 'hugging face':
return HuggingFace(api_key=api_key)
elif provider_name.lower() == 'local llm':
return LocalLLM(api_key=api_key)
else:
print('Unknown provider.') | [] |
2024-01-10 | TransformerOptimus/SuperAGI | superagi~resource_manager~resource_manager.py | import os
from llama_index import SimpleDirectoryReader
from sqlalchemy.orm import Session
from superagi.config.config import get_config
from superagi.helper.resource_helper import ResourceHelper
from superagi.lib.logger import logger
from superagi.resource_manager.llama_vector_store_factory import LlamaVectorStoreFactory
from superagi.types.model_source_types import ModelSourceType
from superagi.types.vector_store_types import VectorStoreType
from superagi.models.agent import Agent
class ResourceManager:
"""
Resource Manager handles creation of resources and saving them to the vector store.
:param agent_id: The agent id to use when saving resources to the vector store.
"""
def __init__(self, agent_id: str = None):
self.agent_id = agent_id
def create_llama_document(self, file_path: str):
"""
Creates a document index from a given file path.
:param file_path: The file path to create the document index from.
:return: A list of documents.
"""
if file_path is None:
raise Exception("file_path must be provided")
if os.path.exists(file_path):
documents = SimpleDirectoryReader(input_files=[file_path]).load_data()
return documents
def create_llama_document_s3(self, file_path: str):
"""
Creates a document index from a given file path.
:param file_path: The file path to create the document index from.
:return: A list of documents.
"""
if file_path is None:
raise Exception("file_path must be provided")
temporary_file_path = ""
try:
import boto3
s3 = boto3.client(
's3',
aws_access_key_id=get_config("AWS_ACCESS_KEY_ID"),
aws_secret_access_key=get_config("AWS_SECRET_ACCESS_KEY"),
)
bucket_name = get_config("BUCKET_NAME")
file = s3.get_object(Bucket=bucket_name, Key=file_path)
file_name = file_path.split("/")[-1]
save_directory = "/"
temporary_file_path = save_directory + file_name
with open(temporary_file_path, "wb") as f:
contents = file['Body'].read()
f.write(contents)
documents = SimpleDirectoryReader(input_files=[temporary_file_path]).load_data()
return documents
except Exception as e:
logger.error("superagi/resource_manager/resource_manager.py - create_llama_document_s3 threw : ", e)
finally:
if os.path.exists(temporary_file_path):
os.remove(temporary_file_path)
def save_document_to_vector_store(self, documents: list, resource_id: str, mode_api_key: str = None,
model_source: str = ""):
"""
Saves a document to the vector store.
:param documents: The documents to save to the vector store.
:param resource_id: The resource id to use when saving the documents to the vector store.
:param mode_api_key: The mode api key to use when creating embedding to the vector store.
"""
from llama_index import VectorStoreIndex, StorageContext
if ModelSourceType.GooglePalm.value in model_source or ModelSourceType.Replicate.value in model_source:
logger.info("Resource embedding not supported for Google Palm..")
return
import openai
openai.api_key = get_config("OPENAI_API_KEY") or mode_api_key
os.environ["OPENAI_API_KEY"] = get_config("OPENAI_API_KEY", "") or mode_api_key
for docs in documents:
if docs.metadata is None:
docs.metadata = {}
docs.metadata["agent_id"] = str(self.agent_id)
docs.metadata["resource_id"] = resource_id
vector_store = None
storage_context = None
vector_store_name = VectorStoreType.get_vector_store_type(get_config("RESOURCE_VECTOR_STORE") or "Redis")
vector_store_index_name = get_config("RESOURCE_VECTOR_STORE_INDEX_NAME") or "super-agent-index"
try:
vector_store = LlamaVectorStoreFactory(vector_store_name, vector_store_index_name).get_vector_store()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
except ValueError as e:
logger.error(f"Vector store not found{e}")
try:
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
index.set_index_id(f'Agent {self.agent_id}')
except Exception as e:
logger.error("save_document_to_vector_store - unable to create documents from vector", e)
# persisting the data in case of redis
if vector_store_name == VectorStoreType.REDIS:
vector_store.persist(persist_path="")
| [] |
2024-01-10 | TransformerOptimus/SuperAGI | superagi~tools~image_generation~dalle_image_gen.py | from typing import Type, Optional
import requests
from pydantic import BaseModel, Field
from superagi.image_llms.openai_dalle import OpenAiDalle
from superagi.llms.base_llm import BaseLlm
from superagi.resource_manager.file_manager import FileManager
from superagi.models.toolkit import Toolkit
from superagi.models.configuration import Configuration
from superagi.tools.base_tool import BaseTool
class DalleImageGenInput(BaseModel):
prompt: str = Field(..., description="Prompt for Image Generation to be used by Dalle.")
size: int = Field(..., description="Size of the image to be Generated. default size is 512")
num: int = Field(..., description="Number of Images to be generated. default num is 2")
image_names: list = Field(..., description="Image Names for the generated images, example 'image_1.png'. Only include the image name. Don't include path.")
class DalleImageGenTool(BaseTool):
"""
Dalle Image Generation tool
Attributes:
name : Name of the tool
description : The description
args_schema : The args schema
agent_id : The agent id
resource_manager : Manages the file resources
"""
name: str = "DalleImageGeneration"
args_schema: Type[BaseModel] = DalleImageGenInput
description: str = "Generate Images using Dalle"
agent_id: int = None
agent_execution_id: int = None
resource_manager: Optional[FileManager] = None
# class Config:
# arbitrary_types_allowed = True
def _execute(self, prompt: str, image_names: list, size: int = 512, num: int = 2):
"""
Execute the Dalle Image Generation tool.
Args:
prompt : The prompt for image generation.
size : The size of the image to be generated.
num : The number of images to be generated.
image_names (list): The name of the image to be generated.
Returns:
Image generated successfully message if image is generated or error message.
"""
session = self.toolkit_config.session
toolkit = session.query(Toolkit).filter(Toolkit.id == self.toolkit_config.toolkit_id).first()
organisation_id = toolkit.organisation_id
if size not in [256, 512, 1024]:
size = min([256, 512, 1024], key=lambda x: abs(x - size))
api_key = self.get_tool_config("OPENAI_API_KEY")
if api_key is None:
return "Enter your OpenAi api key in the configuration"
response = OpenAiDalle(api_key=api_key, number_of_results=num).generate_image(
prompt, size)
response = response.__dict__
response = response['_previous']['data']
for i in range(num):
data = requests.get(response[i]['url']).content
self.resource_manager.write_binary_file(image_names[i], data)
return "Images downloaded successfully"
| [
"Generate Images using Dalle",
"Prompt for Image Generation to be used by Dalle."
] |
2024-01-10 | TransformerOptimus/SuperAGI | superagi~jobs~agent_executor.py | from datetime import datetime, timedelta
from sqlalchemy.orm import sessionmaker
from superagi.llms.local_llm import LocalLLM
import superagi.worker
from superagi.agent.agent_iteration_step_handler import AgentIterationStepHandler
from superagi.agent.agent_tool_step_handler import AgentToolStepHandler
from superagi.agent.agent_workflow_step_wait_handler import AgentWaitStepHandler
from superagi.agent.types.wait_step_status import AgentWorkflowStepWaitStatus
from superagi.apm.event_handler import EventHandler
from superagi.config.config import get_config
from superagi.lib.logger import logger
from superagi.llms.google_palm import GooglePalm
from superagi.llms.hugging_face import HuggingFace
from superagi.llms.llm_model_factory import get_model
from superagi.llms.replicate import Replicate
from superagi.models.agent import Agent
from superagi.models.agent_config import AgentConfiguration
from superagi.models.agent_execution import AgentExecution
from superagi.models.db import connect_db
from superagi.models.workflows.agent_workflow_step import AgentWorkflowStep
from superagi.models.workflows.agent_workflow_step_wait import AgentWorkflowStepWait
from superagi.types.vector_store_types import VectorStoreType
from superagi.vector_store.embedding.openai import OpenAiEmbedding
from superagi.vector_store.vector_factory import VectorFactory
from superagi.worker import execute_agent
from superagi.agent.types.agent_workflow_step_action_types import AgentWorkflowStepAction
from superagi.agent.types.agent_execution_status import AgentExecutionStatus
# from superagi.helper.tool_helper import get_tool_config_by_key
engine = connect_db()
Session = sessionmaker(bind=engine)
class AgentExecutor:
def execute_next_step(self, agent_execution_id):
global engine
# try:
engine.dispose()
session = Session()
try:
agent_execution = session.query(AgentExecution).filter(AgentExecution.id == agent_execution_id).first()
'''Avoiding running old agent executions'''
if agent_execution and agent_execution.created_at < datetime.utcnow() - timedelta(days=1):
logger.error("Older agent execution found, skipping execution")
return
agent = session.query(Agent).filter(Agent.id == agent_execution.agent_id).first()
agent_config = Agent.fetch_configuration(session, agent.id)
if agent.is_deleted or (
agent_execution.status != AgentExecutionStatus.RUNNING.value and agent_execution.status != AgentExecutionStatus.WAITING_FOR_PERMISSION.value):
logger.error(f"Agent execution stopped. {agent.id}: {agent_execution.status}")
return
organisation = Agent.find_org_by_agent_id(session, agent_id=agent.id)
if self._check_for_max_iterations(session, organisation.id, agent_config, agent_execution_id):
logger.error(f"Agent execution stopped. Max iteration exceeded. {agent.id}: {agent_execution.status}")
return
try:
model_config = AgentConfiguration.get_model_api_key(session, agent_execution.agent_id,
agent_config["model"])
model_api_key = model_config['api_key']
model_llm_source = model_config['provider']
except Exception as e:
logger.info(f"Unable to get model config...{e}")
return
try:
memory = None
if "OpenAI" in model_llm_source:
vector_store_type = VectorStoreType.get_vector_store_type(get_config("LTM_DB", "Redis"))
memory = VectorFactory.get_vector_storage(vector_store_type, "super-agent-index1",
AgentExecutor.get_embedding(model_llm_source,
model_api_key))
except Exception as e:
logger.info(f"Unable to setup the connection...{e}")
memory = None
agent_workflow_step = session.query(AgentWorkflowStep).filter(
AgentWorkflowStep.id == agent_execution.current_agent_step_id).first()
try:
self.__execute_workflow_step(agent, agent_config, agent_execution_id, agent_workflow_step, memory,
model_api_key, organisation, session)
except Exception as e:
logger.info("Exception in executing the step: {}".format(e))
superagi.worker.execute_agent.apply_async((agent_execution_id, datetime.now()), countdown=15)
return
agent_execution = session.query(AgentExecution).filter(AgentExecution.id == agent_execution_id).first()
if agent_execution.status == "COMPLETED" or agent_execution.status == "WAITING_FOR_PERMISSION":
logger.info("Agent Execution is completed or waiting for permission")
session.close()
return
superagi.worker.execute_agent.apply_async((agent_execution_id, datetime.now()), countdown=2)
# superagi.worker.execute_agent.delay(agent_execution_id, datetime.now())
finally:
session.close()
engine.dispose()
def __execute_workflow_step(self, agent, agent_config, agent_execution_id, agent_workflow_step, memory,
model_api_key, organisation, session):
logger.info("Executing Workflow step : ", agent_workflow_step.action_type)
if agent_workflow_step.action_type == AgentWorkflowStepAction.TOOL.value:
tool_step_handler = AgentToolStepHandler(session,
llm=get_model(model=agent_config["model"], api_key=model_api_key,
organisation_id=organisation.id)
, agent_id=agent.id, agent_execution_id=agent_execution_id,
memory=memory)
tool_step_handler.execute_step()
elif agent_workflow_step.action_type == AgentWorkflowStepAction.ITERATION_WORKFLOW.value:
iteration_step_handler = AgentIterationStepHandler(session,
llm=get_model(model=agent_config["model"],
api_key=model_api_key,
organisation_id=organisation.id)
, agent_id=agent.id,
agent_execution_id=agent_execution_id, memory=memory)
print(get_model(model=agent_config["model"], api_key=model_api_key, organisation_id=organisation.id))
iteration_step_handler.execute_step()
elif agent_workflow_step.action_type == AgentWorkflowStepAction.WAIT_STEP.value:
(AgentWaitStepHandler(session=session, agent_id=agent.id,
agent_execution_id=agent_execution_id)
.execute_step())
@classmethod
def get_embedding(cls, model_source, model_api_key):
if "OpenAI" in model_source:
return OpenAiEmbedding(api_key=model_api_key)
if "Google" in model_source:
return GooglePalm(api_key=model_api_key)
if "Hugging" in model_source:
return HuggingFace(api_key=model_api_key)
if "Replicate" in model_source:
return Replicate(api_key=model_api_key)
if "Custom" in model_source:
return LocalLLM()
return None
def _check_for_max_iterations(self, session, organisation_id, agent_config, agent_execution_id):
db_agent_execution = session.query(AgentExecution).filter(AgentExecution.id == agent_execution_id).first()
if agent_config["max_iterations"] <= db_agent_execution.num_of_calls:
db_agent_execution.status = AgentExecutionStatus.ITERATION_LIMIT_EXCEEDED.value
EventHandler(session=session).create_event('run_iteration_limit_crossed',
{'agent_execution_id': db_agent_execution.id,
'name': db_agent_execution.name,
'tokens_consumed': db_agent_execution.num_of_tokens,
"calls": db_agent_execution.num_of_calls},
db_agent_execution.agent_id, organisation_id)
session.commit()
logger.info("ITERATION_LIMIT_CROSSED")
return True
return False
def execute_waiting_workflows(self):
"""Check if wait time of wait workflow step is over and can be resumed."""
session = Session()
waiting_agent_executions = session.query(AgentExecution).filter(
AgentExecution.status == AgentExecutionStatus.WAIT_STEP.value,
).all()
for agent_execution in waiting_agent_executions:
workflow_step = session.query(AgentWorkflowStep).filter(
AgentWorkflowStep.id == agent_execution.current_agent_step_id).first()
step_wait = AgentWorkflowStepWait.find_by_id(session, workflow_step.action_reference_id)
if step_wait is not None:
wait_time = step_wait.delay if not None else 0
logger.info(f"Agent Execution ID: {agent_execution.id}")
logger.info(f"Wait time: {wait_time}")
logger.info(f"Wait begin time: {step_wait.wait_begin_time}")
logger.info(f"Current time: {datetime.now()}")
logger.info(f"Wait Difference : {(datetime.now() - step_wait.wait_begin_time).total_seconds()}")
if ((datetime.now() - step_wait.wait_begin_time).total_seconds() > wait_time
and step_wait.status == AgentWorkflowStepWaitStatus.WAITING.value):
agent_execution.status = AgentExecutionStatus.RUNNING.value
step_wait.status = AgentWorkflowStepWaitStatus.COMPLETED.value
session.commit()
session.flush()
AgentWaitStepHandler(session=session, agent_id=agent_execution.agent_id,
agent_execution_id=agent_execution.id).handle_next_step()
execute_agent.delay(agent_execution.id, datetime.now())
session.close() | [] |
2024-01-10 | TransformerOptimus/SuperAGI | superagi~controllers~organisation.py | from datetime import datetime
from fastapi import APIRouter
from fastapi import HTTPException, Depends
from fastapi_jwt_auth import AuthJWT
from fastapi_sqlalchemy import db
from pydantic import BaseModel
from superagi.helper.auth import get_user_organisation
from superagi.helper.auth import check_auth
from superagi.helper.encyption_helper import decrypt_data
from superagi.helper.tool_helper import register_toolkits
from superagi.llms.google_palm import GooglePalm
from superagi.llms.llm_model_factory import build_model_with_api_key
from superagi.llms.openai import OpenAi
from superagi.models.configuration import Configuration
from superagi.models.organisation import Organisation
from superagi.models.project import Project
from superagi.models.user import User
from superagi.lib.logger import logger
from superagi.models.workflows.agent_workflow import AgentWorkflow
# from superagi.types.db import OrganisationIn, OrganisationOut
router = APIRouter()
class OrganisationOut(BaseModel):
id: int
name: str
description: str
created_at: datetime
updated_at: datetime
class Config:
orm_mode = True
class OrganisationIn(BaseModel):
name: str
description: str
class Config:
orm_mode = True
# CRUD Operations
@router.post("/add", response_model=OrganisationOut, status_code=201)
def create_organisation(organisation: OrganisationIn,
Authorize: AuthJWT = Depends(check_auth)):
"""
Create a new organisation.
Args:
organisation: Organisation data.
Returns:
dict: Dictionary containing the created organisation.
Raises:
HTTPException (status_code=400): If there is an issue creating the organisation.
"""
new_organisation = Organisation(
name=organisation.name,
description=organisation.description,
)
db.session.add(new_organisation)
db.session.commit()
db.session.flush()
register_toolkits(session=db.session, organisation=new_organisation)
logger.info(new_organisation)
return new_organisation
@router.get("/get/{organisation_id}", response_model=OrganisationOut)
def get_organisation(organisation_id: int, Authorize: AuthJWT = Depends(check_auth)):
"""
Get organisation details by organisation_id.
Args:
organisation_id: ID of the organisation.
Returns:
dict: Dictionary containing the organisation details.
Raises:
HTTPException (status_code=404): If the organisation with the specified ID is not found.
"""
db_organisation = db.session.query(Organisation).filter(Organisation.id == organisation_id).first()
if not db_organisation:
raise HTTPException(status_code=404, detail="organisation not found")
return db_organisation
@router.put("/update/{organisation_id}", response_model=OrganisationOut)
def update_organisation(organisation_id: int, organisation: OrganisationIn,
Authorize: AuthJWT = Depends(check_auth)):
"""
Update organisation details by organisation_id.
Args:
organisation_id: ID of the organisation.
organisation: Updated organisation data.
Returns:
dict: Dictionary containing the updated organisation details.
Raises:
HTTPException (status_code=404): If the organisation with the specified ID is not found.
"""
db_organisation = db.session.query(Organisation).filter(Organisation.id == organisation_id).first()
if not db_organisation:
raise HTTPException(status_code=404, detail="Organisation not found")
db_organisation.name = organisation.name
db_organisation.description = organisation.description
db.session.commit()
return db_organisation
@router.get("/get/user/{user_id}", response_model=OrganisationOut, status_code=201)
def get_organisations_by_user(user_id: int):
"""
Get organisations associated with a user.If Organisation does not exists a new organisation is created
Args:
user_id: ID of the user.
Returns:
dict: Dictionary containing the organisation details.
Raises:
HTTPException (status_code=400): If the user with the specified ID is not found.
"""
user = db.session.query(User).filter(User.id == user_id).first()
if user is None:
raise HTTPException(status_code=400,
detail="User not found")
organisation = Organisation.find_or_create_organisation(db.session, user)
Project.find_or_create_default_project(db.session, organisation.id)
return organisation
@router.get("/llm_models")
def get_llm_models(organisation=Depends(get_user_organisation)):
"""
Get all the llm models associated with an organisation.
Args:
organisation: Organisation data.
"""
model_api_key = db.session.query(Configuration).filter(Configuration.organisation_id == organisation.id,
Configuration.key == "model_api_key").first()
model_source = db.session.query(Configuration).filter(Configuration.organisation_id == organisation.id,
Configuration.key == "model_source").first()
if model_api_key is None or model_source is None:
raise HTTPException(status_code=400,
detail="Organisation not found")
decrypted_api_key = decrypt_data(model_api_key.value)
model = build_model_with_api_key(model_source.value, decrypted_api_key)
models = model.get_models() if model is not None else []
return models
@router.get("/agent_workflows")
def agent_workflows(organisation=Depends(get_user_organisation)):
"""
Get all the agent workflows
Args:
organisation: Organisation data.
"""
agent_workflows = db.session.query(AgentWorkflow).all()
workflows = [workflow.name for workflow in agent_workflows]
return workflows
| [] |
2024-01-10 | TransformerOptimus/SuperAGI | superagi~models~models_config.py | from sqlalchemy import Column, Integer, String, and_, distinct
from superagi.lib.logger import logger
from superagi.models.base_model import DBBaseModel
from superagi.models.organisation import Organisation
from superagi.models.project import Project
from superagi.models.models import Models
from superagi.llms.openai import OpenAi
from superagi.helper.encyption_helper import encrypt_data, decrypt_data
from fastapi import HTTPException
import logging
class ModelsConfig(DBBaseModel):
"""
Represents a Model Config record in the database.
Attributes:
id (Integer): The unique identifier of the event.
provider (String): The name of the model provider.
api_key (String): The api_key for individual model providers for every Organisation
org_id (Integer): The ID of the organisation.
"""
__tablename__ = 'models_config'
id = Column(Integer, primary_key=True)
provider = Column(String, nullable=False)
api_key = Column(String, nullable=False)
org_id = Column(Integer, nullable=False)
def __repr__(self):
"""
Returns a string representation of the ModelsConfig instance.
"""
return f"ModelsConfig(id={self.id}, provider={self.provider}, " \
f"org_id={self.org_id})"
@classmethod
def fetch_value_by_agent_id(cls, session, agent_id: int, model: str):
"""
Fetches the configuration of an agent.
Args:
session: The database session object.
agent_id (int): The ID of the agent.
model (str): The model of the configuration.
Returns:
dict: Parsed configuration.
"""
from superagi.models.agent import Agent
agent = session.query(Agent).filter(Agent.id == agent_id).first()
if not agent:
raise HTTPException(status_code=404, detail="Agent not found")
project = session.query(Project).filter(Project.id == agent.project_id).first()
if not project:
raise HTTPException(status_code=404, detail="Project not found")
organisation = session.query(Organisation).filter(Organisation.id == project.organisation_id).first()
if not organisation:
raise HTTPException(status_code=404, detail="Organisation not found")
model_provider = session.query(Models).filter(Models.org_id == organisation.id, Models.model_name == model).first()
if not model_provider:
raise HTTPException(status_code=404, detail="Model provider not found")
config = session.query(ModelsConfig.provider, ModelsConfig.api_key).filter(ModelsConfig.org_id == organisation.id, ModelsConfig.id == model_provider.model_provider_id).first()
if not config:
return None
if config.provider == 'Local LLM':
return {"provider": config.provider, "api_key": config.api_key} if config else None
return {"provider": config.provider, "api_key": decrypt_data(config.api_key)} if config else None
@classmethod
def store_api_key(cls, session, organisation_id, model_provider, model_api_key):
existing_entry = session.query(ModelsConfig).filter(and_(ModelsConfig.org_id == organisation_id,
ModelsConfig.provider == model_provider)).first()
if existing_entry:
existing_entry.api_key = encrypt_data(model_api_key)
session.commit()
session.flush()
if model_provider == 'OpenAI':
cls.storeGptModels(session, organisation_id, existing_entry.id, model_api_key)
result = {'message': 'The API key was successfully updated'}
else:
new_entry = ModelsConfig(org_id=organisation_id, provider=model_provider,
api_key=encrypt_data(model_api_key))
session.add(new_entry)
session.commit()
session.flush()
if model_provider == 'OpenAI':
cls.storeGptModels(session, organisation_id, new_entry.id, model_api_key)
result = {'message': 'The API key was successfully stored', 'model_provider_id': new_entry.id}
return result
@classmethod
def storeGptModels(cls, session, organisation_id, model_provider_id, model_api_key):
default_models = {"gpt-3.5-turbo": 4032, "gpt-4": 8092, "gpt-3.5-turbo-16k": 16184}
models = OpenAi(api_key=model_api_key).get_models()
installed_models = [model[0] for model in session.query(Models.model_name).filter(Models.org_id == organisation_id).all()]
for model in models:
if model not in installed_models and model in default_models:
result = Models.store_model_details(session, organisation_id, model, model, '',
model_provider_id, default_models[model], 'Custom', '', 0)
@classmethod
def fetch_api_keys(cls, session, organisation_id):
api_key_info = session.query(ModelsConfig.provider, ModelsConfig.api_key).filter(
ModelsConfig.org_id == organisation_id).all()
if not api_key_info:
logging.error("No API key found for the provided model provider")
return []
api_keys = [{"provider": provider, "api_key": decrypt_data(api_key)} for provider, api_key in
api_key_info]
return api_keys
@classmethod
def fetch_api_key(cls, session, organisation_id, model_provider):
api_key_data = session.query(ModelsConfig.id, ModelsConfig.provider, ModelsConfig.api_key).filter(
and_(ModelsConfig.org_id == organisation_id, ModelsConfig.provider == model_provider)).first()
logger.info(api_key_data)
if api_key_data is None:
return []
elif api_key_data.provider == 'Local LLM':
api_key = [{'id': api_key_data.id, 'provider': api_key_data.provider,
'api_key': api_key_data.api_key}]
return api_key
else:
api_key = [{'id': api_key_data.id, 'provider': api_key_data.provider,
'api_key': decrypt_data(api_key_data.api_key)}]
return api_key
@classmethod
def fetch_model_by_id(cls, session, organisation_id, model_provider_id):
model = session.query(ModelsConfig.provider).filter(ModelsConfig.id == model_provider_id,
ModelsConfig.org_id == organisation_id).first()
if model is None:
return {"error": "Model not found"}
else:
return {"provider": model.provider}
@classmethod
def fetch_model_by_id_marketplace(cls, session, model_provider_id):
model = session.query(ModelsConfig.provider).filter(ModelsConfig.id == model_provider_id).first()
if model is None:
return {"error": "Model not found"}
else:
return {"provider": model.provider}
@classmethod
def add_llm_config(cls, session, organisation_id):
existing_models_config = session.query(ModelsConfig).filter(ModelsConfig.org_id == organisation_id, ModelsConfig.provider == 'Local LLM').first()
if existing_models_config is None:
models_config = ModelsConfig(org_id=organisation_id, provider='Local LLM', api_key="EMPTY")
session.add(models_config)
session.commit() | [] |
2024-01-10 | TransformerOptimus/SuperAGI | tests~integration_tests~vector_store~test_qdrant.py | import pytest
import numpy as np
from superagi.vector_store import qdrant
from superagi.vector_store.embedding.openai import OpenAiEmbedding
from qdrant_client.models import Distance, VectorParams
from qdrant_client import QdrantClient
@pytest.fixture
def client():
client = QdrantClient(":memory:")
yield client
@pytest.fixture
def mock_openai_embedding(monkeypatch):
monkeypatch.setattr(
OpenAiEmbedding,
"get_embedding",
lambda self, text: np.random.random(3).tolist(),
)
@pytest.fixture
def store(client, mock_openai_embedding):
client.create_collection(
collection_name="Test_collection",
vectors_config=VectorParams(size=3, distance=Distance.COSINE),
)
yield qdrant.Qdrant(client, OpenAiEmbedding(api_key="test_api_key"), "Test_collection")
client.delete_collection("Test_collection")
def test_add_texts(store):
car_companies = [
"Rolls-Royce",
"Bentley",
"Ferrari",
"Lamborghini",
"Aston Martin",
"Porsche",
"Bugatti",
"Maserati",
"McLaren",
"Mercedes-Benz"
]
assert len(store.add_texts(car_companies)) == len(car_companies)
def test_get_matching_text(store):
car_companies = [
"Rolls-Royce",
"Bentley",
"Ferrari",
"Lamborghini",
"Aston Martin",
"Porsche",
"Bugatti",
"Maserati",
"McLaren",
"Mercedes-Benz"
]
store.add_texts(car_companies)
assert len(store.get_matching_text(k=2, text="McLaren")) == 2
| [] |
2024-01-10 | microsoft/semantic_parsing_with_constrained_lm | src~semantic_parsing_with_constrained_lm~lm_openai_gpt3.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""IncrementalLanguageModel which uses OpenAI's GPT-3 API."""
import ast
import asyncio
import collections
import dataclasses
import datetime
import functools
import os
import sys
import threading
import time
from dataclasses import dataclass
from typing import (
Any,
Awaitable,
Callable,
Deque,
Dict,
List,
Optional,
Sequence,
Tuple,
)
import httpx
import more_itertools
import torch
from cached_property import cached_property
from httpx import Response
from httpx._types import HeaderTypes
from transformers import GPT2Tokenizer
from semantic_parsing_with_constrained_lm.async_tools import limits
from semantic_parsing_with_constrained_lm.async_tools.batch_helper import BatchingHelper, BatchMaker
from semantic_parsing_with_constrained_lm.cache import CacheClient
from semantic_parsing_with_constrained_lm.lm import IncrementalLanguageModel, TokensWithLogprobs
from semantic_parsing_with_constrained_lm.tokenization import ClampTokenizer, GPT2ClampTokenizer
try:
from semantic_parsing_with_constrained_lm.internal.cosmos_db_client import make_default_client
from semantic_parsing_with_constrained_lm.internal.gpt3 import adjust_tokenizer
except ImportError:
make_default_client = lambda: None
adjust_tokenizer = lambda _1, _2: None
default_engine = os.environ.get("OPENAI_GPT3_ENGINE", "text-davinci-001")
@dataclass
class OpenAIGPT3State:
tokens: Tuple[int, ...]
class OpenAIAPIError(Exception):
def __init__(self, response: Response):
self.response = response
self.message = (
f"Got status {response.status_code} from OpenAI with body:\n{response.text}"
)
super().__init__(self.message)
@dataclass
class GPT3Client:
engine: str = default_engine
api_key: Optional[str] = None
cache_client: Optional[CacheClient] = dataclasses.field(
default_factory=make_default_client
)
http_client: httpx.AsyncClient = dataclasses.field(init=False)
request_limiter: limits.AdaptiveLimiter = dataclasses.field(
default_factory=functools.partial(
limits.AdaptiveLimiter, initial_qps=10, max_qps=100
)
)
completions_rate_limited: Callable[
[Dict[str, Any]], Awaitable[httpx.Response]
] = dataclasses.field(init=False)
completions_url: str = dataclasses.field(init=False)
def _init_api_key(self, env: str) -> str:
if self.api_key is None:
self.api_key = os.getenv(env)
if self.api_key is None:
raise ValueError(f"{env} was not set")
return self.api_key
def __post_init__(self):
# We have an internal instance which has a different URL, auth token and header.
# To access that instance, you can use the engine "codex-cushman-sm" -- note that
# the "-sm" suffix is not part of the actual underlying engine name, just something
# we use to switch on here.
# See https://semanticmachines.slack.com/archives/C017P5M1RSL/p1647450366073519?thread_ts=1646782663.584339&cid=C017P5M1RSL
# Get keys here: https://ms.portal.azure.com/#@microsoft.onmicrosoft.com/resource/subscriptions/b68b2f37-1d37-4c2f-80f6-c23de402792e/resourceGroups/fod/providers/Microsoft.CognitiveServices/accounts/smopenai/cskeys
auth_header: HeaderTypes
if self.engine == "codex-cushman-sm":
api_key = self._init_api_key("SM_OPENAI_API_KEY")
self.completions_url = "https://smopenai.openai.azure.com/openai/deployments/codex-cushman/completions?api-version=2021-11-01-preview"
auth_header = {"api-key": api_key}
else:
api_key = self._init_api_key("OPENAI_API_KEY")
self.completions_url = (
f"https://api.openai.com/v1/engines/{self.engine}/completions"
)
auth_header = {"Authorization": f"Bearer {api_key}"}
self.http_client = httpx.AsyncClient(
headers=auth_header,
# HTTP/2 should be more efficient, but it appears to be buggy in practice
http2=False,
timeout=httpx.Timeout(60.0),
limits=httpx.Limits(max_connections=500, max_keepalive_connections=500),
)
# Pyright bug forces us to first store the result in `limited`
# https://github.com/microsoft/pyright/issues/2965
limited = self.request_limiter(self._completions_with_raise_if_limited)
self.completions_rate_limited = limited
async def __aenter__(self):
await self.http_client.__aenter__()
if self.cache_client is not None:
await self.cache_client.__aenter__()
async def __aexit__(self, exc_type, exc_value, traceback):
await self.http_client.__aexit__(exc_type, exc_value, traceback)
if self.cache_client is not None:
await self.cache_client.__aexit__(exc_type, exc_value, traceback)
async def _completions_with_raise_if_limited(
self, args_without_engine: Dict[str, Any]
) -> httpx.Response:
request_info = RequestInfo.create(args_without_engine)
Instrumentation.currently_pending_requests += 1
Instrumentation.record_request(request_info)
try:
response = await self.http_client.post(
self.completions_url,
json=args_without_engine,
)
except httpx.RequestError as e:
request_info.finish(False)
raise limits.RateLimitExceededError() from e
finally:
Instrumentation.currently_pending_requests -= 1
if response.status_code != 200:
request_info.finish(False)
if response.status_code in (429, 500, 502, 503):
raise limits.RateLimitExceededError()
raise OpenAIAPIError(response)
request_info.finish(True)
return response
@dataclass(frozen=True)
class EchoBatchMaker(BatchMaker):
client: GPT3Client = dataclasses.field(compare=False)
@property
def max_batch_size(self) -> int:
return 1000
@property
def timeout(self) -> float:
return 0.1
async def execute(self, batched_tokens: List[Sequence[int]]) -> List[List[float]]:
args = {
"prompt": batched_tokens,
"max_tokens": 0,
"echo": True,
"logprobs": 0,
}
# https://github.com/python/mypy/issues/708
results = (
await self.client.completions_rate_limited(args) # type: ignore
).json()
return [d["logprobs"]["token_logprobs"] for d in results["choices"]]
@dataclass(frozen=True)
class NextLogprobsBatchMaker(BatchMaker):
client: GPT3Client = dataclasses.field(compare=False)
@property
def max_batch_size(self) -> int:
return 100
@property
def timeout(self) -> float:
return 0.001
async def execute(
self, batched_tokens: List[Sequence[int]]
) -> List[Dict[str, float]]:
args = {
"prompt": batched_tokens,
"max_tokens": 1,
"logprobs": 100,
}
# https://github.com/python/mypy/issues/708
results = (
await self.client.completions_rate_limited(args) # type: ignore
).json()
return [d["logprobs"]["top_logprobs"][0] for d in results["choices"]]
@dataclass(frozen=True)
class CompletionsParams:
max_tokens: int
temperature: float
top_p: float
num_completions: int
stop: Optional[str]
@dataclass(frozen=True)
class CompletionsBatchMaker(BatchMaker):
client: GPT3Client = dataclasses.field(compare=False)
params: CompletionsParams
@property
def max_batch_size(self) -> int:
return 100
@property
def timeout(self) -> float:
return 0.05
async def execute(
self, args: List[Tuple[Sequence[int], CompletionsParams]]
) -> List[List[Tuple[List[str], List[float]]]]:
# Outermost List has length equal to the batch size
# 2nd level List has length equal to `num_completions`
# Each Tuple contains two (parallel) lists: tokens and their log probabilities
batched_tokens = [x[0] for x in args]
params = {
"prompt": batched_tokens,
"max_tokens": self.params.max_tokens,
"temperature": self.params.temperature,
"top_p": self.params.top_p,
"n": self.params.num_completions,
"stop": self.params.stop,
"logprobs": 0,
}
response = (
await self.client.completions_rate_limited(params) # type: ignore
).json()
result: List[List[Tuple[List[str], List[float]]]] = []
for choices_per_prompt in more_itertools.chunked(
response["choices"], self.params.num_completions
):
result.append(
[
(c["logprobs"]["tokens"], c["logprobs"]["token_logprobs"])
for c in choices_per_prompt
]
)
return result
def openai_token_to_bytes(token: str) -> bytes:
if token.startswith("bytes:"):
return ast.literal_eval(f"b'{token[6:]}'")
else:
return token.encode("utf-8")
def openai_token_to_id(tokenizer: ClampTokenizer, token: str) -> int:
token_bytes = openai_token_to_bytes(token)
return tokenizer.utf8_token_to_id_map[token_bytes]
@dataclass
class IncrementalOpenAIGPT3(IncrementalLanguageModel[OpenAIGPT3State]):
engine: str = default_engine
use_cache: bool = True
client: GPT3Client = dataclasses.field(init=False)
echo_batch_helper: BatchingHelper[
Sequence[int], List[List[float]]
] = dataclasses.field(init=False)
next_logprobs_batch_helper: BatchingHelper[
Sequence[int], List[Dict[str, float]]
] = dataclasses.field(init=False)
completions_batch_helper: BatchingHelper[
Tuple[Sequence[int], CompletionsParams],
List[List[Tuple[List[str], List[float]]]],
] = dataclasses.field(init=False)
def __post_init__(self):
client = GPT3Client(engine=self.engine)
self.client = client
self.echo_batch_helper = BatchingHelper(
input_to_batch_maker=lambda _args: EchoBatchMaker(client),
)
self.next_logprobs_batch_helper = BatchingHelper(
input_to_batch_maker=lambda _args: NextLogprobsBatchMaker(client),
)
self.completions_batch_helper = BatchingHelper(
input_to_batch_maker=lambda args: CompletionsBatchMaker(client, args[1]),
)
if self.client.cache_client is None:
self.use_cache = False
async def __aenter__(self):
await self.client.__aenter__()
async def __aexit__(self, *args):
await self.client.__aexit__(*args)
@cached_property
def vocab_size(self): # pylint: disable=invalid-overridden-method
return self.tokenizer.vocab_size
@cached_property
def tokenizer(self) -> ClampTokenizer: # pylint: disable=invalid-overridden-method
gpt2_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
adjust_tokenizer(self.engine, gpt2_tokenizer)
return GPT2ClampTokenizer(gpt2_tokenizer)
@cached_property
def max_length(self) -> int:
if self.engine.startswith("davinci-codex"):
return 4096
return 2048
async def execute(
self,
tokens: Sequence[int],
hidden_state: Optional[OpenAIGPT3State] = None,
drop_next_hidden_state: bool = False,
) -> Tuple[torch.Tensor, Optional[OpenAIGPT3State]]:
# In order to reduce network traffic, this function only returns the
# logprobs for the last token. It also only returns the top 100 logprobs
# due to limitations of the OpenAI API.
if hidden_state is None:
all_tokens = tuple(tokens)
else:
all_tokens = hidden_state.tokens + tuple(tokens)
if self.use_cache and self.client.cache_client:
cache_args = {
"engine": self.engine,
"prompt": all_tokens,
"max_tokens": 1,
"logprobs": 100,
}
cached = await self.client.cache_client.get(cache_args)
else:
cache_args = None
cached = None
if cached:
next_logprobs = cached["choices"][0]["logprobs"]["top_logprobs"][0]
else:
batched_next_logprobs, i = await self.next_logprobs_batch_helper.execute(
all_tokens
)
next_logprobs = batched_next_logprobs[i]
if self.use_cache and self.client.cache_client:
assert cache_args is not None
asyncio.create_task(
self.client.cache_client.upload(
cache_args,
{"choices": [{"logprobs": {"top_logprobs": [next_logprobs]}}]},
)
)
result = torch.full(
(max(1, len(tokens)), self.tokenizer.vocab_size), -float("inf")
)
for token, logprob in next_logprobs.items():
token_id = openai_token_to_id(self.tokenizer, token)
result[-1, token_id] = logprob
return (
result,
None if drop_next_hidden_state else OpenAIGPT3State(all_tokens),
)
async def next_logprobs(self, hidden_state: OpenAIGPT3State) -> torch.Tensor:
# First [0] is to get the logprobs, not the hidden state
# Second [0] is to remove the length dimension
return (await self.execute((), hidden_state, drop_next_hidden_state=True))[0][0]
async def logprob_of_completion(
self, prefix_tokens: Sequence[int], completion_tokens: Sequence[int]
) -> float:
all_tokens = tuple(prefix_tokens) + tuple(completion_tokens)
if self.use_cache and self.client.cache_client:
assert self.client.cache_client is not None
cache_args = {
"prompt": all_tokens,
"max_tokens": 0,
"echo": True,
"logprobs": 0,
}
cached = await self.client.cache_client.get(cache_args)
else:
cache_args = None
cached = None
if cached:
echoed_logprobs = cached["choices"][0]["logprobs"]["token_logprobs"]
else:
batched_echoed_logprobs, i = await self.echo_batch_helper.execute(
all_tokens
)
echoed_logprobs = batched_echoed_logprobs[i]
if self.use_cache and self.client.cache_client:
assert cache_args is not None
asyncio.create_task(
self.client.cache_client.upload(
cache_args,
{
"choices": [
{"logprobs": {"token_logprobs": echoed_logprobs}}
]
},
)
)
return sum(echoed_logprobs[len(prefix_tokens) :])
async def completions(
self,
tokens: Sequence[int],
max_tokens: int,
temperature: float = 1,
top_p: float = 1,
num_completions: int = 1,
stop: Optional[str] = None,
hidden_state: Optional[OpenAIGPT3State] = None,
) -> List[Tuple[TokensWithLogprobs, OpenAIGPT3State]]:
"""Corresponds to completions endpoint of OpenAI API.
https://beta.openai.com/docs/api-reference/completions/create"""
if hidden_state is None:
all_tokens = tuple(tokens)
else:
all_tokens = hidden_state.tokens + tuple(tokens)
all_completions, i = await self.completions_batch_helper.execute(
(
all_tokens,
CompletionsParams(
max_tokens, temperature, top_p, num_completions, stop
),
)
)
completions = all_completions[i]
result: List[Tuple[TokensWithLogprobs, OpenAIGPT3State]] = []
for completion_tokens, logprobs in completions:
truncated_token_ids = []
prev_was_stop = False
for t in completion_tokens:
if prev_was_stop:
break
prev_was_stop = t == stop
truncated_token_ids.append(openai_token_to_id(self.tokenizer, t))
result.append(
(
TokensWithLogprobs(
torch.tensor(truncated_token_ids),
torch.tensor(logprobs[: len(truncated_token_ids)]),
),
OpenAIGPT3State(all_tokens + tuple(truncated_token_ids[:-1])),
)
)
return result
@dataclass
class RequestInfo:
num_prompts: int
prompts: List[str]
success: Optional[bool] = dataclasses.field(init=False, default=None)
start_time: float = dataclasses.field(default_factory=time.time)
end_time: Optional[float] = dataclasses.field(init=False, default=None)
@staticmethod
def create(args_without_engine: Dict[str, Any]) -> "RequestInfo":
prompt = args_without_engine["prompt"]
if isinstance(prompt, str):
return RequestInfo(1, [prompt])
else:
return RequestInfo(len(prompt), prompt)
def finish(self, success: bool) -> None:
self.end_time = time.time()
self.success = success
class Instrumentation:
BUFFER_SIZE = 100
AUTOMATIC_PRINTING_ENABLED = True
PRINT_PROMPT_CONTENTS = False
currently_pending_requests = 0
dropped_requests = 0
last_n_requests: Deque[RequestInfo] = collections.deque()
requests_lock: threading.Lock = threading.Lock()
last_printed_timestamp: float = time.time()
@staticmethod
def record_request(ri: RequestInfo) -> None:
with Instrumentation.requests_lock:
if len(Instrumentation.last_n_requests) == Instrumentation.BUFFER_SIZE:
Instrumentation.last_n_requests.popleft()
Instrumentation.dropped_requests += 1
Instrumentation.last_n_requests.append(ri)
@staticmethod
def print_last_requests():
Instrumentation.last_printed_timestamp = time.time()
with Instrumentation.requests_lock:
last_n_requests = list(Instrumentation.last_n_requests)
dropped_requests = Instrumentation.dropped_requests
Instrumentation.last_n_requests = collections.deque(
[ri for ri in last_n_requests if not ri.end_time]
)
Instrumentation.dropped_requests = 0
if not last_n_requests:
return
if dropped_requests:
dropped_str = f" ({dropped_requests} not shown)"
else:
dropped_str = ""
lines: List[str] = []
for ri in last_n_requests:
line_parts: List[str] = [
f"- {ri.num_prompts} prompts, "
f"started at {datetime.datetime.fromtimestamp(ri.start_time).strftime('%H:%M:%S.%f')}, "
]
if ri.end_time:
line_parts += [
f"elapsed {ri.end_time - ri.start_time:.3f}s, "
f"success {ri.success}"
]
else:
line_parts += ["pending"]
lines.append("".join(line_parts))
if Instrumentation.PRINT_PROMPT_CONTENTS:
for prompt in ri.prompts:
lines.append(f" * {prompt!r}")
print(
"*** GPT-3 API request report ***\n"
f"[{datetime.datetime.now().strftime('%H:%M:%S.%f')}] {len(last_n_requests)} requests since last report{dropped_str}:\n"
+ "\n".join(lines)
+ "\n********************************",
file=sys.stderr,
)
@staticmethod
def print_loop():
while Instrumentation.AUTOMATIC_PRINTING_ENABLED:
time.sleep(1)
if time.time() > Instrumentation.last_printed_timestamp + 10:
Instrumentation.print_last_requests()
if Instrumentation.AUTOMATIC_PRINTING_ENABLED:
threading.Thread(target=Instrumentation.print_loop, daemon=True).start()
| [
"False"
] |
2024-01-10 | MaxGGx/GPT-QueryBuilder-Astroinformatics | API~chatAPI~api~f_aux.py | import environ
import openai
from .models import *
env = environ.Env()
environ.Env.read_env()
openai.api_key = env("OPENAI_API_KEY")
print("\n\nAPI KEY CONFIGURADA: ", env('OPENAI_API_KEY'),"\n\n")
system_message = 'Eres un sistema de apoyo para astrónomos en la búsqueda de archivos astronómicos en el IVOA, debes chatear con el astrónomo y averiguar lo que necesita para luego entregar la consulta que deberá hacer con la librería PyVO de python y resolver sus dudas al respecto, el usuario conoce y tiene instalada la libreria de PyVO y Astropy por lo que solo quiere la consulta que debe realizar a partir de algún dato del Registry, cuando el usuario indique el comando "QUERY" entregarás la consulta para hacer en PyVO que obtuviste al chatear con el astrónomo'
def processChat(chat_anterior):
messages = [
{"role": "system", "content":system_message},
]
for mensaje in chat_anterior:
if(mensaje.usuario == "USER"):
messages.append({"role":"user", "content":mensaje.mensaje})
elif(mensaje.usuario =='GPT'):
messages.append({"role":"assistant", "content":mensaje.mensaje})
return messages
def makeGPTquery(chat,u_message):
chat_anterior = Mensajes.objects.filter(id_chat=chat)
if(len(chat_anterior) > 0):
messages = processChat(chat_anterior)
else:
messages=[
{"role": "system", "content":system_message},
{"role":"user", "content":u_message},
]
response = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
temperature = 1,
messages= messages
)
nMessage = Mensajes()
nMessage.id_chat = chat
nMessage.usuario = 'GPT'
nMessage.mensaje = response['choices'][0]['message']['content']
nMessage.save()
return nMessage | [
"Eres un sistema de apoyo para astrónomos en la búsqueda de archivos astronómicos en el IVOA, debes chatear con el astrónomo y averiguar lo que necesita para luego entregar la consulta que deberá hacer con la librería PyVO de python y resolver sus dudas al respecto, el usuario conoce y tiene instalada la libreria de PyVO y Astropy por lo que solo quiere la consulta que debe realizar a partir de algún dato del Registry, cuando el usuario indique el comando \"QUERY\" entregarás la consulta para hacer en PyVO que obtuviste al chatear con el astrónomo"
] |
2024-01-10 | bschleter/legalredteambrandon | player.py | from typing import Any, List, Optional, Union
import os
import openai
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import Callbacks
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import SystemMessage, HumanMessage
from agent_sim.prompts_library import (
INPUT_PROMPT,
REFLECT_USER_PROMPT,
REFLECT_SYSTEM_PROMPT,
)
class Player:
"""
A class used to represent a player.
Attributes:
memory (List[str]): A list storing the messages.
memory_length (int): The length of the memory in characters.
respond_model (Model): A model used to process messages.
reflect_model (Model): A model used to summarize memories
inception_prompt (str): A string used as the initial prompt for the model.
"""
def __init__(
self,
respond_model: BaseLanguageModel,
reflect_model: BaseLanguageModel,
inception_prompt: str,
role_name: str,
max_context_length: int = 1000,
) -> None:
"""
Constructs the necessary attributes for the player object.
"""
self.respond_model = respond_model
self.reflect_model = reflect_model
self.inception_prompt = inception_prompt
self.role_name = role_name
self.max_context_length = max_context_length
self.memory: List[str] = []
self.memory_length: int = 0
def respond(
self, input_role: str, input_message: str, remember: bool = True
) -> Union[str, Any]:
"""
Responds to a single message based on an input and the previous memory.
memory could be turned off
"""
human_prompt = INPUT_PROMPT.format(
role_name=self.role_name,
history="\n".join(self.memory),
message=input_message,
input_role=input_role,
)
prompt = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(self.inception_prompt),
HumanMessagePromptTemplate.from_template(human_prompt),
]
).format_messages(memory=self.memory)
response = self.respond_model.predict_messages(
prompt, tags=[self.role_name, "respond"]
).content
if remember:
self.add_to_memory(input_role, input_message)
self.add_to_memory(self.role_name, response)
return response
def add_to_memory(self, role: str, message: str) -> None:
"""
Adds a message to the memory.
This is not automatically done within respond because the messages could be manipulated
before being added to the memory.
"""
message = f"{role}: {message}"
self.memory.append(message)
self.memory_length += len(message)
# Summarize messages if they get too long
if self.memory_length >= self.max_context_length:
self.reflect()
def reflect(self) -> None:
"""
Reflects and summarizes memories so that it fits into the context length.
If the memory_length is too long, it selects a number of messages from the memory,
uses the model to summarize them, and replaces them in the memory with the summary.
"""
# Process the top 10 messages or however much is available,
# always leave at least two messages for immediate context
num_messages = min(10, len(self.memory) - 2)
messages_to_process = "\n".join(self.memory[:num_messages])
processed_messages = self.reflect_model.predict_messages(
[
SystemMessage(
content=REFLECT_SYSTEM_PROMPT.format(role_name=self.role_name)
),
HumanMessage(
content=REFLECT_USER_PROMPT.format(history=messages_to_process)
),
],
tags=[self.role_name, "reflect"],
).content
# Replace the messages in memory with the processed output
self.memory = [processed_messages] + self.memory[num_messages:]
# Recalculate memory_length
self.memory_length = sum(len(message) for message in self.memory)
| [
"\n"
] |
2024-01-10 | bcdnlp/FAITHSCORE | src~faithscore~framework.py |
import openai
import time
from tqdm import tqdm
import argparse
import os
import re
from modelscope.utils.constant import Tasks
from modelscope.pipelines import pipeline
from modelscope.preprocessors.multi_modal import OfaPreprocessor
from faithscore.llava15 import LLaVA
from faithscore.llama_pre import load_llama, stage1_llama
from faithscore.utils import llava15, ofa
import nltk
path = os.path.dirname(__file__)
cur_path = os.path.dirname(path)
cur_path = os.path.join(cur_path, "faithscore")
class FaithScore():
def __init__(self, vem_type, api_key=None, llava_path=None, tokenzier_path=None, use_llama=False, llama_path=None):
openai.api_key = api_key
max_seq_len = 500
max_batch_size = 1
self.use_llama = use_llama
# self.vem_path = model_path
self.model_type = vem_type ### [ofa_ve, ofa, mplug, blip2, llava]
model_list = ["ofa_ve", "ofa", "mplug", "blip2", "llava"]
if vem_type not in model_list:
print(f"Error: the model type {vem_type} not in {str(model_list)}")
exit()
self.llava_path = llava_path
if use_llama:
if llava_path and tokenzier_path:
self.llama, self.tokenizer = load_llama(llama_path)
else:
print(f"Error: please input the model path for llama")
exit()
def call_openai(self, pts):
while True:
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user",
"content": pts},
],
temperature=0.2, # TODO: figure out which temperature is best for evaluation
)
return response['choices'][0]['message']['content']
except Exception as e:
print(e)
print("Continue......")
time.sleep(10)
def stage1(self, answers):
with open(os.path.join(cur_path, "prompts/prompt_label_des_ana.txt"), "r") as f:
prompt_label_des_ana = f.read() + "\n\n"
des_ana = []
for id in tqdm(range(len(answers))):
if not self.use_llama:
pts = prompt_label_des_ana + answers[id].replace("\n", " ") + "\n" + "Labeled text: "
des_ana.append(self.call_openai(pts).replace("\n", ""))
else:
pts = stage1_llama(self.llama, self.tokenizer, answers[id].replace("\n", " "))
# print(pts)
des_ana.append(pts)
# exit()
return des_ana
def stage2(self, labeld_sub_sen, ):
all_texts = []
for ss in labeld_sub_sen:
desc = ""
pos_des = [substr.start() for substr in re.finditer("[D]", ss)]
pos_ana = [substr.start() for substr in re.finditer("[A]", ss)]
pos_seg = pos_des + pos_ana
pos_seg.sort()
for i in range(len(pos_seg)):
if pos_seg[i] in pos_des:
if i == 0:
desc += ss[:pos_seg[i] - 1]
else:
desc += ss[pos_seg[i - 1] + 3:pos_seg[i] - 1]
all_texts.append(desc.replace("\n", " "))
with open(os.path.join(cur_path, "prompts/prompt_de_atomic.txt"), 'r') as f:
prompt_de_atomic = f.read()
Entities = []
Relations = []
Colors = []
Counting = []
Others = []
results = []
nons = "Entities:\nRelations:\nColors:\nCounting:\nOther attributes:"
for ans in tqdm(all_texts):
ans = ans.replace("\n", "")
pts = prompt_de_atomic + "\nAnswer: " + ans
if ans == "":
results.append(nons)
continue
response = self.call_openai(pts)
if "Entities" in response:
results.append(response)
else:
results.append(nons)
for facts in results:
lines = facts.split("\n")
for line in lines:
if line[:9] == "Entities:":
entity = line.strip().replace("Entities: ", "").split(". ")
if line.strip() == "Entities:":
entity = []
Entities.append(entity)
if line[:10] == "Relations:":
# print(line.strip().replace("Relations: ","").replace("],","]],").split("], "))
relation = line.strip().replace("Relations: ", "").split(". ")
if line.strip() == "Relations:":
relation = []
Relations.append(relation)
if line[:7] == "Colors:":
color = line.strip().replace("Colors: ", "").split(". ")
if line.strip() == "Colors:":
color = []
Colors.append(color)
if line[:9] == "Counting:":
count = line.strip().replace("Counting: ", "").split(". ")
if line.strip() == "Counting:":
count = []
Counting.append(count)
if line[:17] == "Other attributes:":
other = line.strip().replace("Other attributes: ", "").split(". ")
if line.strip() == "Other attributes:":
other = []
Others.append(other)
hallucinations = [Entities[i] + Relations[i] + Colors[i] + Counting[i] + Others[i] for i in range(len(Entities))]
# print(hallucinations)
return hallucinations, Entities, Relations, Colors, Counting, Others
def stage3(self, atomic_facts, images, img_path=None):
# ofa_pipe = pipeline(Tasks.visual_entailment, model='damo/ofa_visual-entailment_snli-ve_large_en')
# model = pipeline(Tasks.visual_entailment, model=self.vem_path)
if self.model_type == "ofa_ve":
model = pipeline(Tasks.visual_entailment, model='damo/ofa_visual-entailment_snli-ve_large_en')
if self.model_type == "ofa":
preprocessor = OfaPreprocessor(model_dir="damo/ofa_visual-question-answering_pretrain_large_en")
model = pipeline(
Tasks.visual_question_answering,
model="damo/ofa_visual-question-answering_pretrain_large_en",
model_revision='v1.0.1',
preprocessor=preprocessor)
if self.model_type == "llava":
if not self.llava_path:
print("Please input path for LLaVA model.")
exit()
model = LLaVA()
# if self.model_type == "mplug":
# output = mplug(image, prompt, model)
# if self.model_type == "blip2":
# output = blip_2(image, prompt, model, vis_processors_blip_2)
fact_scores = []
for id, elements in enumerate(tqdm(atomic_facts)):
fact_score = []
if img_path:
image = os.path.join(img_path, images[id])
else:
image = images[id]
for element in elements:
# input = {'image': image, 'text': element}
prompt = 'Statement: ' + element + ' Is this statement is right according to the image? Please answer yes or no.'
if self.model_type == "ofa_ve":
output = ofa(True, model, element, image)
if self.model_type == "ofa":
output = ofa(False, model, prompt, image)
if self.model_type == "llava":
output = llava15(image, prompt, model)
# print(output)
# if self.model_type == "mplug":
# output = mplug(image, prompt, model)
# if self.model_type == "blip2":
# output = blip_2(image, prompt, model, vis_processors_blip_2)
# output = ofa_pipe(input)[0]
if "yes" in output.lower():
fact_score.append(1)
else:
fact_score.append(0)
# if output.lower() == "yes" or output== "Yes":
# fact_score.append(1)
# else:
# fact_score.append(0)
fact_scores.append(fact_score)
# result.append(output[OutputKeys.LABELS])
# results.append({"image": images_id[id], "facts": elements, "result": str(result)})
# checking_results.append(result)
instance_score = [sum(ii) / len(ii) if len(ii) > 0 else 0 for ii in fact_scores]
# print("Overall score: ", sum(instance_score) / len(instance_score))
return sum(instance_score) / len(instance_score), fact_scores
'''
answers: a list of strings, each element in this list is an answer
'''
def faithscore(self, answers, images):
## Stage 1: Sub-setence Identification
labeld_sub_sen = self.stage1(answers)
### Stage 2: Atomic Fact Generation
atomic_facts, Entities, Relations, Colors, Counting, Others = self.stage2(labeld_sub_sen)
### Stage 3: Verification
# print(atomic_facts)
score, fact_scores = self.stage3(atomic_facts, images)
sentence_score = self.sentence_faithscore(Entities, Relations, Colors, Counting, Others, self.labeled_sub(labeld_sub_sen), fact_scores)
return score, sentence_score
def sentence_faithscore(self, Entities, Relations, Colors, Counting, Others, all_texts, fact_scores):
Entities_recog = []
for ents in Entities:
entities = []
for ent in ents:
ent4sen = []
sentence = nltk.sent_tokenize(ent)
tags = nltk.pos_tag(nltk.word_tokenize(sentence[0]))
for tag in tags:
if tag[1] in ['NN', 'NNS', 'JJ', 'NNP', 'VBG', 'JJR', 'NNPS', 'RB', 'DT']:
# print(tag)
ent4sen.append(tag[0])
# tags.append(chunk.label())
if len(ent4sen) < 1:
print(tags)
exit()
entities.append(ent4sen[-1])
# print(ents)
# print(entities)
if len(entities) != len(ents):
print("error")
exit()
Entities_recog.append(entities)
entity_scores = []
relation_scores = []
color_scores = []
count_scores = []
other_scores = []
for i in range(len(fact_scores)):
entity_scores.append(fact_scores[i][:len(Entities[i])])
relation_scores.append(fact_scores[i][len(Entities[i]): len(Entities[i]) + len(Relations[i])])
color_scores.append(fact_scores[i][
len(Entities[i]) + len(Relations[i]): len(Entities[i]) + len(Relations[i]) + len(
Colors[i])])
count_scores.append(fact_scores[i][
len(Entities[i]) + len(Relations[i]) + len(Colors[i]): len(Entities[i]) + len(
Relations[i]) + len(Colors[i]) + len(Counting[i])])
other_scores.append(
fact_scores[i][len(Entities[i]) + len(Relations[i]) + len(Colors[i]) + len(Counting[i]):])
sentence_scores = []
for id1, ins in enumerate(all_texts):
sentence_score = []
for id2, sub_sen in enumerate(all_texts[id1]):
flag = True
# print(Entities_recog)
# print(entity_scores)
for id3, ee in enumerate(Entities_recog[id1]):
if ee in sub_sen and entity_scores[id1][id3] != 1:
flag = False
for id4, rel in enumerate(relation_scores[id1]):
if ee in sub_sen and ee in Relations[id1][id4] and rel != 1:
flag = False
for id4, rel in enumerate(color_scores[id1]):
if ee in sub_sen and ee in Colors[id1][id4] and rel != 1:
flag = False
for id4, rel in enumerate(count_scores[id1]):
if ee in sub_sen and ee in Counting[id1][id4] and rel != 1:
flag = False
for id4, rel in enumerate(other_scores[id1]):
if ee in sub_sen and ee in Others[id1][id4] and rel != 1:
flag = False
sentence_score.append(flag)
sentence_scores.append(sentence_score)
score4sen = [sum(ss)/len(ss) if len(ss) > 0 else 1 for ss in sentence_scores]
sentence_level_score = score4sen
# print(score4sen)
# print(sum(score4sen)/len(score4sen))
return sum(score4sen)/len(score4sen)
def labeled_sub(self, des_ana):
all_texts = []
for ss in des_ana:
desc = []
pos_des = [substr.start() for substr in re.finditer("[D]", ss)]
pos_ana = [substr.start() for substr in re.finditer("[A]", ss)]
pos_seg = pos_des + pos_ana
pos_seg.sort()
for i in range(len(pos_seg)):
if pos_seg[i] in pos_des:
if i == 0:
desc.append(ss[:pos_seg[i] - 1])
else:
desc.append(ss[pos_seg[i - 1] + 3:pos_seg[i] - 1])
all_texts.append(desc)
return all_texts | [
"\n\n",
"Statement: PLACEHOLDER Is this statement is right according to the image? Please answer yes or no."
] |
2024-01-10 | HUIXIN-TW/CleanGraph | server~llm~gpt_model.py | import os
import openai
import json
from dotenv import load_dotenv
from datetime import datetime
from pathlib import Path
import logging
logging.basicConfig(level=logging.INFO)
class GPTModel:
def __init__(
self,
model_name: str,
file_path: str,
initial_prompt_chunk_path: str,
output_file_path: str,
):
"""
Initialize the GPTModel class.
Set the model name and the default file path.
"""
self._load_env_variables()
self.model_name = model_name
self.default_file_path = file_path
self.output_file_path = output_file_path
self.initial_prompt_chunk = initial_prompt_chunk_path
logging.info(f"Model: Set to {model_name} with chuncked triplets")
def _load_env_variables(self):
"""
Helper Method: Load the environment variables from the .env file.
Exit if the OPENAI_API_KEY is not set.
"""
load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")
if not api_key:
logging.error("API_KEY not found. Please set it in the .env file.")
exit(1)
openai.api_key = api_key
def _get_file_name(self) -> str:
"""
Helper Method: Get the file name from the user input.
Return the full path as a string.
"""
# while True:
# choosen_file = input(
# f"Which file do you want to use under {self.default_file_path}? "
# )
# if choosen_file:
# return f"{self.default_file_path}{choosen_file}"
# print("Please enter a valid file name")
return f"{self.default_file_path}"
def _read_from_file(self, filename: str) -> str:
"""
Helper Method: Read content from a file
Rreturn the content as a string.
"""
return Path(filename).read_text(encoding="utf-8")
def _generate_filename(self, prompt_name: str, output_file_path: str) -> str:
"""
Helper Method: Generate a filename based on the current datetime.
Return the filename as a string.
"""
current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
# return f"../prompts/gpt_outputs/{prompt_name}-output-{current_time}.json"
return f"{output_file_path}/{prompt_name}-output-{current_time}.json"
def _save_to_file(self, content: str, filename: str) -> str:
"""
Helper Method: Save the provided content to a file.
Return the filename as a string.
"""
output_path = Path(filename)
output_path.parent.mkdir(parents=True, exist_ok=True)
output_path.write_text(json.dumps(content, indent=2))
return filename
def _parse_chunked_result(self, chunked_result, recursion_depth=0):
"""
Helper Method: Parse the chunked result into a list of triplets.
Return the combined results as a list.
"""
combined_results = []
try:
chunked_result_list = chunked_result.split("\n\n")
except Exception as e:
logging.error(f"Failed to split chunked result into list. {e}")
return combined_results
for chunk in chunked_result_list:
triple = self.check_json_format(chunk)
if triple:
combined_results.extend(triple)
return combined_results
def check_json_format(self, chunk, retry=True):
"""
Validates, parses, and checks the JSON data format.
"""
valid_items = []
try:
json_object = json.loads(chunk)
# Function to validate the item format
def validate_item(item):
if all(
key in item
for key in ("head", "head_type", "relation", "tail", "tail_type")
):
return True
return False
if isinstance(json_object, list):
for item in json_object:
if validate_item(item):
valid_items.append(item)
else:
if validate_item(json_object):
valid_items.append(json_object)
except json.JSONDecodeError as e:
if retry:
start_index = chunk.find("[{")
end_index = chunk.rfind("}")
if start_index != -1 and end_index != -1:
json_data = chunk[start_index : end_index + 1] + "]"
print(f"Failed to parse JSON chunk: {chunk}. Error: {e}")
print("Clear Json format")
print(json_data)
return self.check_json_format(json_data, retry=False)
return []
return valid_items
def get_triplets_chunk(self, user_message: str, chunk_size: int = 1) -> str:
"""
Get triplets from a given user message in chunks.
Return the chunked results as a string.
"""
paragraphs = user_message.split("\n\n")
chunks = self._chunk_paragraphs(paragraphs, chunk_size)
return self._get_chunk_responses(chunks)
def _chunk_paragraphs(self, paragraphs: list, chunk_size: int) -> list:
"""
Hepler Method: Split paragraphs into chunks based on the chunk size.
Return the chunks as a list.
"""
chunks, current_chunk, current_length = [], [], 0
for para in paragraphs:
if current_length + len(para.split()) <= chunk_size:
current_chunk.append(para)
current_length += len(para.split())
else:
chunks.append("\n\n".join(current_chunk))
current_chunk = [para]
current_length = len(para.split())
if current_chunk:
chunks.append("\n\n".join(current_chunk))
return chunks
def _get_chunk_responses(self, chunks: list) -> str:
"""
Helper Method: Get responses for each chunk from the API.
Return the chunked results as a string.
"""
all_results = []
for chunk in chunks:
logging.info(
f"Processing Chunk: {chunk[:30]}..."
) # Logging first 30 characters of the chunk
completion = openai.ChatCompletion.create(
model=self.model_name,
messages=[
{"role": "system", "content": self.initial_prompt_chunk},
{"role": "user", "content": chunk},
],
temperature=0.2,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
)
all_results.append(completion.choices[0].message["content"])
return "\n\n".join(all_results)
def main(self):
input_file = self._read_from_file(self._get_file_name())
print(f"Get file content from {self._get_file_name()}")
chunked_result = self.get_triplets_chunk(input_file)
combined_results = self._parse_chunked_result(chunked_result)
print(json.dumps(combined_results, indent=2))
output_file = self._generate_filename(
"gpt-3.5-turb-chunk", self.output_file_path
)
chunked_result_file_name = self._save_to_file(combined_results, output_file)
print(f"Saved chunked results to {chunked_result_file_name}")
if __name__ == "__main__":
model_name = "gpt-3.5-turbo"
file_path = "../prompts/gpt_inputs/CITS1003.txt"
output_file_path = "../prompts/gpt_outputs"
initial_prompt_chunk_path = Path("initial_prompts.txt").read_text().strip()
if input("Do you want to use fine tuned model? (y/n): ") == "y":
model_name = input("Enter the fine tuned model name: ")
gpt = GPTModel(model_name, file_path, initial_prompt_chunk_path, output_file_path)
gpt.main()
| [
"initial_prompts.txt"
] |
2024-01-10 | jmsktm/gpt_index | tests~indices~vector_store~test_base.py | """Test Faiss index."""
import sys
from typing import Any, Dict, List, Tuple
from unittest.mock import MagicMock, patch
import numpy as np
import pytest
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.vector_store.faiss import GPTFaissIndex
from gpt_index.indices.vector_store.simple import GPTSimpleVectorIndex
from gpt_index.readers.schema.base import Document
from tests.mock_utils.mock_decorator import patch_common
from tests.mock_utils.mock_prompts import MOCK_REFINE_PROMPT, MOCK_TEXT_QA_PROMPT
@pytest.fixture
def struct_kwargs() -> Tuple[Dict, Dict]:
"""Index kwargs."""
index_kwargs = {
"text_qa_template": MOCK_TEXT_QA_PROMPT,
}
query_kwargs = {
"text_qa_template": MOCK_TEXT_QA_PROMPT,
"refine_template": MOCK_REFINE_PROMPT,
"similarity_top_k": 1,
}
return index_kwargs, query_kwargs
@pytest.fixture
def documents() -> List[Document]:
"""Get documents."""
# NOTE: one document for now
doc_text = (
"Hello world.\n"
"This is a test.\n"
"This is another test.\n"
"This is a test v2."
)
return [Document(doc_text)]
class MockFaissIndex:
"""Mock Faiss index."""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Initialize params."""
self._index: Dict[int, np.ndarray] = {}
@property
def ntotal(self) -> int:
"""Get ntotal."""
return len(self._index)
def add(self, vecs: np.ndarray) -> None:
"""Add vectors to index."""
for vec in vecs:
new_id = len(self._index)
self._index[new_id] = vec
def reset(self) -> None:
"""Reset index."""
self._index = {}
def search(self, vec: np.ndarray, k: int) -> Tuple[np.ndarray, np.ndarray]:
"""Search index."""
# assume query vec is of the form 1 x k
# index_mat is n x k
index_mat = np.array(list(self._index.values()))
# compute distances
distances = np.linalg.norm(index_mat - vec, axis=1)
indices = np.argsort(distances)[:k]
sorted_distances = distances[indices][:k]
# return distances and indices
return sorted_distances[np.newaxis, :], indices[np.newaxis, :]
def mock_get_text_embedding(text: str) -> List[float]:
"""Mock get text embedding."""
# assume dimensions are 5
if text == "Hello world.":
return [1, 0, 0, 0, 0]
elif text == "This is a test.":
return [0, 1, 0, 0, 0]
elif text == "This is another test.":
return [0, 0, 1, 0, 0]
elif text == "This is a test v2.":
return [0, 0, 0, 1, 0]
elif text == "This is a test v3.":
return [0, 0, 0, 0, 1]
elif text == "This is bar test.":
return [0, 0, 1, 0, 0]
elif text == "Hello world backup.":
# this is used when "Hello world." is deleted.
return [1, 0, 0, 0, 0]
else:
raise ValueError("Invalid text for `mock_get_text_embedding`.")
def mock_get_query_embedding(query: str) -> List[float]:
"""Mock get query embedding."""
return [0, 0, 1, 0, 0]
@patch_common
@patch.object(
OpenAIEmbedding, "get_text_embedding", side_effect=mock_get_text_embedding
)
def test_build_faiss(
_mock_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_splitter: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test build GPTFaissIndex."""
# NOTE: mock faiss import
sys.modules["faiss"] = MagicMock()
# NOTE: mock faiss index
faiss_index = MockFaissIndex()
index_kwargs, query_kwargs = struct_kwargs
index = GPTFaissIndex(documents=documents, faiss_index=faiss_index, **index_kwargs)
assert len(index.index_struct.nodes_dict) == 4
# check contents of nodes
assert index.index_struct.get_node("0").text == "Hello world."
assert index.index_struct.get_node("1").text == "This is a test."
assert index.index_struct.get_node("2").text == "This is another test."
assert index.index_struct.get_node("3").text == "This is a test v2."
@patch_common
@patch.object(
OpenAIEmbedding, "get_text_embedding", side_effect=mock_get_text_embedding
)
def test_faiss_insert(
_mock_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_splitter: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test build GPTFaissIndex."""
# NOTE: mock faiss import
sys.modules["faiss"] = MagicMock()
# NOTE: mock faiss index
faiss_index = MockFaissIndex()
index_kwargs, query_kwargs = struct_kwargs
index = GPTFaissIndex(documents=documents, faiss_index=faiss_index, **index_kwargs)
# insert into index
index.insert(Document(text="This is a test v3."))
# check contenst of nodes
assert index.index_struct.get_node("3").text == "This is a test v2."
assert index.index_struct.get_node("4").text == "This is a test v3."
@patch_common
@patch.object(
OpenAIEmbedding, "get_text_embedding", side_effect=mock_get_text_embedding
)
@patch.object(
OpenAIEmbedding, "get_query_embedding", side_effect=mock_get_query_embedding
)
def test_faiss_query(
_mock_query_embed: Any,
_mock_text_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test embedding query."""
# NOTE: mock faiss import
sys.modules["faiss"] = MagicMock()
# NOTE: mock faiss index
faiss_index = MockFaissIndex()
index_kwargs, query_kwargs = struct_kwargs
index = GPTFaissIndex(documents, faiss_index=faiss_index, **index_kwargs)
# test embedding query
query_str = "What is?"
response = index.query(query_str, **query_kwargs)
assert str(response) == ("What is?:This is another test.")
@patch_common
@patch.object(
OpenAIEmbedding, "get_text_embedding", side_effect=mock_get_text_embedding
)
def test_build_simple(
_mock_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_splitter: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test build GPTFaissIndex."""
index_kwargs, query_kwargs = struct_kwargs
index = GPTSimpleVectorIndex(documents=documents, **index_kwargs)
assert len(index.index_struct.nodes_dict) == 4
# check contents of nodes
actual_node_tups = [
("Hello world.", [1, 0, 0, 0, 0]),
("This is a test.", [0, 1, 0, 0, 0]),
("This is another test.", [0, 0, 1, 0, 0]),
("This is a test v2.", [0, 0, 0, 1, 0]),
]
for text_id in index.index_struct.id_map.keys():
node = index.index_struct.get_node(text_id)
embedding = index.index_struct.embedding_dict[text_id]
assert (node.text, embedding) in actual_node_tups
@patch_common
@patch.object(
OpenAIEmbedding, "get_text_embedding", side_effect=mock_get_text_embedding
)
def test_simple_insert(
_mock_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_splitter: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test build GPTFaissIndex."""
index_kwargs, query_kwargs = struct_kwargs
index = GPTSimpleVectorIndex(documents=documents, **index_kwargs)
# insert into index
index.insert(Document(text="This is a test v3."))
# check contenst of nodes
actual_node_tups = [
("Hello world.", [1, 0, 0, 0, 0]),
("This is a test.", [0, 1, 0, 0, 0]),
("This is another test.", [0, 0, 1, 0, 0]),
("This is a test v2.", [0, 0, 0, 1, 0]),
("This is a test v3.", [0, 0, 0, 0, 1]),
]
for text_id in index.index_struct.id_map.keys():
node = index.index_struct.get_node(text_id)
embedding = index.index_struct.embedding_dict[text_id]
assert (node.text, embedding) in actual_node_tups
@patch_common
@patch.object(
OpenAIEmbedding, "get_text_embedding", side_effect=mock_get_text_embedding
)
def test_simple_delete(
_mock_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_splitter: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test build GPTFaissIndex."""
index_kwargs, query_kwargs = struct_kwargs
new_documents = [
Document("Hello world.", doc_id="test_id_0"),
Document("This is a test.", doc_id="test_id_1"),
Document("This is another test.", doc_id="test_id_2"),
Document("This is a test v2.", doc_id="test_id_3"),
]
index = GPTSimpleVectorIndex(documents=new_documents, **index_kwargs)
# test delete
index.delete("test_id_0")
assert len(index.index_struct.nodes_dict) == 3
assert len(index.index_struct.id_map) == 3
actual_node_tups = [
("This is a test.", [0, 1, 0, 0, 0], "test_id_1"),
("This is another test.", [0, 0, 1, 0, 0], "test_id_2"),
("This is a test v2.", [0, 0, 0, 1, 0], "test_id_3"),
]
for text_id in index.index_struct.id_map.keys():
node = index.index_struct.get_node(text_id)
embedding = index.index_struct.embedding_dict[text_id]
assert (node.text, embedding, node.ref_doc_id) in actual_node_tups
# test insert
index.insert(Document("Hello world backup.", doc_id="test_id_0"))
assert len(index.index_struct.nodes_dict) == 4
assert len(index.index_struct.id_map) == 4
actual_node_tups = [
("Hello world backup.", [1, 0, 0, 0, 0], "test_id_0"),
("This is a test.", [0, 1, 0, 0, 0], "test_id_1"),
("This is another test.", [0, 0, 1, 0, 0], "test_id_2"),
("This is a test v2.", [0, 0, 0, 1, 0], "test_id_3"),
]
for text_id in index.index_struct.id_map.keys():
node = index.index_struct.get_node(text_id)
embedding = index.index_struct.embedding_dict[text_id]
assert (node.text, embedding, node.ref_doc_id) in actual_node_tups
@patch_common
@patch.object(
OpenAIEmbedding, "get_text_embedding", side_effect=mock_get_text_embedding
)
@patch.object(
OpenAIEmbedding, "get_query_embedding", side_effect=mock_get_query_embedding
)
def test_simple_query(
_mock_query_embed: Any,
_mock_text_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_split_text: Any,
documents: List[Document],
struct_kwargs: Dict,
) -> None:
"""Test embedding query."""
index_kwargs, query_kwargs = struct_kwargs
index = GPTSimpleVectorIndex(documents, **index_kwargs)
# test embedding query
query_str = "What is?"
response = index.query(query_str, **query_kwargs)
assert str(response) == ("What is?:This is another test.")
# test with keyword filter (required)
query_kwargs_copy = query_kwargs.copy()
query_kwargs_copy["similarity_top_k"] = 5
response = index.query(query_str, **query_kwargs_copy, required_keywords=["Hello"])
assert str(response) == ("What is?:Hello world.")
# test with keyword filter (exclude)
# insert into index
index.insert(Document(text="This is bar test."))
query_kwargs_copy = query_kwargs.copy()
query_kwargs_copy["similarity_top_k"] = 2
response = index.query(query_str, **query_kwargs_copy, exclude_keywords=["another"])
assert str(response) == ("What is?:This is bar test.")
@patch_common
@patch.object(
OpenAIEmbedding, "_get_text_embedding", side_effect=mock_get_text_embedding
)
@patch.object(
OpenAIEmbedding, "_get_query_embedding", side_effect=mock_get_query_embedding
)
def test_query_and_count_tokens(
_mock_query_embed: Any,
_mock_text_embed: Any,
_mock_init: Any,
_mock_predict: Any,
_mock_total_tokens_used: Any,
_mock_split_text: Any,
struct_kwargs: Dict,
) -> None:
"""Test embedding query."""
doc_text = (
"Hello world.\n"
"This is a test.\n"
"This is another test.\n"
"This is a test v2."
)
document = Document(doc_text)
index_kwargs, query_kwargs = struct_kwargs
index = GPTSimpleVectorIndex([document], **index_kwargs)
assert index.embed_model.total_tokens_used == 20
# test embedding query
query_str = "What is?"
index.query(query_str, **query_kwargs)
assert index.embed_model.last_token_usage == 3
| [] |
2024-01-10 | jmsktm/gpt_index | gpt_index~langchain_helpers~sql_wrapper.py | """SQL wrapper around SQLDatabase in langchain."""
from typing import Any, Dict, List, Tuple
from langchain.sql_database import SQLDatabase as LangchainSQLDatabase
from sqlalchemy import MetaData, insert
from sqlalchemy.engine import Engine
class SQLDatabase(LangchainSQLDatabase):
"""SQL Database."""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
# self.metadata_obj = MetaData(bind=self._engine, reflect=True)
self.metadata_obj = MetaData(bind=self._engine)
self.metadata_obj.reflect()
@property
def engine(self) -> Engine:
"""Return SQL Alchemy engine."""
return self._engine
def get_table_columns(self, table_name: str) -> List[dict]:
"""Get table columns."""
return self._inspector.get_columns(table_name)
def get_single_table_info(self, table_name: str) -> str:
"""Get table info for a single table."""
template = "Table '{table_name}' has columns: {columns}."
columns = []
for column in self._inspector.get_columns(table_name):
columns.append(f"{column['name']} ({str(column['type'])})")
column_str = ", ".join(columns)
table_str = template.format(table_name=table_name, columns=column_str)
return table_str
def insert_into_table(self, table_name: str, data: dict) -> None:
"""Insert data into a table."""
table = self.metadata_obj.tables[table_name]
stmt = insert(table).values(**data)
self._engine.execute(stmt)
def run_sql(self, command: str) -> Tuple[str, Dict]:
"""Execute a SQL statement and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
with self._engine.connect() as connection:
cursor = connection.exec_driver_sql(command)
if cursor.returns_rows:
result = cursor.fetchall()
return str(result), {"result": result}
return "", {}
| [
"Table '{table_name}' has columns: {columns}."
] |
2024-01-10 | jmsktm/gpt_index | gpt_index~prompts~default_prompts.py | """Set of default prompts."""
from gpt_index.prompts.prompts import (
KeywordExtractPrompt,
QueryKeywordExtractPrompt,
QuestionAnswerPrompt,
RefinePrompt,
SchemaExtractPrompt,
SummaryPrompt,
TextToSQLPrompt,
TreeInsertPrompt,
TreeSelectMultiplePrompt,
TreeSelectPrompt,
)
############################################
# Tree
############################################
DEFAULT_SUMMARY_PROMPT_TMPL = (
"Write a summary of the following. Try to use only the "
"information provided. "
"Try to include as many key details as possible.\n"
"\n"
"\n"
"{context_str}\n"
"\n"
"\n"
'SUMMARY:"""\n'
)
DEFAULT_SUMMARY_PROMPT = SummaryPrompt(DEFAULT_SUMMARY_PROMPT_TMPL)
# insert prompts
DEFAULT_INSERT_PROMPT_TMPL = (
"Context information is below. It is provided in a numbered list "
"(1 to {num_chunks}),"
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"---------------------\n"
"Given the context information, here is a new piece of "
"information: {new_chunk_text}\n"
"Answer with the number corresponding to the summary that should be updated. "
"The answer should be the number corresponding to the "
"summary that is most relevant to the question.\n"
)
DEFAULT_INSERT_PROMPT = TreeInsertPrompt(DEFAULT_INSERT_PROMPT_TMPL)
# # single choice
DEFAULT_QUERY_PROMPT_TMPL = (
"Some choices are given below. It is provided in a numbered list "
"(1 to {num_chunks}),"
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"Using only the choices above and not prior knowledge, return "
"the choice that is most relevant to the question: '{query_str}'\n"
"Provide choice in the following format: 'ANSWER: <number>' and explain why "
"this summary was selected in relation to the question.\n"
)
DEFAULT_QUERY_PROMPT = TreeSelectPrompt(DEFAULT_QUERY_PROMPT_TMPL)
# multiple choice
DEFAULT_QUERY_PROMPT_MULTIPLE_TMPL = (
"Some choices are given below. It is provided in a numbered "
"list (1 to {num_chunks}), "
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"Using only the choices above and not prior knowledge, return the top choices "
"(no more than {branching_factor}, ranked by most relevant to least) that "
"are most relevant to the question: '{query_str}'\n"
"Provide choices in the following format: 'ANSWER: <numbers>' and explain why "
"these summaries were selected in relation to the question.\n"
)
DEFAULT_QUERY_PROMPT_MULTIPLE = TreeSelectMultiplePrompt(
DEFAULT_QUERY_PROMPT_MULTIPLE_TMPL
)
DEFAULT_REFINE_PROMPT_TMPL = (
"The original question is as follows: {query_str}\n"
"We have provided an existing answer: {existing_answer}\n"
"We have the opportunity to refine the existing answer"
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"Given the new context, refine the original answer to better "
"answer the question. "
"If the context isn't useful, return the original answer."
)
DEFAULT_REFINE_PROMPT = RefinePrompt(DEFAULT_REFINE_PROMPT_TMPL)
DEFAULT_TEXT_QA_PROMPT_TMPL = (
"Context information is below. \n"
"---------------------\n"
"{context_str}"
"\n---------------------\n"
"Given the context information and not prior knowledge, "
"answer the question: {query_str}\n"
)
DEFAULT_TEXT_QA_PROMPT = QuestionAnswerPrompt(DEFAULT_TEXT_QA_PROMPT_TMPL)
############################################
# Keyword Table
############################################
DEFAULT_KEYWORD_EXTRACT_TEMPLATE_TMPL = (
"Some text is provided below. Given the text, extract up to {max_keywords} "
"keywords from the text. Avoid stopwords."
"---------------------\n"
"{text}\n"
"---------------------\n"
"Provide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n"
)
DEFAULT_KEYWORD_EXTRACT_TEMPLATE = KeywordExtractPrompt(
DEFAULT_KEYWORD_EXTRACT_TEMPLATE_TMPL
)
# NOTE: the keyword extraction for queries can be the same as
# the one used to build the index, but here we tune it to see if performance is better.
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE_TMPL = (
"A question is provided below. Given the question, extract up to {max_keywords} "
"keywords from the text. Focus on extracting the keywords that we can use "
"to best lookup answers to the question. Avoid stopwords.\n"
"---------------------\n"
"{question}\n"
"---------------------\n"
"Provide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n"
)
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE = QueryKeywordExtractPrompt(
DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE_TMPL
)
############################################
# Structured Store
############################################
DEFAULT_SCHEMA_EXTRACT_TMPL = (
"We wish to extract relevant fields from an unstructured text chunk into "
"a structured schema. We first provide the unstructured text, and then "
"we provide the schema that we wish to extract. "
"-----------text-----------\n"
"{text}\n"
"-----------schema-----------\n"
"{schema}\n"
"---------------------\n"
"Given the text and schema, extract the relevant fields from the text in "
"the following format: "
"field1: <value>\nfield2: <value>\n...\n\n"
"If a field is not present in the text, don't include it in the output."
"If no fields are present in the text, return a blank string.\n"
"Fields: "
)
DEFAULT_SCHEMA_EXTRACT_PROMPT = SchemaExtractPrompt(DEFAULT_SCHEMA_EXTRACT_TMPL)
# NOTE: taken from langchain and adapted
# shorturl.at/nqyD1
DEFAULT_TEXT_TO_SQL_TMPL = (
"Given an input question, first create a syntactically correct SQL query "
"to run, then look at the results of the query and return the answer.\n"
"Use the following format:\n"
'Question: "Question here"\n'
'SQLQuery: "SQL Query to run"\n'
"The following is a schema of the table:\n"
"---------------------\n"
"{schema}\n"
"---------------------\n"
"Question: {query_str}\n"
"SQLQuery: "
)
DEFAULT_TEXT_TO_SQL_PROMPT = TextToSQLPrompt(DEFAULT_TEXT_TO_SQL_TMPL)
| [
"Context information is below. It is provided in a numbered list (1 to {num_chunks}),where each item in the list corresponds to a summary.\n---------------------\n{context_list}---------------------\nGiven the context information, here is a new piece of information: {new_chunk_text}\nAnswer with the number corresponding to the summary that should be updated. The answer should be the number corresponding to the summary that is most relevant to the question.\n",
"Some choices are given below. It is provided in a numbered list (1 to {num_chunks}), where each item in the list corresponds to a summary.\n---------------------\n{context_list}\n---------------------\nUsing only the choices above and not prior knowledge, return the top choices (no more than {branching_factor}, ranked by most relevant to least) that are most relevant to the question: '{query_str}'\nProvide choices in the following format: 'ANSWER: <numbers>' and explain why these summaries were selected in relation to the question.\n",
"Some text is provided below. Given the text, extract up to {max_keywords} keywords from the text. Avoid stopwords.---------------------\n{text}\n---------------------\nProvide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n",
"Context information is below. \n---------------------\n{context_str}\n---------------------\nGiven the context information and not prior knowledge, answer the question: {query_str}\n",
"A question is provided below. Given the question, extract up to {max_keywords} keywords from the text. Focus on extracting the keywords that we can use to best lookup answers to the question. Avoid stopwords.\n---------------------\n{question}\n---------------------\nProvide keywords in the following comma-separated format: 'KEYWORDS: <keywords>'\n",
"Some choices are given below. It is provided in a numbered list (1 to {num_chunks}),where each item in the list corresponds to a summary.\n---------------------\n{context_list}\n---------------------\nUsing only the choices above and not prior knowledge, return the choice that is most relevant to the question: '{query_str}'\nProvide choice in the following format: 'ANSWER: <number>' and explain why this summary was selected in relation to the question.\n",
"The original question is as follows: {query_str}\nWe have provided an existing answer: {existing_answer}\nWe have the opportunity to refine the existing answer(only if needed) with some more context below.\n------------\n{context_msg}\n------------\nGiven the new context, refine the original answer to better answer the question. If the context isn't useful, return the original answer.",
"Write a summary of the following. Try to use only the information provided. Try to include as many key details as possible.\n\n\n{context_str}\n\n\nSUMMARY:\"\"\"\n"
] |
2024-01-10 | duca-meneses/challenge-backend-7 | app~destinos~serializer.py | import os
import openai
from rest_framework import serializers
from app.destinos.models import Destino
from app.destinos.validators import texto_descritivo_valido
class DestinoSerializer(serializers.ModelSerializer):
class Meta:
model = Destino
fields = '__all__'
def validate(self, data):
if texto_descritivo_valido(data['texto_descritivo']):
openai.api_key = os.getenv('OPENAI_API_KEY')
prompt = f"Faça um resumo sobre {data['nome']} enfatizando o porque este lugar é incrível. Utilize uma linguagem informal e até 100 caracteres no máximo em cada parágrafo. Crie 2 parágrafos neste resumo"
completion = openai.Completion.create(
model='text-davinci-003',
prompt=prompt,
max_tokens=1024,
temperature=0.5,
)
response = completion.choices[0].text
data['texto_descritivo'] = response
return data
| [
"Faça um resumo sobre PLACEHOLDER enfatizando o porque este lugar é incrível. Utilize uma linguagem informal e até 100 caracteres no máximo em cada parágrafo. Crie 2 parágrafos neste resumo"
] |
2024-01-10 | 273v/python-lmss | lmss~enrich.py | """lmss.enrich provides methods for enriching data in the knowledge graph, like:
* proposing pref labels or alt labels for entities
* proposing common translations for labels in different languages
* proposing definitions for entities
* proposing new relations between entities
* proposing new entities
"""
# imports
import argparse
import json
import os
import re
from pathlib import Path
# packages
import openai
from rdflib import URIRef, Literal
from rdflib.namespace import RDF, RDFS, OWL, SKOS
# project
import lmss.owl
from lmss.graph import LMSSGraph
try:
openai.api_key = os.getenv("OPENAI_API_KEY", None)
if openai.api_key is None:
with open(Path.home() / ".openai" / "api_key") as api_key_file:
openai.api_key = api_key_file.read().strip()
except Exception:
openai.api_key = None
# regular expression to extract parenthetical text
PARENTHETICAL_REGEX = re.compile(r"\((.*?)\)", re.UNICODE)
SKIP_LABELS = {
"Language",
"Industry",
"Service",
"PACER NoS",
"DEPRECATED",
}
def get_definition_prompt(
lmss_graph: LMSSGraph,
concept: dict,
) -> tuple[str, str]:
"""Get the system and user prompts for a concept definition.
Args:
lmss_graph (LMSSGraph): The LMSSGraph object.
concept (dict): The concept dictionary.
Returns:
tuple[str, str]: The system and user prompts.
"""
# get the labels for the parent and children
parent_labels = [
lmss_graph.concepts[parent]["label"]
for parent in concept["parents"]
if lmss_graph.concepts[parent]["label"] is not None
]
child_labels = [
lmss_graph.concepts[child]["label"]
for child in concept["children"]
if lmss_graph.concepts[child]["label"] is not None
]
# get the prompt to send to the LLM
system_prompt = """You are a legal knowledge management professional who works with ontologies.
Please perform the following tasks:
1. Review the top level OWL class in <TOP-LEVEL> provided by the user.
2. Review the OWL Class information in <CONCEPT> provided by the user.
3. Write a two-sentence definition in the style of the Black's Law Dictionary for CONCEPT in plain English.
4. Only describe the CONCEPT specifically, not the top level class generally.
5. Respond only with the definition. Do not include any heading or other text.
"""
top_concept_label = concept["top_concept"]
try:
top_concept_description = lmss_graph.concepts[
lmss_graph.key_concepts[top_concept_label]
]["definitions"][0]
except (KeyError, IndexError):
top_concept_description = ""
user_prompt = f"""<TOP-LEVEL>
Label: {top_concept_label}
Description: {top_concept_description}
</TOP-LEVEL>
<CONCEPT>\n"""
for key, value in concept.items():
if key in ["iri", "parents", "children"]:
continue
if isinstance(value, list):
if len(value) > 0:
user_prompt += f"{key}: {', '.join(value)}\n"
else:
if value is not None and len(value.strip()) > 0:
if key == "top_concept":
continue
user_prompt += f"{key}: {value}\n"
if len(parent_labels) > 0:
user_prompt += f"Parents: {', '.join(parent_labels)}\n"
if len(child_labels) > 0:
user_prompt += f"Children: {', '.join(child_labels)}\n"
user_prompt += "</CONCEPT>\n"
return system_prompt, user_prompt
def get_translation_prompt(
term: str,
target_langs: list[str],
source_lang: str = "en",
) -> tuple[str, str]:
"""Get the system and user prompts for a concept definition.
Args:
term (str): The term to translate.
target_langs (list[str]): The target languages.
source_lang (str): The source language.
Returns:
tuple[str, str]: The system and user prompts.
"""
# get the prompt to send to the LLM
system_prompt = """You are a legal knowledge management professional who works with ontologies.
Please perform the following tasks:
1. Translate the term <TERM> from <SOURCE_LANG> to each <TARGET_LANG> listed in ISO 639-1 format.
2. Respond only with the list of translation in JSON.
3. Do not include any heading or other text.
"""
user_prompt = f"""<TERM>{term}</TERM>\n"""
user_prompt += f"<SOURCE_LANG>{source_lang}</SOURCE_LANG>\n"
user_prompt += "<TARGET_LANG>" + ", ".join(target_langs) + "</TARGET_LANG>\n"
user_prompt += "JSON:"
return system_prompt, user_prompt
def enrich_definitions(lmss_graph: LMSSGraph, progress: bool = True) -> LMSSGraph:
"""
Enrich the definitions of concepts in the graph.
Args:
lmss_graph: The LMSSGraph object.
progress: Whether to show progress.
Returns:
Graph: The enriched graph.
"""
# get concepts and prog bar
concepts = lmss_graph.concepts.items()
if progress:
try:
import tqdm # pylint: disable=C0415
concepts = tqdm.tqdm(concepts, desc="Enriching definitions")
except ImportError:
progress = False
# use rdflib subjects to get the list of classes
for iri, concept in concepts:
# update prog bar if progress set
if progress:
concepts.set_description(f"Enriching definitions: {concept['label']}") # type: ignore
# get the root concept beneath owl:Thing
if len(concept["parents"]) == 0:
continue
if (
"definitions" not in concept
or concept["definitions"] is None
or len(concept["definitions"]) == 0
):
try:
system_prompt, user_prompt = get_definition_prompt(g, concept)
# get the definition with ChatCompletion API
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0301",
temperature=0.0,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
],
)
# get the definition
if len(response["choices"]) > 0:
definition = response["choices"][0]["message"]["content"].strip()
lmss_graph.add((URIRef(iri), SKOS.definition, Literal(definition)))
except Exception as error: # pylint: disable=W0718
print(f"Unable to enrich definition for {concept['label']}: {error}")
continue
# return the graph
return lmss_graph
# pylint: disable=R1702
def correct_labels(lmss_graph: LMSSGraph) -> LMSSGraph:
"""Correct rdfs:label and prefLabel values in the graph by
removing any parenthetical text and moving it to altLabel.
Args:
lmss_graph: The LMSSGraph object.
Returns:
Graph: The corrected graph.
"""
# get concepts and prog bar
concepts = lmss_graph.concepts.items()
try:
import tqdm # pylint: disable=C0415
concepts = tqdm.tqdm(concepts, desc="Correcting labels")
except ImportError:
pass
# use rdflib subjects to get the list of classes
for iri, concept in concepts:
# update prog bar if progress set
concepts.set_description(f"Correcting labels: {concept['label']}") # type: ignore
# get the label
label = concept["label"]
if label is None:
continue
# check the rdfs:label
if "(" in label:
# get all the alt labels
alt_labels = PARENTHETICAL_REGEX.findall(label)
if len(alt_labels) > 0:
# remove parens and drop double spaces
label = PARENTHETICAL_REGEX.sub("", label).strip()
label = " ".join(label.split())
# add all the alt labels
for alt_label in alt_labels:
if (
alt_label not in concept["alt_labels"]
and alt_label not in SKIP_LABELS
):
lmss_graph.add((URIRef(iri), SKOS.altLabel, Literal(alt_label)))
concept["alt_labels"].append(alt_label)
# remove and update the rdfs:label
lmss_graph.remove((URIRef(iri), RDFS.label, Literal(concept["label"])))
lmss_graph.add((URIRef(iri), RDFS.label, Literal(label)))
# do the same thing with the pref labels
for pref_label in concept["pref_labels"]:
if "(" in pref_label:
# get all the alt labels
alt_labels = PARENTHETICAL_REGEX.findall(pref_label)
if len(alt_labels) > 0:
# remove parens and drop double spaces
pref_label = PARENTHETICAL_REGEX.sub("", pref_label).strip()
pref_label = " ".join(pref_label.split())
# add all the alt labels
for alt_label in alt_labels:
if (
alt_label not in concept["alt_labels"]
and alt_label not in SKIP_LABELS
):
lmss_graph.add(
(URIRef(iri), SKOS.altLabel, Literal(alt_label))
)
concept["alt_labels"].append(alt_label)
# remove and update the pref label
lmss_graph.remove(
(URIRef(iri), SKOS.prefLabel, Literal(concept["label"]))
)
lmss_graph.add((URIRef(iri), SKOS.prefLabel, Literal(pref_label)))
# return the graph
return lmss_graph
def translate_concepts(graph: LMSSGraph, concept_set: set[str], target_langs: list[str], progress: bool = True) -> list[dict]:
"""Translate the labels and definitions from en(-US) to one or more
ISO 639-1 language codes, such as es-ES, de-DE, or en-UK.
Args:
graph: The LMSSGraph object.
concept_set: The set of concepts to translate.
target_langs: The target language(s) to translate to.
progress: Whether to show a progress bar.
Returns:
Graph: The graph enriched with translations for labels and definitions.
"""
result_data: list[dict] = []
# iterate through concepts
subjects = [s for s in
g.subjects(RDF.type, OWL.Class)
if str(s) in concept_set]
if progress:
try:
import tqdm # pylint: disable=C0415
subjects = tqdm.tqdm(subjects, desc="Translating concepts")
except ImportError:
pass
for concept in subjects:
# check if the IRI is in the concept set
try:
iri = str(concept)
if iri not in concept_set:
continue
# get rdfs:label and prefLabel
label = g.value(concept, RDFS.label)
record = {
"iri": iri,
"rdfs:label": str(label)
}
if label is not None:
system_prompt, user_prompt = get_translation_prompt(label, target_langs)
# get the definition with ChatCompletion API
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0301",
temperature=0.0,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt},
],
)
# get the definition
if len(response["choices"]) > 0:
try:
raw_response = response["choices"][0]["message"]["content"].strip()
json_response = json.loads(raw_response)
for lang_code in json_response:
if lang_code in target_langs:
record[lang_code] = json_response[lang_code]
except json.decoder.JSONDecodeError:
pass
if len(record) > 2:
result_data.append(record)
except Exception as e:
print(iri, e)
continue
return result_data
if __name__ == "__main__":
# setup argparser
parser = argparse.ArgumentParser(description="Enrich LMSS OWL file.")
# branch for graph
parser.add_argument(
"--branch",
type=str,
default=lmss.owl.DEFAULT_REPO_BRANCH,
help="Branch to use for the LMSSGraph.",
)
# git repo for graph
parser.add_argument(
"--repo",
type=str,
default=lmss.owl.DEFAULT_REPO_ARTIFACT_URL,
help="Git repo to use for the LMSSGraph.",
)
# local file for owl
parser.add_argument(
"--file",
type=Path,
default=None,
help="Local file to use for the LMSSGraph.",
)
# set options for labels and definitions
parser.add_argument(
"--labels",
action="store_true",
help="Correct labels in the OWL file.",
)
parser.add_argument(
"--definitions",
action="store_true",
help="Enrich definitions in the OWL file.",
)
# set option for OpenAI key file
parser.add_argument(
"--openai-key-path",
type=Path,
default=".openai_key",
help="Path to the OpenAI key file.",
)
# add output file
parser.add_argument(
"--output",
type=Path,
default="lmss.owl",
help="Path to the output file.",
)
# parse args
args = parser.parse_args()
# get the graph based on file vs repo url/branch
if args.file is not None:
g = LMSSGraph(owl_path=args.file)
else:
g = LMSSGraph(owl_branch=args.branch, owl_repo_url=args.repo)
# get the openai key
if openai.api_key is None:
if not args.openai_key_path.exists():
raise RuntimeError("Unable to set OpenAI key from $OPENAI_API_KEY orfile.")
with open(args.openai_key_path, "rt", encoding="utf-8") as f:
openai.api_key = f.read().strip()
# correct labels
if args.labels:
g = correct_labels(g)
# enrich definitions
if args.definitions:
g = enrich_definitions(g)
# serialize the graph
g.serialize(args.output, format="xml") | [
"<TOP-LEVEL>\nLabel: PLACEHOLDER\nDescription: PLACEHOLDER \n</TOP-LEVEL>\n\n<CONCEPT>\n",
"You are a legal knowledge management professional who works with ontologies.\n Please perform the following tasks:\n 1. Review the top level OWL class in <TOP-LEVEL> provided by the user.\n 2. Review the OWL Class information in <CONCEPT> provided by the user.\n 3. Write a two-sentence definition in the style of the Black's Law Dictionary for CONCEPT in plain English.\n 4. Only describe the CONCEPT specifically, not the top level class generally.\n 5. Respond only with the definition. Do not include any heading or other text.\n ",
"</CONCEPT>\n",
"<TARGET_LANG>",
"<SOURCE_LANG>PLACEHOLDER</SOURCE_LANG>\n",
"JSON:",
"</TARGET_LANG>\n",
"PLACEHOLDER: PLACEHOLDER\n",
"<TERM>PLACEHOLDER</TERM>\n",
"You are a legal knowledge management professional who works with ontologies.\n Please perform the following tasks:\n 1. Translate the term <TERM> from <SOURCE_LANG> to each <TARGET_LANG> listed in ISO 639-1 format.\n 2. Respond only with the list of translation in JSON.\n 3. Do not include any heading or other text.\n ",
", "
] |
2024-01-10 | renatotn7/Squad-Bible-Explorer-PTBR | criardataset~0teste.py | import os
import csv
import sys
sys.path.append("..")
sys.path.append("../library")
sys.path.append("../library/dicionarios")
import mdlDicLivros
import pandas as pd
import requests
import re
from bs4 import BeautifulSoup
from docx import Document
import json
from docx.enum.text import WD_ALIGN_PARAGRAPH
from docx.shared import Pt
from openpyxl import Workbook
import openpyxl
from openpyxl.styles import Alignment
from docx import Document
from docx.shared import Inches
from docx.shared import RGBColor
import openai
#Deuteronomy.16.1
# criando o documento word
document = Document()
def formata(str, paragrafo,rgb,inches):
# Remove all tags except "i", "br" and "n"
text = str
text = re.sub(r'<a[^>]*>([^<]+)</a>', '\\1', text)
text = re.sub(r'<sup[^>]*>([^<]+)</sup>', '\\1', text)
# text = "S HANGED IS A קללת אלהים — i.e., a degradation of the Divine King"
#r = romanize3.__dict__['heb']
#transliterated_text = r.convert(text)
# print(transliterated_text)
# texto = "ola <br/> <n> estou aqui </n> vc nao sabe"
segmentado = re.split(r'(<[^>]*>)', text)
print(segmentado)
paragrafo.paragraph_format.left_indent = Inches(inches)
tagi = False
tagbr = False
tagn = False
for i in segmentado:
# print(rashi_paragraph)
if "<" in i:
run = paragrafo.add_run("")
else:
run = paragrafo.add_run(i)
run.font.color.rgb = rgb
if "<i" in i:
tagi = True
if "<br/>" in i:
run.add_break()
if "<br>" in i:
run.add_break()
if "<b>" in i:
tagn = True
if "</i>" in i:
tagi = False
if "</b>" in i:
tagn = False
if tagi:
run.italic = True
else:
run.italic = False
if tagn:
run.bold = True
else:
run.bold = False
return paragrafo;
#run.font.color.rgb = RGBColor(0, 0, 255)
# iterando pelas linhas do dataframe
def extrair_passagemnvi(livro, cap, ver):
# URL da passagem a ser extraída
url = f'https://www.bibliaonline.com.br/nvi/{livro}/{cap}/{ver}'
# Fazendo a requisição
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
# Extrair o texto da passagem
passagem = soup.find('p', class_='jss35')
if passagem is None:
print(f"Não foi possível encontrar a passagem {livro}/{cap}/{ver}")
return None
return passagem.get_text()
def extrair_passagem( cap, ver,livroreduzido,url):
# URL da passagem a ser extraída
url = f'https://www.biblegateway.com{url}'
print(url)
# Fazendo a requisição
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
# Extrair o texto da passagem
print(f'Extraindo passagem {livroreduzido}.{cap}.{ver}...')
passagem2 = soup.find('span', class_=f'{livroreduzido}-{cap}-{ver}')
if passagem2 is None:
print(f"Não foi possível encontrar a passagem {livroreduzido}.{cap}.{ver}")
return None
return passagem2.get_text().replace(';', '.,')
# cria o dataframe
dfversoes = pd.DataFrame(columns=['textoSefaria', 'textoPortugues', 'textoOJB', 'bookAnc', 'chapterAnc', 'verseAnc'])
def pesquisar(livro, capitulo, versiculo,arquivo,linkObj):
listtext = []
cntMusar =0
cntMidrash = 0
cntTalmude = 0
print('0.1')
Versiculo = versiculo
url = 'https://www.sefaria.org/api/related/' + livro+'.'+str(capitulo) + '.' + str(Versiculo) + '?with_sheet_links=1'
print(url)
response = requests.get(url)
#provisorio, ]e preciso entender mais a fundo
try:
data = response.json()
except:
return
df = pd.DataFrame(data["links"])
df = df[['anchorRef', 'category', 'index_title', 'type', 'anchorVerse', 'sourceHasEn',
'ref']]
for index, row in df.iterrows():
try:
ref = row['ref'].split()
book = ref[0]
chapter = ref[1].split(':')[0]
verse = ref[1].split(':')[1]
except:
book = ''
chapter = ''
verse = ''
df.at[index, "book"] = book
df.at[index, "chapter"] = chapter
df.at[index, "verse"] = verse
# seleciona as colunas desejadas
print('0.2')
for index, row in df.iterrows():
rejeita = False
try:
if len(row['ref'].split(":"))> 2: #se tiver mais de 2 pontos na referencia rejeita
if( row['ref'].split(":")[2]!='1'):
rejeita=True
except:
rejeita=True
if (row['category'] == 'Talmud' or row['category'] == 'Targum' or row['category'] == 'Midrash'
or row['category'] == 'Commentary') or (row['category']=='Musar') and rejeita == False:
try:
if row['category'] == 'Midrash' or row['category'] == "Talmud":
capComentario = row['ref'] #.split(':')[0].strip()
else:
capComentario = row['ref']
url = 'https://www.sefaria.org/api/texts/' + capComentario + '?commentary=0&context=1&pad=0&wrapLinks=1&wrapNamedEntities=1&multiple=0&stripItags=0&transLangPref=&firstAvailableRef=1&fallbackOnDefaultVersion=1'
print(url)
response = requests.get(url)
data1 = response.json()
print(row['category'])
print(len(data1['text']))
#se tiver ingles
if(row['category'] == 'Targum'):
print(row['category'])
print (row['ref'])
text = data1['text'][int(row['ref'].split(':')[1])-1]
print(row['category'] +" sucesso")
print(text)
elif row['category'] == 'Midrash' or row['category'] == "Talmud":
#print('FASE 1')
for it in range(0, len(data1['text'])):
texto = data1['text'][it]
# print('FASE 2'+ texto)
if (row['anchorRef'] in texto):
print(row['category'] + " sucesso")
#print('***********FASE 2' )
text = texto
else:
text = data1['text'][0]
if (row['category'] == "Talmud"):
cntTalmude = cntTalmude + 1
if (cntTalmude > 3):
text = ''
if (row['category'] == "Midrash"):
cntMidrash = cntMidrash + 1
if (cntMidrash > 3):
text = ''
if (row['category'] == "Musar"):
cntMusar = cntMusar + 1
if (cntMusar > 3):
text = ''
except:
# print('erro: '+ row['category'] + ' '+ row['ref'])
textref = ''
text = ''
else:
textref = ''
text = ''
# df.at[index, "textref"] = textref
if text not in listtext:
df.at[index, "text"] = text
listtext.append(text)
else:
df.at[index, "text"] = ''
print('0.3')
df = df[df["text"].str.len() != 0]
for index, row in df.iterrows():
try:
ref = row['anchorRef'].split()
if len(ref)>2:
if(ref[0].strip()=='I'):
ref[0]='1'
if(ref[0].strip()=='II'):
ref[0]='2'
# print(ref[0].strip() + ' ' + ref[1].strip())
# print(mdlDicLivros.tradSefariaNvi[ref[0].strip() + ' ' + ref[1].strip()])
book = mdlDicLivros.tradSefariaNvi[ref[0].strip() + ' ' + ref[1].strip()]
chapter = ref[2].split(':')[0]
verse = ref[2].split(':')[1]
else:
#print(ref[0] )
book = mdlDicLivros.tradSefariaNvi[ref[0].strip()]
chapter = ref[1].split(':')[0]
verse = ref[1].split(':')[1]
except:
print( 'nao achou: ' , ref[0])
book = ''
chapter = ''
verse = ''
df.at[index, "bookAnc"] = book
df.at[index, "chapterAnc"] = chapter
df.at[index, "verseAnc"] = verse
# df.at[index, "textoptbr"] = extrair_passagemnvi(book, chapter, verse)
print('0.4')
#number of rows dataframe
#provisorio
if df.shape[0] == 0:
return
print(df)
#livro, capitulo, versiculoro, capitulo, versiculo
try:
url = 'https://www.sefaria.org/api/texts/' + df.at[
0, 'anchorRef'].split(":")[0] + ''
print('traducao' + url)
response = requests.get(url)
data1 = response.json()
textoSefaria = data1['text'][int(versiculo)-1]
except:
return
#textoPortugues = extrair_passagemnvi(df.at[index1, "bookAnc"], df.at[index1, "chapterAnc"], df.at[index1, "verseAnc"])
textoPortugues = extrair_passagemnvi( mdlDicLivros.tradSefariaNvi[livro], capitulo, versiculo)
# textoOJB=extrair_passagem(df.at[index1, 'chapterAnc'], df.at[index1, 'verseAnc'],df.at[index1, "bookAnc"] ,linkObj)
textoOJB = extrair_passagem(capitulo, versiculo, livro,
linkObj)
dfversoes = pd.DataFrame(
columns=['textoSefaria', 'textoPortugues', 'textoOJB', 'bookAnc', 'chapterAnc', 'verseAnc'])
dfversoes= dfversoes.append(
{'textoSefaria': textoSefaria, 'textoPortugues': textoPortugues, 'textoOJB': textoOJB, 'bookAnc': mdlDicLivros.tradSefariaNvi[livro] ,
'chapterAnc': capitulo, 'verseAnc': versiculo}, ignore_index=True)
# #versoes.append({'textoPortugues': textoPortugues, 'textoSefaria': textoSefaria, 'textoOJB': textoOJB, 'chapterAnc': df.at[index1, 'chapterAnc'], 'verseAnc': df.at[index1, 'verseAnc'], 'bookAnc': df.at[index1, 'bookAnc']},ignore_index=True)
dfversoes.to_csv(arquivo + 'b.csv', index=False,header=False, mode='a')
print("pos ***** "+str(dfversoes.shape[0]))
df = df[['anchorRef', 'category', 'index_title', 'type', 'anchorVerse', 'sourceHasEn','book','chapter','verse','ref',"bookAnc","chapterAnc","verseAnc","text" ]]
print(df)
df_talmud = df.loc[df['category'] == 'Talmud']
print(df_talmud)
df["text"].replace("\n", " ", inplace=True)
#anchorRef,category,index_title,type,anchorVerse,sourceHasEn,book,chapter,verse,ref,bookAnc,chapterAnc,verseAnc,text
print('0.6')
df.to_csv(arquivo+'.csv', index=False,header=False,mode='a')
#exit()
def referencia(nomelivro, rangein,nomearquivo1):
with open(nomearquivo1 + '.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
# Escrever uma linha vazia
writer.writerow(
['anchorRef', 'category', 'index_title', 'type', 'anchorVerse', 'sourceHasEn', 'book', 'chapter', 'verse',
'ref', 'bookAnc', 'chapterAnc', 'verseAnc', 'text'])
with open(nomearquivo1 + 'b.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
# Escrever uma linha vazia
writer.writerow(['textoSefaria', 'textoPortugues', 'textoOJB', 'bookAnc', 'chapterAnc', 'verseAnc'])
dfnorder = pd.DataFrame(
{'texto1': [], 'nvi': [], 'passagem': [], 'url': [], 'livroreduzido': [], 'Livro': [], 'Capitulo': [],
'Versiculo': [], 'siglaBr': []})
dfcorder = pd.DataFrame(
{'texto1': [], 'nvi': [], 'passagem': [], 'url': [], 'livroreduzido': [], 'Livro': [], 'Capitulo': [],
'Versiculo': [], 'siglaBr': []})
arquivofinal = nomelivro
arquivoordenado = nomelivro + 'od'
arquivofinalordenado = nomelivro + 'odF'
extensao = '.txt'
arquivointermediario = arquivofinal + 'bf'
# arquivointermediario=arquivointermediario+extensao
# Dicionário com as traduções das siglas em inglês para português
# Cria uma lista vazia para armazenar as referências
referencias = []
referenciasfinal = []
# Itera sobre os capítulos especificados
nomelivropath = nomelivro.replace(' ', '%20')
strlinks = []
siglas = []
a_tags = []
for capitulo in rangein:
url = f'https://www.biblegateway.com/passage/?search={nomelivropath}%20{capitulo}&version=OJB'
print('url ' + url)
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
# Seleciona todas as tags <a> com a classe "bibleref" e a propriedade "data-bibleref"https://www.biblegateway.com/passage/?search=Shemot%2024%3A7&version=OJB
a_tags.extend(soup.select('a.bibleref[data-bibleref]'))
versiculosRef=[] ;
for tag in a_tags:
#rint('tag '+tag.parent)
print('tag '+tag.parent.text)
if tag.parent.text not in versiculosRef:
texto = tag.parent.text
versiculosRef.append(texto)
links = soup.find_all("a", href=lambda value: value and value.startswith("/passage/?search="))
# Iterar sobre cada link e imprimir o conteúdo href
for link in links:
link.parent
stringln = link["href"]
array = stringln.split(".")
sigla1 = array[0]
# from mdlDicLivros import tradOjbSefaria, tradOjbNvi, tradNviOjb, tradSefariaOjb, tradNviSefaria, tradSefariaNvi,tradOjbUrlToOjbLink,tradOjbLinkToOjbUrl
sigla1 = sigla1.replace("/passage/?search=", "")
siglas.append(sigla1)
if sigla1 in mdlDicLivros.tradOjbUrlToOjbLink:
strlinks.append(link["href"])
# print(link["href"])
print('atags ',a_tags[0])
# Armazena o valor da propriedade "data-bibleref" na lista
for a_tag in a_tags:
referencias.append(a_tag['data-bibleref'])
print(' ref '+referencias[0])
nome_arquivo = arquivointermediario
if True:
# Itera sobre as referências e substitui os pontos por \
referencias_expandidas = []
for referencia in referencias:
referencia = referencia.split(";")[0]
if "-" in referencia:
referencia1, referencia2 = referencia.split("-")
verso1 = referencia1.split(".")[2]
verso2 = referencia2.split(".")[2]
cap = referencia2.split(".")[1]
livro = referencia2.split(".")[0]
inicio, fim = verso1, verso2
for i in range(int(inicio), int(fim) + 1):
referencia_expandida = f"{livro}.{cap}.{i}"
referencias_expandidas.append(referencia_expandida)
else:
referencias_expandidas.append(referencia)
# Remove duplicatas
new_list = []
for item in referencias_expandidas:
if item not in new_list:
new_list.append(item)
referencias_expandidas = new_list
print('ref_expandidas ',referencias_expandidas[0])
versiculos = referencias_expandidas
#referencias_expandidasfinal = []
versiculosfinal = []
for v in referencias_expandidas:
try:
# from mdlDicLivros import tradOjbSefaria, tradOjbNvi, tradNviOjb, tradSefariaOjb, tradNviSefaria, \
# tradSefariaNvi, tradOjbUrlToOjbLink, tradOjbLinkToOjbUrl
print(v)
versiculosfinal.append(v)
#1,2,3
dfnorder.loc[len(dfnorder)] = ['', '', v,
#4
f"/passage/?search={v.split('.')[0] + '.' + v.split('.')[1] + '.' + v.split('.')[2]}&version=OJB",
#5,6,7
v.split('.')[0], mdlDicLivros.tradOjbSefaria[v.split('.')[0]], v.split('.')[1],
v.split('.')[2], mdlDicLivros.tradOjbNvi[v.split('.')[0]]]
# dfnorder #{'texto1', 'passagem', 'url', 'livroreduzido', 'Livro', 'Capitulo', 'Versiculo'})
# dfnorder.append({'passagem': v, 'url': f"/passage/?search={v.split('.')[0] + '.' + v.split('.')[1] + '.' + v.split('.')[2]}&version=OJB" ,
# 'livroreduzido':v.split('.')[0] , 'Livro': traducoes2[v.split('.')[0]], 'Capitulo': v.split('.')[1], 'Versiculo': v.split('.')[2]}, ignore_index=True)
# referencias_expandidasfinal.append(
# v + ";" + f"/passage/?search={v.split('.')[0] + '.' + v.split('.')[1] + '.' + v.split('.')[2]}&version=OJB" + ";" +
# v.split('.')[0] + ";" + mdlDicLivros.tradOjbSefaria[v.split('.')[0]] + ";" + v.split('.')[
# 1] + ';' + v.split('.')[2])
except:
print('nao achou')
print(dfnorder) # print(referencias_expandidasfinal)
# print(referencias_expandidasfinal)
# Cria uma lista com a ordem dos livros na Bíblia
ordem_livros = ["Gn", "Ex", "Lv", "Nm", "Dt", "Js", "Jz", "Rt", "1Sm", "2Sm", "1Rs", "2Rs", "1Cr", "2Cr", "Ed",
"Ne", "Et", "Jó", "Sl", "Pv", "Ec", "Ct", "Is", "Jr", "Lm", "Ez", "Dn", "Os", "Jl", "Am", "Ob",
"Mq", "Na", "Hc", "Sf", "Ag", "Zc", "Ml"]
# Ordena a lista de versículos usando a ordem dos livros
versiculos_ordenados = []
for v in versiculosfinal:
print(livro)
v1 = v
v = v.split(";")[0]
parts = v.split(".")
livro = parts[0]
if len(parts) != 3:
print("Versiculo invalido: ", v)
continue
try:
if mdlDicLivros.tradOjbNvi[livro] not in ordem_livros:
print("Livro invalido: ", livro)
continue
except:
print("Livro invalido: ", livro)
continue
try:
cap = int(parts[1])
verso = int(parts[2])
except ValueError:
print("Capitulo ou versiculo invalido: ", v)
continue
versiculos_ordenados.append(f"{parts[0]}.{parts[1]}.{parts[2]}")
# versiculos_ordenados.append(v1)
# print(versiculos_ordenados)
# versiculos_ordenados = sorted(versiculos_ordenados, key=lambda x: (ordem_livros.index(x[0]), x[1], x[2]))
# print(versiculos_ordenados)
versiculos_ordenados = sorted(versiculos_ordenados, key=lambda x: (
ordem_livros.index(mdlDicLivros.tradOjbNvi[x.split(".")[0]]), int(x.split(".")[1]), int(x.split(".")[2].split(";")[0])))
versiculos_ordenadosfinal = []
for v in versiculos_ordenados:
# , dfcorder
dfcorder.loc[len(dfcorder)] = ['', '', v,
f"/passage/?search={v.split('.')[0] + '.' + v.split('.')[1] + '.' + v.split('.')[2]}&version=OJB",
v.split('.')[0], mdlDicLivros.tradOjbSefaria[v.split('.')[0]], v.split('.')[1],
v.split('.')[2], mdlDicLivros.tradOjbNvi[v.split('.')[0]]]
# {'texto1', 'passagem', 'url', 'livroreduzido', 'Livro', 'Capitulo', 'Versiculo'})
#versiculos_ordenadosfinal.append(*
# v + ";" + f"/passage/?search={v.split('.')[0] + '.' + v.split('.')[1] + '.' + v.split('.')[2]}&version=OJB" + ";" +
# v.split('.')[0] + ";" + mdlDicLivros.tradOjbSefaria[v.split('.')[0]] + ";" + v.split('.')[1] + ';' + v.split('.')[2])
# print(versiculos_ordenadosfinal)
# Imprime a lista de versículos ordenada
# print(dfcorder)
# limpa csv
for index, row in dfnorder.iterrows():
pesquisar(row["Livro"] , row["Capitulo"] ,row["Versiculo"], nomearquivo1,row['url'])
def representaCategoriasNumericamente(categoria):
if categoria == 'Targum':
return 1
elif categoria == 'Commentary':
return 2
elif categoria == 'Midrash':
return 3
elif categoria == 'Talmud':
return 4
elif categoria == 'Musar':
return 5
return 0
def assinatura(doc, nomearquivo,capitulon=''):
# Adiciona uma nova página
# Adiciona o título da capa
title = doc.add_paragraph()
title.alignment = WD_ALIGN_PARAGRAPH.CENTER
if capitulon != '':
run = title.add_run(nomearquivo + " \ncapitulo: " + capitulon)
else:
run = title.add_run(nomearquivo )
run.bold = True
run.font.size = Pt(24)
document.add_page_break()
# Adiciona o nome
name = doc.add_paragraph()
name.alignment = WD_ALIGN_PARAGRAPH.CENTER
run = name.add_run("Levantamento de Renato Nati")
run.font.size = Pt(12)
doc.add_page_break()
def save_word_file(nomearquivo,capitulon=''):
file_path=nomearquivo + ' cap ' + capitulon+'.docx'
if os.path.isfile(file_path):
os.remove(file_path)
print(f"{file_path} has been removed.")
else:
print(f"{file_path} not found.")
dfversoes = pd.read_csv(nomearquivo+'b.csv')
df = pd.read_csv(nomearquivo+'.csv')
print(df)
print("@@" )
print(dfversoes)
document = Document()
assinatura(document, nomearquivo,capitulon)
#document.add_heading(nomearquivo, 1)
for index1 , row1 in dfversoes.iterrows():
print('124')
# criando o documento word
# adicionando a primeira linha com o livro, capítulo e versículo
document.add_heading(f'{row1["bookAnc"]} {row1["chapterAnc"]}:{row1["verseAnc"]}',1)
# adicionando a segunda linha com o conteúdo da coluna nvi
document.add_paragraph('PTBR: '+ row1["textoPortugues"])
# adicionando a terceira linha com o conteúdo da coluna texto1 em itálico
#texto1_paragraph = document.add_paragraph("textoOjb"+row1["textoOJB"])
texto1_paragraph2 = document.add_paragraph('Sefaria: ' + row1["textoSefaria"])
# texto1_paragraph.italic = True
texto1_paragraph2.italic = True
df['categoria_numerica'] = df['category'].apply(representaCategoriasNumericamente)
df.sort_values(by='categoria_numerica', inplace=True)
for index, row in df.iterrows():
#try:
bookeq= row['bookAnc'].strip() == row1["bookAnc"].strip()
chapeq= str(row['chapterAnc']).strip() == str(row1["chapterAnc"]).strip()
verseq= str(row['verseAnc']).strip() == str(row1["verseAnc"]).strip()
if bookeq and chapeq and verseq:
response_text2 = ""
if not pd.isna(row["text"]) and len(row["text"]) >100 and len(row)<800:
paragraph = document.add_paragraph(row["category"] + ": " + row["ref"]+ "\n")
#paragraph.add_run(row["category"] + ": " + row["ref"])
try:
prompt = "Explique este texto para uma pessoa mediana entender de forma resumida em poucas palavras capturando os pontos principais, e após isso demonstre como eu poderia usar isso para falar do evangelho, citando alguns versiculos que mostre correlação, e também forneça as palavras chaves que funcionem como marcadores do texto. e em português: " + \
row["text"]
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=len(row["text"]) +200,
n=1,
stop=None,
temperature=0.3,
)
print(response)
response_text = response['choices'][0]['text']
response_text = '\n__________________________________\nAnalise através de IA quanto a este Comentario acima: \n' + response_text + '\n_________________________________';
response_text2 = " "
except:
response_text2=""
print("erro na chamada a openai")
if index % 2 == 0:
try:
paragraph = formata(row["text"], paragraph, RGBColor(107, 94, 155),0.2)
if response_text2 != "" :
paragraph = formata(response_text, paragraph, RGBColor(123, 61, 0),0.3)
#document.add_heading(row["category"] + ": " + row["ref"], 3)
except:
paragraph = formata(row["text"], paragraph, RGBColor(30, 106, 57),0.2)
else:
# document.add_heading(row["category"] + ": " + row["ref"], 3)
try:
paragraph = formata(row["text"], paragraph, RGBColor(30, 106, 57),0.2)
#paragraph.add_run(row["text"])
if response_text2 != "":
paragraph = formata(response_text, paragraph, RGBColor(123, 61, 0), 0.3)
#paragraph.add_run(response_text)
except:
paragraph = formata(row["text"], paragraph, RGBColor(30, 106, 57),0.2)
# paragraph.runs[0].font.color.rgb = RGBColor(107, 94, 155)
#except:
# print("erro")
capitulon= ' '+capitulon
document.save(nomearquivo + ' cap ' + capitulon+'.docx')
#extrair_e_salvar(arquivofinal, dfnorder)
# extrair_e_salvar(arquivoordenado,dfcorder)
#referencia('Kefa I',[1,2,3,4,5], 'Kefa I')
for i in range(2, 5):
referencia('Yaakov',[i], 'Yaakov')
save_word_file('Yaakov',str(i))
#referencia('Yaakov',[1], 'Yaakov')
exit(0)
capComentario = "Sotah.9b.4"
category="Midrash"
url = 'https://www.sefaria.org/api/texts/' + capComentario + '?commentary=0&context=1&pad=0&wrapLinks=1&wrapNamedEntities=1&multiple=0&stripItags=0&transLangPref=&firstAvailableRef=1&fallbackOnDefaultVersion=1'
print(url)
response = requests.get(url)
data1 = response.json()
print(len(data1['text']))
# se tiver ingles
if (category == 'Targum'):
text = data1['text'][int("Numbers 5:21".split(':')[1]) - 1]
elif category == 'Midrash' or category == "Talmud":
for pos in range(0, len(data1['text'])):
texto=data1['text'][pos]
if ("Numbers 5:21" in texto):
print(pos)
text = texto
else:
text = data1['text'][0]
print(text)
| [
"Explique este texto para uma pessoa mediana entender de forma resumida em poucas palavras capturando os pontos principais, e após isso demonstre como eu poderia usar isso para falar do evangelho, citando alguns versiculos que mostre correlação, e também forneça as palavras chaves que funcionem como marcadores do texto. e em português: PLACEHOLDER"
] |
2024-01-10 | kimcharli/langchain-test-001 | openai~a001-llms.py | from langchain.llms import OpenAI
import openai
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
'''from https://python.langchain.com/docs/get_started/quickstart.html'''
def llms():
llm = OpenAI(openai_api_key=openai.api_key, openai_organization=openai.organization)
chat_model = ChatOpenAI(openai_api_key=openai.api_key, openai_organization=openai.organization)
print(f"==\nllm.predict('Hi!'):\n {llm.predict('Hi!')}")
print(f"==\nchat_model.predict('Hi!'):\n{ chat_model.predict('Hi!')}")
text = "What would be a good company name for a company that makes colorful socks?"
print(f"==\ntest: {text}")
print(f"==\nllm.predict(text):\n {llm.predict(text)}")
print(f"==\nchat_model.predict(text):\n {chat_model.predict(text)}")
text = "What would be a good company name for a company that makes colorful socks?"
print(f"==\ntext: {text}")
messages = [HumanMessage(content=text)]
print(f"==\nllm.predict_messages(messages): {llm.predict_messages(messages)}")
print(f"==\nchat_model.predict_messages(messages): {chat_model.predict_messages(messages)}")
## promtp templates
def promtp_templates():
from langchain.prompts import PromptTemplate
prompt = PromptTemplate.from_template("What is a good name for a company that makes {product}?")
prompt.format(product="colorful socks")
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
template = "You are a helpful assistant that translates {input_language} to {output_language}."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
chat_prompt.format_messages(input_language="English", output_language="French", text="I love programming.")
## output parsers
def output_parsers():
from langchain.schema import BaseOutputParser
class CommaSeparatedListOutputParser(BaseOutputParser):
"""Parse the output of an LLM call to a comma-separated list."""
def parse(self, text: str):
"""Parse the output of an LLM call."""
return text.strip().split(", ")
hi_bye = CommaSeparatedListOutputParser().parse("hi, bye")
print(f"==\nhi_bye: {hi_bye}")
if __name__ == "__main__":
import openai
from util import parse_openai_api_key_file
parse_openai_api_key_file()
llms()
promtp_templates()
output_parsers()
| [
"You are a helpful assistant that translates {input_language} to {output_language}.",
"[PLACEHOLDER, PLACEHOLDER]",
"What is a good name for a company that makes {product}?",
"{text}"
] |
2024-01-10 | kimcharli/langchain-test-001 | openai~a002-llmchain.py | from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.chains import LLMChain
from langchain.schema import BaseOutputParser
class CommaSeparatedListOutputParser(BaseOutputParser):
"""Parse the output of an LLM call to a comma-separated list."""
def parse(self, text: str):
"""Parse the output of an LLM call."""
return text.strip().split(", ")
def main():
template = """You are a helpful assistant who generates comma separated lists.
A user will pass in a category, and you should generate 5 objects in that category in a comma separated list.
ONLY return a comma separated list, and nothing more."""
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template = "{text}"
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
chain = LLMChain(
llm=ChatOpenAI(openai_api_key=openai.api_key, openai_organization=openai.organization),
prompt=chat_prompt,
output_parser=CommaSeparatedListOutputParser()
)
# chain_run = chain.run("colors")
print(f"==\ntemplate: {template}")
for choise in ['colors', 'metrics', 'last names']:
print(f"==\nchoice: {choise}")
print(f"==\chain.run('{choise}'): {chain.run(choise)}")
if __name__ == "__main__":
import openai
from util import parse_openai_api_key_file
parse_openai_api_key_file()
main()
| [
"[PLACEHOLDER, PLACEHOLDER]",
"You are a helpful assistant who generates comma separated lists.\n A user will pass in a category, and you should generate 5 objects in that category in a comma separated list.\n ONLY return a comma separated list, and nothing more.",
"{text}"
] |
2024-01-10 | kkingwing/my_streamlit_project | %E5%85%B6%E5%AE%83~gpt_chat_v1.0.py | from openai import OpenAI
import streamlit as st
st.title("ChatGPT-like clone")
client = OpenAI(api_key=st.secrets.openai["OPENAI_API_KEY"])
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = "gpt-3.5-turbo"
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input("What is up?"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
for response in client.chat.completions.create(
model=st.session_state["openai_model"],
messages=[
{"role": m["role"], "content": m["content"]}
for m in st.session_state.messages
],
stream=True,
):
full_response += (response.choices[0].delta.content or "")
message_placeholder.markdown(full_response + "▌")
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response}) | [
"content"
] |
2024-01-10 | EdF2021/berend_app | app~Berend-Botje.py | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import streamlit as st
from PIL import Image
import openai
openai_api_key = os.getenv("OPENAI_API_KEY")
image = Image.open('images/producttoer.jpeg')
from streamlit.logger import get_logger
# import tiktoken
# import tiktoken_ext
image = Image.open('images/producttoer.jpeg')
from streamlit.logger import get_logger
# import tiktoken
# import tiktoken_ext
# import openai
# >>>>>>> c73e3ec134906f799647debf55cab1a1a04ad895
LOGGER = get_logger(__name__)
ENCODINGS = 'cl100k_base'
def run():
st.set_page_config(
page_title="Berend-Botje Skills",
page_icon="👋",
layout="wide",
initial_sidebar_state="collapsed"
)
st.write("### Welkom bij Berend-Botje Skills 👋")
st.image(image, caption=None, width=240, use_column_width=None, clamp=True, channels="RGB", output_format="png")
st.sidebar.success("Kies één van Berend's skills")
st.markdown(
"""
###### Berend-Botje is een slimme AI assistent die je kan helpen bij het uitvoeren van diverse werkzaamheden.
Afhankelijk van de werkzaamheden gebruikt Berend hiervoor 1 of meer skills. Deze skills maken gebruik van AI modellen van **openai** zoals ChatGPT. Het verschil met ChatGPT is dat alle informatie binnen de omgeving van de gebruiker blijft!
###### 👈 Voorbeelden. Selecteer in de zijbalk een voorbeeld skill. van Berend-Botje!
1. [De Lesplanner](Lesplan_Demo)
2. [De Notulist](Mapping_Demo)
2. [De Dataanalist](DataFrame_Demo)
3. [De Datavormgever](Plotting_Demo)
#### Meer weten?
- Ga naar ....
-
#### Het is werk under conder construction
- Lees bijvoorbeeld
"""
)
if __name__ == "__main__":
run()
| [] |
2024-01-10 | yubuyuabc/Chinese-LangChain | clc~gpt_service.py | #!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author:quincy qiang
@license: Apache Licence
@file: generate.py
@time: 2023/04/17
@contact: [email protected]
@software: PyCharm
@description: coding..
"""
from typing import List, Optional
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from transformers import AutoModel, AutoTokenizer
class ChatGLMService(LLM):
max_token: int = 10000
temperature: float = 0.1
top_p = 0.9
history = []
tokenizer: object = None
model: object = None
def __init__(self):
super().__init__()
@property
def _llm_type(self) -> str:
return "ChatGLM"
def _call(self,
prompt: str,
stop: Optional[List[str]] = None) -> str:
response, _ = self.model.chat(
self.tokenizer,
prompt,
history=self.history,
max_length=self.max_token,
temperature=self.temperature,
)
if stop is not None:
response = enforce_stop_tokens(response, stop)
self.history = self.history + [[None, response]]
return response
def load_model(self,
model_name_or_path: str = "THUDM/chatglm-6b"):
self.tokenizer = AutoTokenizer.from_pretrained(
model_name_or_path,
trust_remote_code=True
)
self.model = AutoModel.from_pretrained(model_name_or_path, trust_remote_code=True).half().cuda()
self.model=self.model.eval()
# if __name__ == '__main__':
# config=LangChainCFG()
# chatLLM = ChatGLMService()
# chatLLM.load_model(model_name_or_path=config.llm_model_name)
| [] |
2024-01-10 | yubuyuabc/Chinese-LangChain | tests~test_vector_store.py | from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
# 中文Wikipedia数据导入示例:
embedding_model_name = '/root/pretrained_models/ernie-gram-zh'
embeddings = HuggingFaceEmbeddings(model_name=embedding_model_name)
vector_store = FAISS.load_local("/root/GoMall/Knowledge-ChatGLM/cache/zh_wikipedia", embeddings)
print(vector_store)
res = vector_store.similarity_search_with_score('闫强')
print(res)
| [] |
2024-01-10 | yubuyuabc/Chinese-LangChain | clc~source_service.py | #!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author:quincy qiang
@license: Apache Licence
@file: search.py
@time: 2023/04/17
@contact: [email protected]
@software: PyCharm
@description: coding..
"""
import os
from duckduckgo_search import ddg
from duckduckgo_search.utils import SESSION
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
class SourceService(object):
def __init__(self, config):
self.vector_store = None
self.config = config
self.embeddings = HuggingFaceEmbeddings(model_name=self.config.embedding_model_name)
self.docs_path = self.config.docs_path
self.vector_store_path = self.config.vector_store_path
def init_source_vector(self):
"""
初始化本地知识库向量
:return:
"""
docs = []
for doc in os.listdir(self.docs_path):
if doc.endswith('.txt'):
print(doc)
loader = UnstructuredFileLoader(f'{self.docs_path}/{doc}', mode="elements")
doc = loader.load()
docs.extend(doc)
self.vector_store = FAISS.from_documents(docs, self.embeddings)
self.vector_store.save_local(self.vector_store_path)
def add_document(self, document_path):
loader = UnstructuredFileLoader(document_path, mode="elements")
doc = loader.load()
self.vector_store.add_documents(doc)
self.vector_store.save_local(self.vector_store_path)
def load_vector_store(self, path):
if path is None:
self.vector_store = FAISS.load_local(self.vector_store_path, self.embeddings)
else:
self.vector_store = FAISS.load_local(path, self.embeddings)
return self.vector_store
def search_web(self, query):
SESSION.proxies = {
"http": f"socks5h://localhost:7890",
"https": f"socks5h://localhost:7890"
}
results = ddg(query)
web_content = ''
if results:
for result in results:
web_content += result['body']
return web_content
# if __name__ == '__main__':
# config = LangChainCFG()
# source_service = SourceService(config)
# source_service.init_source_vector()
# search_result = source_service.vector_store.similarity_search_with_score('科比')
# print(search_result)
#
# source_service.add_document('/home/searchgpt/yq/Knowledge-ChatGLM/docs/added/科比.txt')
# search_result = source_service.vector_store.similarity_search_with_score('科比')
# print(search_result)
#
# vector_store=source_service.load_vector_store()
# search_result = source_service.vector_store.similarity_search_with_score('科比')
# print(search_result)
| [] |
2024-01-10 | yubuyuabc/Chinese-LangChain | create_knowledge.py | #!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author:quincy qiang
@license: Apache Licence
@file: create_knowledge.py
@time: 2023/04/18
@contact: [email protected]
@software: PyCharm
@description: - emoji:https://emojixd.com/pocket/science
"""
import os
import pandas as pd
from langchain.schema import Document
from langchain.document_loaders import UnstructuredFileLoader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from tqdm import tqdm
# 中文Wikipedia数据导入示例:
embedding_model_name = '/root/pretrained_models/text2vec-large-chinese'
docs_path = '/root/GoMall/Knowledge-ChatGLM/cache/financial_research_reports'
embeddings = HuggingFaceEmbeddings(model_name=embedding_model_name)
# Wikipedia数据处理
# docs = []
# with open('docs/zh_wikipedia/zhwiki.sim.utf8', 'r', encoding='utf-8') as f:
# for idx, line in tqdm(enumerate(f.readlines())):
# metadata = {"source": f'doc_id_{idx}'}
# docs.append(Document(page_content=line.strip(), metadata=metadata))
#
# vector_store = FAISS.from_documents(docs, embeddings)
# vector_store.save_local('cache/zh_wikipedia/')
docs = []
with open('cache/zh_wikipedia/wiki.zh-sim-cleaned.txt', 'r', encoding='utf-8') as f:
for idx, line in tqdm(enumerate(f.readlines())):
metadata = {"source": f'doc_id_{idx}'}
docs.append(Document(page_content=line.strip(), metadata=metadata))
vector_store = FAISS.from_documents(docs, embeddings)
vector_store.save_local('cache/zh_wikipedia/')
# 金融研报数据处理
# docs = []
#
# for doc in tqdm(os.listdir(docs_path)):
# if doc.endswith('.txt'):
# # print(doc)
# loader = UnstructuredFileLoader(f'{docs_path}/{doc}', mode="elements")
# doc = loader.load()
# docs.extend(doc)
# vector_store = FAISS.from_documents(docs, embeddings)
# vector_store.save_local('cache/financial_research_reports')
# 英雄联盟
docs = []
lol_df = pd.read_csv('cache/lol/champions.csv')
# lol_df.columns = ['id', '英雄简称', '英雄全称', '出生地', '人物属性', '英雄类别', '英雄故事']
print(lol_df)
for idx, row in lol_df.iterrows():
metadata = {"source": f'doc_id_{idx}'}
text = ' '.join(row.values)
# for col in ['英雄简称', '英雄全称', '出生地', '人物属性', '英雄类别', '英雄故事']:
# text += row[col]
docs.append(Document(page_content=text, metadata=metadata))
vector_store = FAISS.from_documents(docs, embeddings)
vector_store.save_local('cache/lol/')
| [] |
2024-01-10 | CuchulainX/PythonProgrammingPuzzles | solvers~gpt3~lm_solve~gpt3_lib.py | import os
import json
import openai
import ezlog
import time
import datetime
assert 'OPENAI_API_KEY' in os.environ, "Need to set environment variable `OPENAI_API_KEY`"
openai.api_key = os.environ['OPENAI_API_KEY']
_CACHE_PATH = os.path.join(os.path.dirname(__file__), "../.cache")
_CACHE_FILENAME = os.path.join(_CACHE_PATH, "gpt3.cache")
_ENCODING = "utf-8"
_cache = None
# the cache file is just a list of (query params dictionary encoded as a string but without n, result list)
# multiple queries with the same params (except for n) are merged into a single big list
def _save_line(item, comment=None):
global _cache
assert _cache is not None
with open(_CACHE_FILENAME, "a", encoding=_ENCODING) as f:
f.write(str(item)+ ((" # " + comment + "\n") if comment else "\n"))
def _load_cache():
global _cache
assert _cache is None, "gpt3 cache already loaded"
if not os.path.exists(_CACHE_PATH):
ezlog.warn("Creating cache path")
os.makedirs(_CACHE_PATH)
_cache = {}
if os.path.exists(_CACHE_FILENAME):
time0 = time.perf_counter()
with open(_CACHE_FILENAME, "r", encoding=_ENCODING) as f:
for k, v in [eval(line) for line in f.readlines()]:
if k not in _cache:
_cache[k] = v
else:
_cache[k].extend(v)
ezlog.info(f"Loaded gpt3 cache in {time.perf_counter()-time0:.1f}s")
else:
ezlog.warn("No gpt3 cache yet")
def query(prompt, n=10, max_tokens=150, temp=1.0, max_batch=32, stop=None, notes=None, cache_only=False, verbose=True):
"""Query gpt3
:param prompt: Up to 2048 tokens (about 3-4k chars)
:param n: number of answers, None returns all cached answers
:param max_tokens:
:param temp: 0.9 seems to work well
:param max_batch: max to query at once
:param stop: string to stop at or '' if not to stop
:param notes: notes you want to save or change in case you want to run the same query more than once!
:return: list of answers and then the response items
"""
global _cache
if _cache is None:
_load_cache()
if temp == 0 and n > 1:
ezlog.debug("Temp 0: no point in running more than one query")
n = 1
key = str(dict(prompt=prompt, max_tokens=max_tokens, temp=temp, max_batch=max_batch, stop=stop, rep=notes))
cached = _cache.get(key, [])
if n is None:
return cached[:]
if len(cached) >= n:
return cached[:n]
if cache_only:
pass
1/0
assert not cache_only, "Entry not found in cache"
if verbose:
print("/"*100)
print("Querying GPT3 with prompt:")
print(prompt)
s = stop and stop.replace('\n', '\\n')
print(f"/// n={n} ({n-len(cached)} new) max_tokens={max_tokens} temp={temp} max_batch={max_batch} stop={s}")
print("/"*100)
time0 = time.perf_counter()
new = []
n -= len(cached)
while n > 0:
m = min(n, max_batch)
res = openai.Completion.create(
engine="davinci-msft",
prompt=prompt,
max_tokens=max_tokens,
temperature=temp,
n=m,
stop=stop or None
)
new += [c["text"] for c in res["choices"]]
n -= m
_save_line((key, new), f"{time.perf_counter() - time0:.1f}s {datetime.datetime.now()}")
ans = _cache[key] = cached + new
return ans[:]
# old code
# # to persist calls to the API...
# _disk_cache = joblib.Memory(os.path.join(os.path.dirname(__file__), ".cache"), verbose=1).cache
#
#
# @_disk_cache
# def query(prompt, n=10, max_tokens=150, temperature=1.0, max_batch=32):
# """Query gpt3
#
# :param prompt: Up to 2048 tokens (about 3-4k chars)
# :param n: number of answers
# :param max_tokens:
# :param temperature:
# :param max_batch: max to query at once
# :return: list of answers and then the response items
# """
# if temperature == 0 and n > 1:
# ezlog.debug("Temp 0: no point in running more than one query")
# n = 1
#
# responses = []
# while n > 0:
# m = min(n, max_batch)
# prompt_summary = prompt if len(prompt) < 80 else f"{prompt[:40]}...{prompt[-40:]}"
# ezlog.warn(f"**** Running GPT3 query: temp {temperature}, n={m}, prompt={prompt_summary}")
# time0 = time.perf_counter()
# responses.append(openai.Completion.create(
# engine="davinci-msft",
# prompt=prompt,
# max_tokens=max_tokens,
# temperature=temperature,
# n=m
# ))
# ezlog.info(f"**** Got response in {time.perf_counter()-time0}s...")
# n -= m
#
# return [c["text"] for r in responses for c in r["choices"]], responses
| [] |
2024-01-10 | ronakdinesh/ronak | Documents~MBAGPT-main~indexing.py | import os
import streamlit as st
from langchain.document_loaders import DirectoryLoader, PyPDFLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.chains import ConversationalRetrievalChain
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
# Set persist directory
persist_directory = 'db'
buffett_loader = DirectoryLoader('./docs/buffett/', glob="*.pdf")
branson_loader = DirectoryLoader('./docs/branson/', glob="*.pdf")
buffett_docs = buffett_loader.load()
branson_docs = branson_loader.load()
embeddings = OpenAIEmbeddings()
text_splitter = CharacterTextSplitter(chunk_size=250, chunk_overlap=8)
# Split documents and generate embeddings
buffett_docs_split = text_splitter.split_documents(buffett_docs)
branson_docs_split = text_splitter.split_documents(branson_docs)
# Create Chroma instances and persist embeddings
buffettDB = Chroma.from_documents(buffett_docs_split, embeddings, persist_directory=os.path.join(persist_directory, 'buffett'))
buffettDB.persist()
bransonDB = Chroma.from_documents(branson_docs_split, embeddings, persist_directory=os.path.join(persist_directory, 'branson'))
bransonDB.persist()
| [] |
2024-01-10 | krawc/itd2023 | backend~server.py | from flask import Flask, request, jsonify
from flask_cors import CORS
import openai
import requests
import json
app = Flask(__name__)
CORS(app, origins="*") # Allow all origins
openai.api_key = "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"
model_engine = "text-davinci-002"
@app.route("/api/chat", methods=["POST"])
def chat():
# Get the prompt from the request data
print(request.json)
prompt = request.json["prompt"]
# Use the ChatGPT model to generate text
completion = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "user", "content": prompt}
],
)
message = completion.choices[0].message.content
response = jsonify({"message": message})
# Set CORS headers
response.headers.add("Access-Control-Allow-Origin", "*")
response.headers.add("Access-Control-Allow-Headers", "Content-Type,Authorization")
# Return the response as JSON
return response
def extract_mp3_url(response):
events = response.split("\n\n") # Split events by double newline ("\n\n")
for event in events:
lines = event.strip().split("\n")
event_type = lines[0].replace("event: ", "")
data_line = lines[1].replace("data: ", "")
data = json.loads(data_line)
print(event_type, data)
if event_type == "completed":
url = data.get("url")
if url:
return url
return None
@app.route("/api/voice", methods=["POST"])
def voice():
# Get the text and voice from the request data
print(request.json)
text = request.json["text"]["message"]
voice = request.json.get("voice", "larry")
# Call the playHT API to convert text to speech
playHT_url = "https://play.ht/api/v2/tts"
playHT_headers = {
"Content-Type": "application/json",
"AUTHORIZATION": "Bearer 836b46d009054707aff803c5da3d9203",
"X-USER-ID": "AiI8TMtOClRIwKkOF7lstErKfGK2",
"accept": "text/event-stream",
"content-type": "application/json"
}
playHT_data = {
"text": text,
"voice": voice,
"speed": 0.8
}
playHT_response = requests.post(playHT_url, json=playHT_data, headers=playHT_headers)
#playHT_response.raise_for_status()
mp3_url = None
# Split the response into events
events = playHT_response.text.split("event: ")
# Iterate over events in reverse order to find the "completed" event
for event in reversed(events):
lines = event.strip().split("\n")
event_type = lines[0].replace("event: ", "")
if len(lines) >= 2:
data_line = lines[1].replace("data: ", "")
print(event_type, data_line)
data = json.loads(data_line)
if 'url' in data_line:
mp3_url = data.get("url")
break
response = jsonify({"url": mp3_url}) if mp3_url else jsonify({"message": "MP3 conversion failed."})
# Set CORS headers
response.headers.add("Access-Control-Allow-Origin", "*")
response.headers.add("Access-Control-Allow-Headers", "Content-Type,Authorization")
# Return the playHT API response as JSON
return response
if __name__ == "__main__":
app.run() | [
"application/json"
] |
2024-01-10 | alexshuang/transformers | src~transformers~tokenization_auto.py | # coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Tokenizer class. """
from collections import OrderedDict
from .configuration_auto import (
AlbertConfig,
AutoConfig,
BartConfig,
BertConfig,
BertGenerationConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
ElectraConfig,
EncoderDecoderConfig,
FlaubertConfig,
FSMTConfig,
FunnelConfig,
GPT2Config,
LongformerConfig,
LxmertConfig,
MarianConfig,
MBartConfig,
MobileBertConfig,
OpenAIGPTConfig,
PegasusConfig,
ReformerConfig,
RetriBertConfig,
RobertaConfig,
T5Config,
TransfoXLConfig,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
replace_list_option_in_docstrings,
)
from .configuration_utils import PretrainedConfig
from .tokenization_albert import AlbertTokenizer
from .tokenization_bart import BartTokenizer, BartTokenizerFast
from .tokenization_bert import BertTokenizer, BertTokenizerFast
from .tokenization_bert_generation import BertGenerationTokenizer
from .tokenization_bert_japanese import BertJapaneseTokenizer
from .tokenization_camembert import CamembertTokenizer
from .tokenization_ctrl import CTRLTokenizer
from .tokenization_distilbert import DistilBertTokenizer, DistilBertTokenizerFast
from .tokenization_electra import ElectraTokenizer, ElectraTokenizerFast
from .tokenization_flaubert import FlaubertTokenizer
from .tokenization_fsmt import FSMTTokenizer
from .tokenization_funnel import FunnelTokenizer, FunnelTokenizerFast
from .tokenization_gpt2 import GPT2Tokenizer, GPT2TokenizerFast
from .tokenization_longformer import LongformerTokenizer, LongformerTokenizerFast
from .tokenization_lxmert import LxmertTokenizer, LxmertTokenizerFast
from .tokenization_marian import MarianTokenizer
from .tokenization_mbart import MBartTokenizer
from .tokenization_mobilebert import MobileBertTokenizer, MobileBertTokenizerFast
from .tokenization_openai import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from .tokenization_pegasus import PegasusTokenizer
from .tokenization_reformer import ReformerTokenizer
from .tokenization_retribert import RetriBertTokenizer, RetriBertTokenizerFast
from .tokenization_roberta import RobertaTokenizer, RobertaTokenizerFast
from .tokenization_t5 import T5Tokenizer
from .tokenization_transfo_xl import TransfoXLTokenizer, TransfoXLTokenizerFast
from .tokenization_xlm import XLMTokenizer
from .tokenization_xlm_roberta import XLMRobertaTokenizer
from .tokenization_xlnet import XLNetTokenizer
from .utils import logging
logger = logging.get_logger(__name__)
TOKENIZER_MAPPING = OrderedDict(
[
(RetriBertConfig, (RetriBertTokenizer, RetriBertTokenizerFast)),
(T5Config, (T5Tokenizer, None)),
(MobileBertConfig, (MobileBertTokenizer, MobileBertTokenizerFast)),
(DistilBertConfig, (DistilBertTokenizer, DistilBertTokenizerFast)),
(AlbertConfig, (AlbertTokenizer, None)),
(CamembertConfig, (CamembertTokenizer, None)),
(PegasusConfig, (PegasusTokenizer, None)),
(MBartConfig, (MBartTokenizer, None)),
(XLMRobertaConfig, (XLMRobertaTokenizer, None)),
(MarianConfig, (MarianTokenizer, None)),
(BartConfig, (BartTokenizer, BartTokenizerFast)),
(LongformerConfig, (LongformerTokenizer, LongformerTokenizerFast)),
(RobertaConfig, (RobertaTokenizer, RobertaTokenizerFast)),
(ReformerConfig, (ReformerTokenizer, None)),
(ElectraConfig, (ElectraTokenizer, ElectraTokenizerFast)),
(FunnelConfig, (FunnelTokenizer, FunnelTokenizerFast)),
(LxmertConfig, (LxmertTokenizer, LxmertTokenizerFast)),
(BertConfig, (BertTokenizer, BertTokenizerFast)),
(OpenAIGPTConfig, (OpenAIGPTTokenizer, OpenAIGPTTokenizerFast)),
(GPT2Config, (GPT2Tokenizer, GPT2TokenizerFast)),
(TransfoXLConfig, (TransfoXLTokenizer, TransfoXLTokenizerFast)),
(XLNetConfig, (XLNetTokenizer, None)),
(FlaubertConfig, (FlaubertTokenizer, None)),
(XLMConfig, (XLMTokenizer, None)),
(CTRLConfig, (CTRLTokenizer, None)),
(FSMTConfig, (FSMTTokenizer, None)),
(BertGenerationConfig, (BertGenerationTokenizer, None)),
]
)
SLOW_TOKENIZER_MAPPING = {k: v[0] for k, v in TOKENIZER_MAPPING.items()}
class AutoTokenizer:
r"""
This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library
when created with the :meth:`AutoTokenizer.from_pretrained` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoTokenizer is designed to be instantiated "
"using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method."
)
@classmethod
@replace_list_option_in_docstrings(SLOW_TOKENIZER_MAPPING)
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
r"""
Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary.
The tokenizer class to instantiate is selected based on the :obj:`model_type` property of the config object
(either passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's
missing, by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`:
List options
Params:
pretrained_model_name_or_path (:obj:`str`):
Can be either:
- A string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.,
``bert-base-uncased``.
- A string with the `identifier name` of a predefined tokenizer that was user-uploaded to our S3,
e.g., ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing vocabulary files required by the tokenizer, for instance saved
using the :func:`~transformers.PreTrainedTokenizer.save_pretrained` method, e.g.,
``./my_model_directory/``.
- A path or url to a single saved vocabulary file if and only if the tokenizer only requires a
single vocabulary file (like Bert or XLNet), e.g.: ``./my_model_directory/vocab.txt``.
(Not applicable to all derived classes)
inputs (additional positional arguments, `optional`):
Will be passed along to the Tokenizer ``__init__()`` method.
config (:class:`~transformers.PreTrainedConfig`, `optional`)
The configuration object used to dertermine the tokenizer class to instantiate.
cache_dir (:obj:`str`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download the model weights and configuration files and override the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g.,
:obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each
request.
use_fast (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to try to load the fast version of the tokenizer.
kwargs (additional keyword arguments, `optional`):
Will be passed to the Tokenizer ``__init__()`` method. Can be used to set special tokens like
``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``,
``mask_token``, ``additional_special_tokens``. See parameters in the ``__init__()`` for more details.
Examples::
>>> from transformers import AutoTokenizer
>>> # Download vocabulary from S3 and cache.
>>> tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
>>> # Download vocabulary from S3 (user-uploaded) and cache.
>>> tokenizer = AutoTokenizer.from_pretrained('dbmdz/bert-base-german-cased')
>>> # If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
>>> tokenizer = AutoTokenizer.from_pretrained('./test/bert_saved_model/')
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
if "bert-base-japanese" in str(pretrained_model_name_or_path):
return BertJapaneseTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
use_fast = kwargs.pop("use_fast", False)
if config.tokenizer_class is not None:
if use_fast and not config.tokenizer_class.endswith("Fast"):
tokenizer_class_candidate = f"{config.tokenizer_class}Fast"
else:
tokenizer_class_candidate = config.tokenizer_class
tokenizer_class = globals().get(tokenizer_class_candidate)
if tokenizer_class is None:
raise ValueError(
"Tokenizer class {} does not exist or is not currently imported.".format(tokenizer_class_candidate)
)
return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
# if model is an encoder decoder, the encoder tokenizer class is used by default
if isinstance(config, EncoderDecoderConfig):
if type(config.decoder) is not type(config.encoder): # noqa: E721
logger.warn(
f"The encoder model config class: {config.encoder.__class__} is different from the decoder model "
f"config class: {config.decoder.__class}. It is not recommended to use the "
"`AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder "
"specific tokenizer classes."
)
config = config.encoder
for config_class, (tokenizer_class_py, tokenizer_class_fast) in TOKENIZER_MAPPING.items():
if isinstance(config, config_class):
if tokenizer_class_fast and use_fast:
return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
raise ValueError(
"Unrecognized configuration class {} to build an AutoTokenizer.\n"
"Model type should be one of {}.".format(
config.__class__, ", ".join(c.__name__ for c in TOKENIZER_MAPPING.keys())
)
)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~quip.py | import logging
import re
import xml.etree.cElementTree
import xml.sax.saxutils
from io import BytesIO
from typing import List, Optional, Sequence
from xml.etree.ElementTree import ElementTree
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
_MAXIMUM_TITLE_LENGTH = 64
class QuipLoader(BaseLoader):
"""Load `Quip` pages.
Port of https://github.com/quip/quip-api/tree/master/samples/baqup
"""
def __init__(
self, api_url: str, access_token: str, request_timeout: Optional[int] = 60
):
"""
Args:
api_url: https://platform.quip.com
access_token: token of access quip API. Please refer:
https://quip.com/dev/automation/documentation/current#section/Authentication/Get-Access-to-Quip's-APIs
request_timeout: timeout of request, default 60s.
"""
try:
from quip_api.quip import QuipClient
except ImportError:
raise ImportError(
"`quip_api` package not found, please run " "`pip install quip_api`"
)
self.quip_client = QuipClient(
access_token=access_token, base_url=api_url, request_timeout=request_timeout
)
def load(
self,
folder_ids: Optional[List[str]] = None,
thread_ids: Optional[List[str]] = None,
max_docs: Optional[int] = 1000,
include_all_folders: bool = False,
include_comments: bool = False,
include_images: bool = False,
) -> List[Document]:
"""
Args:
:param folder_ids: List of specific folder IDs to load, defaults to None
:param thread_ids: List of specific thread IDs to load, defaults to None
:param max_docs: Maximum number of docs to retrieve in total, defaults 1000
:param include_all_folders: Include all folders that your access_token
can access, but doesn't include your private folder
:param include_comments: Include comments, defaults to False
:param include_images: Include images, defaults to False
"""
if not folder_ids and not thread_ids and not include_all_folders:
raise ValueError(
"Must specify at least one among `folder_ids`, `thread_ids` "
"or set `include_all`_folders as True"
)
thread_ids = thread_ids or []
if folder_ids:
for folder_id in folder_ids:
self.get_thread_ids_by_folder_id(folder_id, 0, thread_ids)
if include_all_folders:
user = self.quip_client.get_authenticated_user()
if "group_folder_ids" in user:
self.get_thread_ids_by_folder_id(
user["group_folder_ids"], 0, thread_ids
)
if "shared_folder_ids" in user:
self.get_thread_ids_by_folder_id(
user["shared_folder_ids"], 0, thread_ids
)
thread_ids = list(set(thread_ids[:max_docs]))
return self.process_threads(thread_ids, include_images, include_comments)
def get_thread_ids_by_folder_id(
self, folder_id: str, depth: int, thread_ids: List[str]
) -> None:
"""Get thread ids by folder id and update in thread_ids"""
from quip_api.quip import HTTPError, QuipError
try:
folder = self.quip_client.get_folder(folder_id)
except QuipError as e:
if e.code == 403:
logging.warning(
f"depth {depth}, Skipped over restricted folder {folder_id}, {e}"
)
else:
logging.warning(
f"depth {depth}, Skipped over folder {folder_id} "
f"due to unknown error {e.code}"
)
return
except HTTPError as e:
logging.warning(
f"depth {depth}, Skipped over folder {folder_id} "
f"due to HTTP error {e.code}"
)
return
title = folder["folder"].get("title", "Folder %s" % folder_id)
logging.info(f"depth {depth}, Processing folder {title}")
for child in folder["children"]:
if "folder_id" in child:
self.get_thread_ids_by_folder_id(
child["folder_id"], depth + 1, thread_ids
)
elif "thread_id" in child:
thread_ids.append(child["thread_id"])
def process_threads(
self, thread_ids: Sequence[str], include_images: bool, include_messages: bool
) -> List[Document]:
"""Process a list of thread into a list of documents."""
docs = []
for thread_id in thread_ids:
doc = self.process_thread(thread_id, include_images, include_messages)
if doc is not None:
docs.append(doc)
return docs
def process_thread(
self, thread_id: str, include_images: bool, include_messages: bool
) -> Optional[Document]:
thread = self.quip_client.get_thread(thread_id)
thread_id = thread["thread"]["id"]
title = thread["thread"]["title"]
link = thread["thread"]["link"]
update_ts = thread["thread"]["updated_usec"]
sanitized_title = QuipLoader._sanitize_title(title)
logger.info(
f"processing thread {thread_id} title {sanitized_title} "
f"link {link} update_ts {update_ts}"
)
if "html" in thread:
# Parse the document
try:
tree = self.quip_client.parse_document_html(thread["html"])
except xml.etree.cElementTree.ParseError as e:
logger.error(f"Error parsing thread {title} {thread_id}, skipping, {e}")
return None
metadata = {
"title": sanitized_title,
"update_ts": update_ts,
"id": thread_id,
"source": link,
}
# Download each image and replace with the new URL
text = ""
if include_images:
text = self.process_thread_images(tree)
if include_messages:
text = text + "/n" + self.process_thread_messages(thread_id)
return Document(
page_content=thread["html"] + text,
metadata=metadata,
)
return None
def process_thread_images(self, tree: ElementTree) -> str:
text = ""
try:
from PIL import Image
from pytesseract import pytesseract
except ImportError:
raise ImportError(
"`Pillow or pytesseract` package not found, "
"please run "
"`pip install Pillow` or `pip install pytesseract`"
)
for img in tree.iter("img"):
src = img.get("src")
if not src or not src.startswith("/blob"):
continue
_, _, thread_id, blob_id = src.split("/")
blob_response = self.quip_client.get_blob(thread_id, blob_id)
try:
image = Image.open(BytesIO(blob_response.read()))
text = text + "\n" + pytesseract.image_to_string(image)
except OSError as e:
logger.error(f"failed to convert image to text, {e}")
raise e
return text
def process_thread_messages(self, thread_id: str) -> str:
max_created_usec = None
messages = []
while True:
chunk = self.quip_client.get_messages(
thread_id, max_created_usec=max_created_usec, count=100
)
messages.extend(chunk)
if chunk:
max_created_usec = chunk[-1]["created_usec"] - 1
else:
break
messages.reverse()
texts = [message["text"] for message in messages]
return "\n".join(texts)
@staticmethod
def _sanitize_title(title: str) -> str:
sanitized_title = re.sub(r"\s", " ", title)
sanitized_title = re.sub(r"(?u)[^- \w.]", "", sanitized_title)
if len(sanitized_title) > _MAXIMUM_TITLE_LENGTH:
sanitized_title = sanitized_title[:_MAXIMUM_TITLE_LENGTH]
return sanitized_title
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~elastic_vector_search.py | from __future__ import annotations
import uuid
import warnings
from typing import (
TYPE_CHECKING,
Any,
Dict,
Iterable,
List,
Mapping,
Optional,
Tuple,
Union,
)
from libs.core.langchain_core._api import deprecated
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.utils import get_from_dict_or_env
from libs.core.langchain_core.vectorstores import VectorStore
if TYPE_CHECKING:
from elasticsearch import Elasticsearch
def _default_text_mapping(dim: int) -> Dict:
return {
"properties": {
"text": {"type": "text"},
"vector": {"type": "dense_vector", "dims": dim},
}
}
def _default_script_query(query_vector: List[float], filter: Optional[dict]) -> Dict:
if filter:
((key, value),) = filter.items()
filter = {"match": {f"metadata.{key}.keyword": f"{value}"}}
else:
filter = {"match_all": {}}
return {
"script_score": {
"query": filter,
"script": {
"source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",
"params": {"query_vector": query_vector},
},
}
}
class ElasticVectorSearch(VectorStore):
"""
ElasticVectorSearch uses the brute force method of searching on vectors.
Recommended to use ElasticsearchStore instead, which gives you the option
to uses the approx HNSW algorithm which performs better on large datasets.
ElasticsearchStore also supports metadata filtering, customising the
query retriever and much more!
You can read more on ElasticsearchStore:
https://python.langchain.com/docs/integrations/vectorstores/elasticsearch
To connect to an `Elasticsearch` instance that does not require
login credentials, pass the Elasticsearch URL and index name along with the
embedding object to the constructor.
Example:
.. code-block:: python
from langchain_community.vectorstores import ElasticVectorSearch
from langchain_community.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings()
elastic_vector_search = ElasticVectorSearch(
elasticsearch_url="http://localhost:9200",
index_name="test_index",
embedding=embedding
)
To connect to an Elasticsearch instance that requires login credentials,
including Elastic Cloud, use the Elasticsearch URL format
https://username:password@es_host:9243. For example, to connect to Elastic
Cloud, create the Elasticsearch URL with the required authentication details and
pass it to the ElasticVectorSearch constructor as the named parameter
elasticsearch_url.
You can obtain your Elastic Cloud URL and login credentials by logging in to the
Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and
navigating to the "Deployments" page.
To obtain your Elastic Cloud password for the default "elastic" user:
1. Log in to the Elastic Cloud console at https://cloud.elastic.co
2. Go to "Security" > "Users"
3. Locate the "elastic" user and click "Edit"
4. Click "Reset password"
5. Follow the prompts to reset the password
The format for Elastic Cloud URLs is
https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
Example:
.. code-block:: python
from langchain_community.vectorstores import ElasticVectorSearch
from langchain_community.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings()
elastic_host = "cluster_id.region_id.gcp.cloud.es.io"
elasticsearch_url = f"https://username:password@{elastic_host}:9243"
elastic_vector_search = ElasticVectorSearch(
elasticsearch_url=elasticsearch_url,
index_name="test_index",
embedding=embedding
)
Args:
elasticsearch_url (str): The URL for the Elasticsearch instance.
index_name (str): The name of the Elasticsearch index for the embeddings.
embedding (Embeddings): An object that provides the ability to embed text.
It should be an instance of a class that subclasses the Embeddings
abstract base class, such as OpenAIEmbeddings()
Raises:
ValueError: If the elasticsearch python package is not installed.
"""
def __init__(
self,
elasticsearch_url: str,
index_name: str,
embedding: Embeddings,
*,
ssl_verify: Optional[Dict[str, Any]] = None,
):
"""Initialize with necessary components."""
warnings.warn(
"ElasticVectorSearch will be removed in a future release. See"
"Elasticsearch integration docs on how to upgrade."
)
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
self.embedding = embedding
self.index_name = index_name
_ssl_verify = ssl_verify or {}
try:
self.client = elasticsearch.Elasticsearch(
elasticsearch_url,
**_ssl_verify,
headers={"user-agent": self.get_user_agent()},
)
except ValueError as e:
raise ValueError(
f"Your elasticsearch client string is mis-formatted. Got error: {e} "
)
@staticmethod
def get_user_agent() -> str:
from langchain_community import __version__
return f"langchain-py-dvs/{__version__}"
@property
def embeddings(self) -> Embeddings:
return self.embedding
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
refresh_indices: bool = True,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
refresh_indices: bool to refresh ElasticSearch indices
Returns:
List of ids from adding the texts into the vectorstore.
"""
try:
from elasticsearch.exceptions import NotFoundError
from elasticsearch.helpers import bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
requests = []
ids = ids or [str(uuid.uuid4()) for _ in texts]
embeddings = self.embedding.embed_documents(list(texts))
dim = len(embeddings[0])
mapping = _default_text_mapping(dim)
# check to see if the index already exists
try:
self.client.indices.get(index=self.index_name)
except NotFoundError:
# TODO would be nice to create index before embedding,
# just to save expensive steps for last
self.create_index(self.client, self.index_name, mapping)
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
request = {
"_op_type": "index",
"_index": self.index_name,
"vector": embeddings[i],
"text": text,
"metadata": metadata,
"_id": ids[i],
}
requests.append(request)
bulk(self.client, requests)
if refresh_indices:
self.client.indices.refresh(index=self.index_name)
return ids
def similarity_search(
self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(query, k, filter=filter)
documents = [d[0] for d in docs_and_scores]
return documents
def similarity_search_with_score(
self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding.embed_query(query)
script_query = _default_script_query(embedding, filter)
response = self.client_search(
self.client, self.index_name, script_query, size=k
)
hits = [hit for hit in response["hits"]["hits"]]
docs_and_scores = [
(
Document(
page_content=hit["_source"]["text"],
metadata=hit["_source"]["metadata"],
),
hit["_score"],
)
for hit in hits
]
return docs_and_scores
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
index_name: Optional[str] = None,
refresh_indices: bool = True,
**kwargs: Any,
) -> ElasticVectorSearch:
"""Construct ElasticVectorSearch wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the Elasticsearch instance.
3. Adds the documents to the newly created Elasticsearch index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import ElasticVectorSearch
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
elastic_vector_search = ElasticVectorSearch.from_texts(
texts,
embeddings,
elasticsearch_url="http://localhost:9200"
)
"""
elasticsearch_url = get_from_dict_or_env(
kwargs, "elasticsearch_url", "ELASTICSEARCH_URL"
)
if "elasticsearch_url" in kwargs:
del kwargs["elasticsearch_url"]
index_name = index_name or uuid.uuid4().hex
vectorsearch = cls(elasticsearch_url, index_name, embedding, **kwargs)
vectorsearch.add_texts(
texts, metadatas=metadatas, ids=ids, refresh_indices=refresh_indices
)
return vectorsearch
def create_index(self, client: Any, index_name: str, mapping: Dict) -> None:
version_num = client.info()["version"]["number"][0]
version_num = int(version_num)
if version_num >= 8:
client.indices.create(index=index_name, mappings=mapping)
else:
client.indices.create(index=index_name, body={"mappings": mapping})
def client_search(
self, client: Any, index_name: str, script_query: Dict, size: int
) -> Any:
version_num = client.info()["version"]["number"][0]
version_num = int(version_num)
if version_num >= 8:
response = client.search(index=index_name, query=script_query, size=size)
else:
response = client.search(
index=index_name, body={"query": script_query, "size": size}
)
return response
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> None:
"""Delete by vector IDs.
Args:
ids: List of ids to delete.
"""
if ids is None:
raise ValueError("No ids provided to delete.")
# TODO: Check if this can be done in bulk
for id in ids:
self.client.delete(index=self.index_name, id=id)
@deprecated("0.0.265", alternative="ElasticsearchStore class.", pending=True)
class ElasticKnnSearch(VectorStore):
"""[DEPRECATED] `Elasticsearch` with k-nearest neighbor search
(`k-NN`) vector store.
Recommended to use ElasticsearchStore instead, which supports
metadata filtering, customising the query retriever and much more!
You can read more on ElasticsearchStore:
https://python.langchain.com/docs/integrations/vectorstores/elasticsearch
It creates an Elasticsearch index of text data that
can be searched using k-NN search. The text data is transformed into
vector embeddings using a provided embedding model, and these embeddings
are stored in the Elasticsearch index.
Attributes:
index_name (str): The name of the Elasticsearch index.
embedding (Embeddings): The embedding model to use for transforming text data
into vector embeddings.
es_connection (Elasticsearch, optional): An existing Elasticsearch connection.
es_cloud_id (str, optional): The Cloud ID of your Elasticsearch Service
deployment.
es_user (str, optional): The username for your Elasticsearch Service deployment.
es_password (str, optional): The password for your Elasticsearch Service
deployment.
vector_query_field (str, optional): The name of the field in the Elasticsearch
index that contains the vector embeddings.
query_field (str, optional): The name of the field in the Elasticsearch index
that contains the original text data.
Usage:
>>> from embeddings import Embeddings
>>> embedding = Embeddings.load('glove')
>>> es_search = ElasticKnnSearch('my_index', embedding)
>>> es_search.add_texts(['Hello world!', 'Another text'])
>>> results = es_search.knn_search('Hello')
[(Document(page_content='Hello world!', metadata={}), 0.9)]
"""
def __init__(
self,
index_name: str,
embedding: Embeddings,
es_connection: Optional["Elasticsearch"] = None,
es_cloud_id: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
vector_query_field: Optional[str] = "vector",
query_field: Optional[str] = "text",
):
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
warnings.warn(
"ElasticKnnSearch will be removed in a future release."
"Use ElasticsearchStore instead. See Elasticsearch "
"integration docs on how to upgrade."
)
self.embedding = embedding
self.index_name = index_name
self.query_field = query_field
self.vector_query_field = vector_query_field
# If a pre-existing Elasticsearch connection is provided, use it.
if es_connection is not None:
self.client = es_connection
else:
# If credentials for a new Elasticsearch connection are provided,
# create a new connection.
if es_cloud_id and es_user and es_password:
self.client = elasticsearch.Elasticsearch(
cloud_id=es_cloud_id, basic_auth=(es_user, es_password)
)
else:
raise ValueError(
"""Either provide a pre-existing Elasticsearch connection, \
or valid credentials for creating a new connection."""
)
@staticmethod
def _default_knn_mapping(
dims: int, similarity: Optional[str] = "dot_product"
) -> Dict:
return {
"properties": {
"text": {"type": "text"},
"vector": {
"type": "dense_vector",
"dims": dims,
"index": True,
"similarity": similarity,
},
}
}
def _default_knn_query(
self,
query_vector: Optional[List[float]] = None,
query: Optional[str] = None,
model_id: Optional[str] = None,
k: Optional[int] = 10,
num_candidates: Optional[int] = 10,
) -> Dict:
knn: Dict = {
"field": self.vector_query_field,
"k": k,
"num_candidates": num_candidates,
}
# Case 1: `query_vector` is provided, but not `model_id` -> use query_vector
if query_vector and not model_id:
knn["query_vector"] = query_vector
# Case 2: `query` and `model_id` are provided, -> use query_vector_builder
elif query and model_id:
knn["query_vector_builder"] = {
"text_embedding": {
"model_id": model_id, # use 'model_id' argument
"model_text": query, # use 'query' argument
}
}
else:
raise ValueError(
"Either `query_vector` or `model_id` must be provided, but not both."
)
return knn
def similarity_search(
self, query: str, k: int = 4, filter: Optional[dict] = None, **kwargs: Any
) -> List[Document]:
"""
Pass through to `knn_search`
"""
results = self.knn_search(query=query, k=k, **kwargs)
return [doc for doc, score in results]
def similarity_search_with_score(
self, query: str, k: int = 10, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""Pass through to `knn_search including score`"""
return self.knn_search(query=query, k=k, **kwargs)
def knn_search(
self,
query: Optional[str] = None,
k: Optional[int] = 10,
query_vector: Optional[List[float]] = None,
model_id: Optional[str] = None,
size: Optional[int] = 10,
source: Optional[bool] = True,
fields: Optional[
Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None]
] = None,
page_content: Optional[str] = "text",
) -> List[Tuple[Document, float]]:
"""
Perform a k-NN search on the Elasticsearch index.
Args:
query (str, optional): The query text to search for.
k (int, optional): The number of nearest neighbors to return.
query_vector (List[float], optional): The query vector to search for.
model_id (str, optional): The ID of the model to use for transforming the
query text into a vector.
size (int, optional): The number of search results to return.
source (bool, optional): Whether to return the source of the search results.
fields (List[Mapping[str, Any]], optional): The fields to return in the
search results.
page_content (str, optional): The name of the field that contains the page
content.
Returns:
A list of tuples, where each tuple contains a Document object and a score.
"""
# if not source and (fields == None or page_content not in fields):
if not source and (
fields is None or not any(page_content in field for field in fields)
):
raise ValueError("If source=False `page_content` field must be in `fields`")
knn_query_body = self._default_knn_query(
query_vector=query_vector, query=query, model_id=model_id, k=k
)
# Perform the kNN search on the Elasticsearch index and return the results.
response = self.client.search(
index=self.index_name,
knn=knn_query_body,
size=size,
source=source,
fields=fields,
)
hits = [hit for hit in response["hits"]["hits"]]
docs_and_scores = [
(
Document(
page_content=hit["_source"][page_content]
if source
else hit["fields"][page_content][0],
metadata=hit["fields"] if fields else {},
),
hit["_score"],
)
for hit in hits
]
return docs_and_scores
def knn_hybrid_search(
self,
query: Optional[str] = None,
k: Optional[int] = 10,
query_vector: Optional[List[float]] = None,
model_id: Optional[str] = None,
size: Optional[int] = 10,
source: Optional[bool] = True,
knn_boost: Optional[float] = 0.9,
query_boost: Optional[float] = 0.1,
fields: Optional[
Union[List[Mapping[str, Any]], Tuple[Mapping[str, Any], ...], None]
] = None,
page_content: Optional[str] = "text",
) -> List[Tuple[Document, float]]:
"""
Perform a hybrid k-NN and text search on the Elasticsearch index.
Args:
query (str, optional): The query text to search for.
k (int, optional): The number of nearest neighbors to return.
query_vector (List[float], optional): The query vector to search for.
model_id (str, optional): The ID of the model to use for transforming the
query text into a vector.
size (int, optional): The number of search results to return.
source (bool, optional): Whether to return the source of the search results.
knn_boost (float, optional): The boost value to apply to the k-NN search
results.
query_boost (float, optional): The boost value to apply to the text search
results.
fields (List[Mapping[str, Any]], optional): The fields to return in the
search results.
page_content (str, optional): The name of the field that contains the page
content.
Returns:
A list of tuples, where each tuple contains a Document object and a score.
"""
# if not source and (fields == None or page_content not in fields):
if not source and (
fields is None or not any(page_content in field for field in fields)
):
raise ValueError("If source=False `page_content` field must be in `fields`")
knn_query_body = self._default_knn_query(
query_vector=query_vector, query=query, model_id=model_id, k=k
)
# Modify the knn_query_body to add a "boost" parameter
knn_query_body["boost"] = knn_boost
# Generate the body of the standard Elasticsearch query
match_query_body = {
"match": {self.query_field: {"query": query, "boost": query_boost}}
}
# Perform the hybrid search on the Elasticsearch index and return the results.
response = self.client.search(
index=self.index_name,
query=match_query_body,
knn=knn_query_body,
fields=fields,
size=size,
source=source,
)
hits = [hit for hit in response["hits"]["hits"]]
docs_and_scores = [
(
Document(
page_content=hit["_source"][page_content]
if source
else hit["fields"][page_content][0],
metadata=hit["fields"] if fields else {},
),
hit["_score"],
)
for hit in hits
]
return docs_and_scores
def create_knn_index(self, mapping: Dict) -> None:
"""
Create a new k-NN index in Elasticsearch.
Args:
mapping (Dict): The mapping to use for the new index.
Returns:
None
"""
self.client.indices.create(index=self.index_name, mappings=mapping)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[Dict[Any, Any]]] = None,
model_id: Optional[str] = None,
refresh_indices: bool = False,
**kwargs: Any,
) -> List[str]:
"""
Add a list of texts to the Elasticsearch index.
Args:
texts (Iterable[str]): The texts to add to the index.
metadatas (List[Dict[Any, Any]], optional): A list of metadata dictionaries
to associate with the texts.
model_id (str, optional): The ID of the model to use for transforming the
texts into vectors.
refresh_indices (bool, optional): Whether to refresh the Elasticsearch
indices after adding the texts.
**kwargs: Arbitrary keyword arguments.
Returns:
A list of IDs for the added texts.
"""
# Check if the index exists.
if not self.client.indices.exists(index=self.index_name):
dims = kwargs.get("dims")
if dims is None:
raise ValueError("ElasticKnnSearch requires 'dims' parameter")
similarity = kwargs.get("similarity")
optional_args = {}
if similarity is not None:
optional_args["similarity"] = similarity
mapping = self._default_knn_mapping(dims=dims, **optional_args)
self.create_knn_index(mapping)
embeddings = self.embedding.embed_documents(list(texts))
# body = []
body: List[Mapping[str, Any]] = []
for text, vector in zip(texts, embeddings):
body.extend(
[
{"index": {"_index": self.index_name}},
{"text": text, "vector": vector},
]
)
responses = self.client.bulk(operations=body)
ids = [
item["index"]["_id"]
for item in responses["items"]
if item["index"]["result"] == "created"
]
if refresh_indices:
self.client.indices.refresh(index=self.index_name)
return ids
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[Dict[Any, Any]]] = None,
**kwargs: Any,
) -> ElasticKnnSearch:
"""
Create a new ElasticKnnSearch instance and add a list of texts to the
Elasticsearch index.
Args:
texts (List[str]): The texts to add to the index.
embedding (Embeddings): The embedding model to use for transforming the
texts into vectors.
metadatas (List[Dict[Any, Any]], optional): A list of metadata dictionaries
to associate with the texts.
**kwargs: Arbitrary keyword arguments.
Returns:
A new ElasticKnnSearch instance.
"""
index_name = kwargs.get("index_name", str(uuid.uuid4()))
es_connection = kwargs.get("es_connection")
es_cloud_id = kwargs.get("es_cloud_id")
es_user = kwargs.get("es_user")
es_password = kwargs.get("es_password")
vector_query_field = kwargs.get("vector_query_field", "vector")
query_field = kwargs.get("query_field", "text")
model_id = kwargs.get("model_id")
dims = kwargs.get("dims")
if dims is None:
raise ValueError("ElasticKnnSearch requires 'dims' parameter")
optional_args = {}
if vector_query_field is not None:
optional_args["vector_query_field"] = vector_query_field
if query_field is not None:
optional_args["query_field"] = query_field
knnvectorsearch = cls(
index_name=index_name,
embedding=embedding,
es_connection=es_connection,
es_cloud_id=es_cloud_id,
es_user=es_user,
es_password=es_password,
**optional_args,
)
# Encode the provided texts and add them to the newly created index.
knnvectorsearch.add_texts(texts, model_id=model_id, dims=dims, **optional_args)
return knnvectorsearch
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~chat_models~bedrock.py | from typing import Any, Dict, Iterator, List, Optional
from libs.core.langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.chat_models import BaseChatModel
from libs.core.langchain_core.messages import AIMessage, AIMessageChunk, BaseMessage
from libs.core.langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from libs.core.langchain_core.pydantic_v1 import Extra
from langchain_community.chat_models.anthropic import (
convert_messages_to_prompt_anthropic,
)
from langchain_community.chat_models.meta import convert_messages_to_prompt_llama
from langchain_community.llms.bedrock import BedrockBase
from langchain_community.utilities.anthropic import (
get_num_tokens_anthropic,
get_token_ids_anthropic,
)
class ChatPromptAdapter:
"""Adapter class to prepare the inputs from Langchain to prompt format
that Chat model expects.
"""
@classmethod
def convert_messages_to_prompt(
cls, provider: str, messages: List[BaseMessage]
) -> str:
if provider == "anthropic":
prompt = convert_messages_to_prompt_anthropic(messages=messages)
elif provider == "meta":
prompt = convert_messages_to_prompt_llama(messages=messages)
else:
raise NotImplementedError(
f"Provider {provider} model does not support chat."
)
return prompt
class BedrockChat(BaseChatModel, BedrockBase):
"""A chat model that uses the Bedrock API."""
@property
def _llm_type(self) -> str:
"""Return type of chat model."""
return "amazon_bedrock_chat"
@classmethod
def is_lc_serializable(cls) -> bool:
"""Return whether this model can be serialized by Langchain."""
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
"""Get the namespace of the langchain object."""
return ["langchain", "chat_models", "bedrock"]
@property
def lc_attributes(self) -> Dict[str, Any]:
attributes: Dict[str, Any] = {}
if self.region_name:
attributes["region_name"] = self.region_name
return attributes
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
provider = self._get_provider()
prompt = ChatPromptAdapter.convert_messages_to_prompt(
provider=provider, messages=messages
)
for chunk in self._prepare_input_and_invoke_stream(
prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
):
delta = chunk.text
yield ChatGenerationChunk(message=AIMessageChunk(content=delta))
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
completion = ""
if self.streaming:
for chunk in self._stream(messages, stop, run_manager, **kwargs):
completion += chunk.text
else:
provider = self._get_provider()
prompt = ChatPromptAdapter.convert_messages_to_prompt(
provider=provider, messages=messages
)
params: Dict[str, Any] = {**kwargs}
if stop:
params["stop_sequences"] = stop
completion = self._prepare_input_and_invoke(
prompt=prompt, stop=stop, run_manager=run_manager, **params
)
message = AIMessage(content=completion)
return ChatResult(generations=[ChatGeneration(message=message)])
def get_num_tokens(self, text: str) -> int:
if self._model_is_anthropic:
return get_num_tokens_anthropic(text)
else:
return super().get_num_tokens(text)
def get_token_ids(self, text: str) -> List[int]:
if self._model_is_anthropic:
return get_token_ids_anthropic(text)
else:
return super().get_token_ids(text)
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~api~openapi~response_chain.py | """Response parser."""
import json
import re
from typing import Any
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.output_parsers import BaseOutputParser
from libs.core.langchain_core.prompts.prompt import PromptTemplate
from langchain.chains.api.openapi.prompts import RESPONSE_TEMPLATE
from langchain.chains.llm import LLMChain
class APIResponderOutputParser(BaseOutputParser):
"""Parse the response and error tags."""
def _load_json_block(self, serialized_block: str) -> str:
try:
response_content = json.loads(serialized_block, strict=False)
return response_content.get("response", "ERROR parsing response.")
except json.JSONDecodeError:
return "ERROR parsing response."
except:
raise
def parse(self, llm_output: str) -> str:
"""Parse the response and error tags."""
json_match = re.search(r"```json(.*?)```", llm_output, re.DOTALL)
if json_match:
return self._load_json_block(json_match.group(1).strip())
else:
raise ValueError(f"No response found in output: {llm_output}.")
@property
def _type(self) -> str:
return "api_responder"
class APIResponderChain(LLMChain):
"""Get the response parser."""
@classmethod
def is_lc_serializable(cls) -> bool:
return False
@classmethod
def from_llm(
cls, llm: BaseLanguageModel, verbose: bool = True, **kwargs: Any
) -> LLMChain:
"""Get the response parser."""
output_parser = APIResponderOutputParser()
prompt = PromptTemplate(
template=RESPONSE_TEMPLATE,
output_parser=output_parser,
input_variables=["response", "instructions"],
)
return cls(prompt=prompt, llm=llm, verbose=verbose, **kwargs)
| [
"instructions",
"response"
] |
2024-01-10 | mth93/langchain | templates~propositional-retrieval~propositional_retrieval~proposal_chain.py | import logging
from langchain.output_parsers.openai_tools import JsonOutputToolsParser
from langchain_community.chat_models import ChatOpenAI
from libs.core.langchain_core.prompts import ChatPromptTemplate
from libs.core.langchain_core.runnables import RunnableLambda
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Modified from the paper to be more robust to benign prompt injection
# https://arxiv.org/abs/2312.06648
# @misc{chen2023dense,
# title={Dense X Retrieval: What Retrieval Granularity Should We Use?},
# author={Tong Chen and Hongwei Wang and Sihao Chen and Wenhao Yu and Kaixin Ma
# and Xinran Zhao and Hongming Zhang and Dong Yu},
# year={2023},
# eprint={2312.06648},
# archivePrefix={arXiv},
# primaryClass={cs.CL}
# }
PROMPT = ChatPromptTemplate.from_messages(
[
(
"system",
"""Decompose the "Content" into clear and simple propositions, ensuring they are interpretable out of
context.
1. Split compound sentence into simple sentences. Maintain the original phrasing from the input
whenever possible.
2. For any named entity that is accompanied by additional descriptive information, separate this
information into its own distinct proposition.
3. Decontextualize the proposition by adding necessary modifier to nouns or entire sentences
and replacing pronouns (e.g., "it", "he", "she", "they", "this", "that") with the full name of the
entities they refer to.
4. Present the results as a list of strings, formatted in JSON.
Example:
Input: Title: ¯Eostre. Section: Theories and interpretations, Connection to Easter Hares. Content:
The earliest evidence for the Easter Hare (Osterhase) was recorded in south-west Germany in
1678 by the professor of medicine Georg Franck von Franckenau, but it remained unknown in
other parts of Germany until the 18th century. Scholar Richard Sermon writes that "hares were
frequently seen in gardens in spring, and thus may have served as a convenient explanation for the
origin of the colored eggs hidden there for children. Alternatively, there is a European tradition
that hares laid eggs, since a hare’s scratch or form and a lapwing’s nest look very similar, and
both occur on grassland and are first seen in the spring. In the nineteenth century the influence
of Easter cards, toys, and books was to make the Easter Hare/Rabbit popular throughout Europe.
German immigrants then exported the custom to Britain and America where it evolved into the
Easter Bunny."
Output: [ "The earliest evidence for the Easter Hare was recorded in south-west Germany in
1678 by Georg Franck von Franckenau.", "Georg Franck von Franckenau was a professor of
medicine.", "The evidence for the Easter Hare remained unknown in other parts of Germany until
the 18th century.", "Richard Sermon was a scholar.", "Richard Sermon writes a hypothesis about
the possible explanation for the connection between hares and the tradition during Easter", "Hares
were frequently seen in gardens in spring.", "Hares may have served as a convenient explanation
for the origin of the colored eggs hidden in gardens for children.", "There is a European tradition
that hares laid eggs.", "A hare’s scratch or form and a lapwing’s nest look very similar.", "Both
hares and lapwing’s nests occur on grassland and are first seen in the spring.", "In the nineteenth
century the influence of Easter cards, toys, and books was to make the Easter Hare/Rabbit popular
throughout Europe.", "German immigrants exported the custom of the Easter Hare/Rabbit to
Britain and America.", "The custom of the Easter Hare/Rabbit evolved into the Easter Bunny in
Britain and America."]""", # noqa
),
("user", "Decompose the following:\n{input}"),
]
)
def get_propositions(tool_calls: list) -> list:
if not tool_calls:
raise ValueError("No tool calls found")
return tool_calls[0]["args"]["propositions"]
def empty_proposals(x):
# Model couldn't generate proposals
return []
proposition_chain = (
PROMPT
| ChatOpenAI(model="gpt-3.5-turbo-16k").bind(
tools=[
{
"type": "function",
"function": {
"name": "decompose_content",
"description": "Return the decomposed propositions",
"parameters": {
"type": "object",
"properties": {
"propositions": {
"type": "array",
"items": {"type": "string"},
}
},
"required": ["propositions"],
},
},
}
],
tool_choice={"type": "function", "function": {"name": "decompose_content"}},
)
| JsonOutputToolsParser()
| get_propositions
).with_fallbacks([RunnableLambda(empty_proposals)])
| [
"[('system', 'Decompose the \"Content\" into clear and simple propositions, ensuring they are interpretable out of\\ncontext.\\n1. Split compound sentence into simple sentences. Maintain the original phrasing from the input\\nwhenever possible.\\n2. For any named entity that is accompanied by additional descriptive information, separate this\\ninformation into its own distinct proposition.\\n3. Decontextualize the proposition by adding necessary modifier to nouns or entire sentences\\nand replacing pronouns (e.g., \"it\", \"he\", \"she\", \"they\", \"this\", \"that\") with the full name of the\\nentities they refer to.\\n4. Present the results as a list of strings, formatted in JSON.\\n\\nExample:\\n\\nInput: Title: ¯Eostre. Section: Theories and interpretations, Connection to Easter Hares. Content:\\nThe earliest evidence for the Easter Hare (Osterhase) was recorded in south-west Germany in\\n1678 by the professor of medicine Georg Franck von Franckenau, but it remained unknown in\\nother parts of Germany until the 18th century. Scholar Richard Sermon writes that \"hares were\\nfrequently seen in gardens in spring, and thus may have served as a convenient explanation for the\\norigin of the colored eggs hidden there for children. Alternatively, there is a European tradition\\nthat hares laid eggs, since a hare’s scratch or form and a lapwing’s nest look very similar, and\\nboth occur on grassland and are first seen in the spring. In the nineteenth century the influence\\nof Easter cards, toys, and books was to make the Easter Hare/Rabbit popular throughout Europe.\\nGerman immigrants then exported the custom to Britain and America where it evolved into the\\nEaster Bunny.\"\\nOutput: [ \"The earliest evidence for the Easter Hare was recorded in south-west Germany in\\n1678 by Georg Franck von Franckenau.\", \"Georg Franck von Franckenau was a professor of\\nmedicine.\", \"The evidence for the Easter Hare remained unknown in other parts of Germany until\\nthe 18th century.\", \"Richard Sermon was a scholar.\", \"Richard Sermon writes a hypothesis about\\nthe possible explanation for the connection between hares and the tradition during Easter\", \"Hares\\nwere frequently seen in gardens in spring.\", \"Hares may have served as a convenient explanation\\nfor the origin of the colored eggs hidden in gardens for children.\", \"There is a European tradition\\nthat hares laid eggs.\", \"A hare’s scratch or form and a lapwing’s nest look very similar.\", \"Both\\nhares and lapwing’s nests occur on grassland and are first seen in the spring.\", \"In the nineteenth\\ncentury the influence of Easter cards, toys, and books was to make the Easter Hare/Rabbit popular\\nthroughout Europe.\", \"German immigrants exported the custom of the Easter Hare/Rabbit to\\nBritain and America.\", \"The custom of the Easter Hare/Rabbit evolved into the Easter Bunny in\\nBritain and America.\"]'), ('user', 'Decompose the following:\\n{input}')]",
"Decompose the \"Content\" into clear and simple propositions, ensuring they are interpretable out of\ncontext.\n1. Split compound sentence into simple sentences. Maintain the original phrasing from the input\nwhenever possible.\n2. For any named entity that is accompanied by additional descriptive information, separate this\ninformation into its own distinct proposition.\n3. Decontextualize the proposition by adding necessary modifier to nouns or entire sentences\nand replacing pronouns (e.g., \"it\", \"he\", \"she\", \"they\", \"this\", \"that\") with the full name of the\nentities they refer to.\n4. Present the results as a list of strings, formatted in JSON.\n\nExample:\n\nInput: Title: ¯Eostre. Section: Theories and interpretations, Connection to Easter Hares. Content:\nThe earliest evidence for the Easter Hare (Osterhase) was recorded in south-west Germany in\n1678 by the professor of medicine Georg Franck von Franckenau, but it remained unknown in\nother parts of Germany until the 18th century. Scholar Richard Sermon writes that \"hares were\nfrequently seen in gardens in spring, and thus may have served as a convenient explanation for the\norigin of the colored eggs hidden there for children. Alternatively, there is a European tradition\nthat hares laid eggs, since a hare’s scratch or form and a lapwing’s nest look very similar, and\nboth occur on grassland and are first seen in the spring. In the nineteenth century the influence\nof Easter cards, toys, and books was to make the Easter Hare/Rabbit popular throughout Europe.\nGerman immigrants then exported the custom to Britain and America where it evolved into the\nEaster Bunny.\"\nOutput: [ \"The earliest evidence for the Easter Hare was recorded in south-west Germany in\n1678 by Georg Franck von Franckenau.\", \"Georg Franck von Franckenau was a professor of\nmedicine.\", \"The evidence for the Easter Hare remained unknown in other parts of Germany until\nthe 18th century.\", \"Richard Sermon was a scholar.\", \"Richard Sermon writes a hypothesis about\nthe possible explanation for the connection between hares and the tradition during Easter\", \"Hares\nwere frequently seen in gardens in spring.\", \"Hares may have served as a convenient explanation\nfor the origin of the colored eggs hidden in gardens for children.\", \"There is a European tradition\nthat hares laid eggs.\", \"A hare’s scratch or form and a lapwing’s nest look very similar.\", \"Both\nhares and lapwing’s nests occur on grassland and are first seen in the spring.\", \"In the nineteenth\ncentury the influence of Easter cards, toys, and books was to make the Easter Hare/Rabbit popular\nthroughout Europe.\", \"German immigrants exported the custom of the Easter Hare/Rabbit to\nBritain and America.\", \"The custom of the Easter Hare/Rabbit evolved into the Easter Bunny in\nBritain and America.\"]",
"Decompose the following:\n{input}"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~agent_toolkits~office365~toolkit.py | from __future__ import annotations
from typing import TYPE_CHECKING, List
from libs.core.langchain_core.pydantic_v1 import Field
from langchain_community.agent_toolkits.base import BaseToolkit
from langchain_community.tools import BaseTool
from langchain_community.tools.office365.create_draft_message import (
O365CreateDraftMessage,
)
from langchain_community.tools.office365.events_search import O365SearchEvents
from langchain_community.tools.office365.messages_search import O365SearchEmails
from langchain_community.tools.office365.send_event import O365SendEvent
from langchain_community.tools.office365.send_message import O365SendMessage
from langchain_community.tools.office365.utils import authenticate
if TYPE_CHECKING:
from O365 import Account
class O365Toolkit(BaseToolkit):
"""Toolkit for interacting with Office 365.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by reading, creating, updating, deleting
data associated with this service.
For example, this toolkit can be used search through emails and events,
send messages and event invites, and create draft messages.
Please make sure that the permissions given by this toolkit
are appropriate for your use case.
See https://python.langchain.com/docs/security for more information.
"""
account: Account = Field(default_factory=authenticate)
class Config:
"""Pydantic config."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
return [
O365SearchEvents(),
O365CreateDraftMessage(),
O365SearchEmails(),
O365SendEvent(),
O365SendMessage(),
]
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~clarifai.py | import logging
from typing import Any, Dict, List, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.outputs import Generation, LLMResult
from libs.core.langchain_core.pydantic_v1 import Extra, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
EXAMPLE_URL = "https://clarifai.com/openai/chat-completion/models/GPT-4"
class Clarifai(LLM):
"""Clarifai large language models.
To use, you should have an account on the Clarifai platform,
the ``clarifai`` python package installed, and the
environment variable ``CLARIFAI_PAT`` set with your PAT key,
or pass it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms import Clarifai
clarifai_llm = Clarifai(user_id=USER_ID, app_id=APP_ID, model_id=MODEL_ID)
(or)
clarifai_llm = Clarifai(model_url=EXAMPLE_URL)
"""
model_url: Optional[str] = None
"""Model url to use."""
model_id: Optional[str] = None
"""Model id to use."""
model_version_id: Optional[str] = None
"""Model version id to use."""
app_id: Optional[str] = None
"""Clarifai application id to use."""
user_id: Optional[str] = None
"""Clarifai user id to use."""
pat: Optional[str] = None
"""Clarifai personal access token to use."""
api_base: str = "https://api.clarifai.com"
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that we have all required info to access Clarifai
platform and python package exists in environment."""
values["pat"] = get_from_dict_or_env(values, "pat", "CLARIFAI_PAT")
user_id = values.get("user_id")
app_id = values.get("app_id")
model_id = values.get("model_id")
model_url = values.get("model_url")
if model_url is not None and model_id is not None:
raise ValueError("Please provide either model_url or model_id, not both.")
if model_url is None and model_id is None:
raise ValueError("Please provide one of model_url or model_id.")
if model_url is None and model_id is not None:
if user_id is None or app_id is None:
raise ValueError("Please provide a user_id and app_id.")
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Clarifai API."""
return {}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {
**{
"model_url": self.model_url,
"user_id": self.user_id,
"app_id": self.app_id,
"model_id": self.model_id,
}
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "clarifai"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
inference_params: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> str:
"""Call out to Clarfai's PostModelOutputs endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = clarifai_llm("Tell me a joke.")
"""
# If version_id None, Defaults to the latest model version
try:
from clarifai.client.model import Model
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
if self.pat is not None:
pat = self.pat
if self.model_url is not None:
_model_init = Model(url=self.model_url, pat=pat)
else:
_model_init = Model(
model_id=self.model_id,
user_id=self.user_id,
app_id=self.app_id,
pat=pat,
)
try:
(inference_params := {}) if inference_params is None else inference_params
predict_response = _model_init.predict_by_bytes(
bytes(prompt, "utf-8"),
input_type="text",
inference_params=inference_params,
)
text = predict_response.outputs[0].data.text.raw
if stop is not None:
text = enforce_stop_tokens(text, stop)
except Exception as e:
logger.error(f"Predict failed, exception: {e}")
return text
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
inference_params: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# TODO: add caching here.
try:
from clarifai.client.input import Inputs
from clarifai.client.model import Model
except ImportError:
raise ImportError(
"Could not import clarifai python package. "
"Please install it with `pip install clarifai`."
)
if self.pat is not None:
pat = self.pat
if self.model_url is not None:
_model_init = Model(url=self.model_url, pat=pat)
else:
_model_init = Model(
model_id=self.model_id,
user_id=self.user_id,
app_id=self.app_id,
pat=pat,
)
generations = []
batch_size = 32
input_obj = Inputs(pat=pat)
try:
for i in range(0, len(prompts), batch_size):
batch = prompts[i : i + batch_size]
input_batch = [
input_obj.get_text_input(input_id=str(id), raw_text=inp)
for id, inp in enumerate(batch)
]
(
inference_params := {}
) if inference_params is None else inference_params
predict_response = _model_init.predict(
inputs=input_batch, inference_params=inference_params
)
for output in predict_response.outputs:
if stop is not None:
text = enforce_stop_tokens(output.data.text.raw, stop)
else:
text = output.data.text.raw
generations.append([Generation(text=text)])
except Exception as e:
logger.error(f"Predict failed, exception: {e}")
return LLMResult(generations=generations)
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~hub.py | """Interface with the LangChain Hub."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from libs.core.langchain_core.load.dump import dumps
from libs.core.langchain_core.load.load import loads
if TYPE_CHECKING:
from langchainhub import Client
def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client:
try:
from langchainhub import Client
except ImportError as e:
raise ImportError(
"Could not import langchainhub, please install with `pip install "
"langchainhub`."
) from e
# Client logic will also attempt to load URL/key from environment variables
return Client(api_url, api_key=api_key)
def push(
repo_full_name: str,
object: Any,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = "latest",
new_repo_is_public: bool = True,
new_repo_description: str = "",
) -> str:
"""
Pushes an object to the hub and returns the URL it can be viewed at in a browser.
:param repo_full_name: The full name of the repo to push to in the format of
`owner/repo`.
:param object: The LangChain to serialize and push to the hub.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
:param parent_commit_hash: The commit hash of the parent commit to push to. Defaults
to the latest commit automatically.
:param new_repo_is_public: Whether the repo should be public. Defaults to
True (Public by default).
:param new_repo_description: The description of the repo. Defaults to an empty
string.
"""
client = _get_client(api_url=api_url, api_key=api_key)
manifest_json = dumps(object)
message = client.push(
repo_full_name,
manifest_json,
parent_commit_hash=parent_commit_hash,
new_repo_is_public=new_repo_is_public,
new_repo_description=new_repo_description,
)
return message
def pull(
owner_repo_commit: str,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
Pulls an object from the hub and returns it as a LangChain object.
:param owner_repo_commit: The full name of the repo to pull from in the format of
`owner/repo:commit_hash`.
:param api_url: The URL of the LangChain Hub API. Defaults to the hosted API service
if you have an api key set, or a localhost instance if not.
:param api_key: The API key to use to authenticate with the LangChain Hub API.
"""
client = _get_client(api_url=api_url, api_key=api_key)
resp: str = client.pull(owner_repo_commit)
return loads(resp)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~image_captions.py | from io import BytesIO
from typing import Any, List, Tuple, Union
import requests
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class ImageCaptionLoader(BaseLoader):
"""Load image captions.
By default, the loader utilizes the pre-trained
Salesforce BLIP image captioning model.
https://huggingface.co/Salesforce/blip-image-captioning-base
"""
def __init__(
self,
images: Union[str, bytes, List[Union[str, bytes]]],
blip_processor: str = "Salesforce/blip-image-captioning-base",
blip_model: str = "Salesforce/blip-image-captioning-base",
):
"""Initialize with a list of image data (bytes) or file paths
Args:
images: Either a single image or a list of images. Accepts
image data (bytes) or file paths to images.
blip_processor: The name of the pre-trained BLIP processor.
blip_model: The name of the pre-trained BLIP model.
"""
if isinstance(images, (str, bytes)):
self.images = [images]
else:
self.images = images
self.blip_processor = blip_processor
self.blip_model = blip_model
def load(self) -> List[Document]:
"""Load from a list of image data or file paths"""
try:
from transformers import BlipForConditionalGeneration, BlipProcessor
except ImportError:
raise ImportError(
"`transformers` package not found, please install with "
"`pip install transformers`."
)
processor = BlipProcessor.from_pretrained(self.blip_processor)
model = BlipForConditionalGeneration.from_pretrained(self.blip_model)
results = []
for image in self.images:
caption, metadata = self._get_captions_and_metadata(
model=model, processor=processor, image=image
)
doc = Document(page_content=caption, metadata=metadata)
results.append(doc)
return results
def _get_captions_and_metadata(
self, model: Any, processor: Any, image: Union[str, bytes]
) -> Tuple[str, dict]:
"""Helper function for getting the captions and metadata of an image."""
try:
from PIL import Image
except ImportError:
raise ImportError(
"`PIL` package not found, please install with `pip install pillow`"
)
image_source = image # Save the original source for later reference
try:
if isinstance(image, bytes):
image = Image.open(BytesIO(image)).convert("RGB")
elif image.startswith("http://") or image.startswith("https://"):
image = Image.open(requests.get(image, stream=True).raw).convert("RGB")
else:
image = Image.open(image).convert("RGB")
except Exception:
if isinstance(image_source, bytes):
msg = "Could not get image data from bytes"
else:
msg = f"Could not get image data for {image_source}"
raise ValueError(msg)
inputs = processor(image, "an image of", return_tensors="pt")
output = model.generate(**inputs)
caption: str = processor.decode(output[0])
if isinstance(image_source, bytes):
metadata: dict = {"image_source": "Image bytes provided"}
else:
metadata = {"image_path": image_source}
return caption, metadata
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~office365~send_message.py | from typing import List, Optional, Type
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from langchain_community.tools.office365.base import O365BaseTool
class SendMessageSchema(BaseModel):
"""Input for SendMessageTool."""
body: str = Field(
...,
description="The message body to be sent.",
)
to: List[str] = Field(
...,
description="The list of recipients.",
)
subject: str = Field(
...,
description="The subject of the message.",
)
cc: Optional[List[str]] = Field(
None,
description="The list of CC recipients.",
)
bcc: Optional[List[str]] = Field(
None,
description="The list of BCC recipients.",
)
class O365SendMessage(O365BaseTool):
"""Tool for sending an email in Office 365."""
name: str = "send_email"
description: str = (
"Use this tool to send an email with the provided message fields."
)
args_schema: Type[SendMessageSchema] = SendMessageSchema
def _run(
self,
body: str,
to: List[str],
subject: str,
cc: Optional[List[str]] = None,
bcc: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
# Get mailbox object
mailbox = self.account.mailbox()
message = mailbox.new_message()
# Assign message values
message.body = body
message.subject = subject
message.to.add(to)
if cc is not None:
message.cc.add(cc)
if bcc is not None:
message.bcc.add(bcc)
message.send()
output = "Message sent: " + str(message)
return output
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~scann.py | from __future__ import annotations
import operator
import pickle
import uuid
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
import numpy as np
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.vectorstores import VectorStore
from langchain_community.docstore.base import AddableMixin, Docstore
from langchain_community.docstore.in_memory import InMemoryDocstore
from langchain_community.vectorstores.utils import DistanceStrategy
def normalize(x: np.ndarray) -> np.ndarray:
"""Normalize vectors to unit length."""
x /= np.clip(np.linalg.norm(x, axis=-1, keepdims=True), 1e-12, None)
return x
def dependable_scann_import() -> Any:
"""
Import `scann` if available, otherwise raise error.
"""
try:
import scann
except ImportError:
raise ImportError(
"Could not import scann python package. "
"Please install it with `pip install scann` "
)
return scann
class ScaNN(VectorStore):
"""`ScaNN` vector store.
To use, you should have the ``scann`` python package installed.
Example:
.. code-block:: python
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import ScaNN
db = ScaNN.from_texts(
['foo', 'bar', 'barz', 'qux'],
HuggingFaceEmbeddings())
db.similarity_search('foo?', k=1)
"""
def __init__(
self,
embedding: Embeddings,
index: Any,
docstore: Docstore,
index_to_docstore_id: Dict[int, str],
relevance_score_fn: Optional[Callable[[float], float]] = None,
normalize_L2: bool = False,
distance_strategy: DistanceStrategy = DistanceStrategy.EUCLIDEAN_DISTANCE,
scann_config: Optional[str] = None,
):
"""Initialize with necessary components."""
self.embedding = embedding
self.index = index
self.docstore = docstore
self.index_to_docstore_id = index_to_docstore_id
self.distance_strategy = distance_strategy
self.override_relevance_score_fn = relevance_score_fn
self._normalize_L2 = normalize_L2
self._scann_config = scann_config
def __add(
self,
texts: Iterable[str],
embeddings: Iterable[List[float]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
if not isinstance(self.docstore, AddableMixin):
raise ValueError(
"If trying to add texts, the underlying docstore should support "
f"adding items, which {self.docstore} does not"
)
raise NotImplementedError("Updates are not available in ScaNN, yet.")
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
Returns:
List of ids from adding the texts into the vectorstore.
"""
# Embed and create the documents.
embeddings = self.embedding.embed_documents(list(texts))
return self.__add(texts, embeddings, metadatas=metadatas, ids=ids, **kwargs)
def add_embeddings(
self,
text_embeddings: Iterable[Tuple[str, List[float]]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
text_embeddings: Iterable pairs of string and embedding to
add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of unique IDs.
Returns:
List of ids from adding the texts into the vectorstore.
"""
if not isinstance(self.docstore, AddableMixin):
raise ValueError(
"If trying to add texts, the underlying docstore should support "
f"adding items, which {self.docstore} does not"
)
# Embed and create the documents.
texts, embeddings = zip(*text_embeddings)
return self.__add(texts, embeddings, metadatas=metadatas, ids=ids, **kwargs)
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
"""Delete by vector ID or other criteria.
Args:
ids: List of ids to delete.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful,
False otherwise, None if not implemented.
"""
raise NotImplementedError("Deletions are not available in ScaNN, yet.")
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
embedding: Embedding vector to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, Any]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
**kwargs: kwargs to be passed to similarity search. Can include:
score_threshold: Optional, a floating point value between 0 to 1 to
filter the resulting set of retrieved docs
Returns:
List of documents most similar to the query text and L2 distance
in float for each. Lower score represents more similarity.
"""
vector = np.array([embedding], dtype=np.float32)
if self._normalize_L2:
vector = normalize(vector)
indices, scores = self.index.search_batched(
vector, k if filter is None else fetch_k
)
docs = []
for j, i in enumerate(indices[0]):
if i == -1:
# This happens when not enough docs are returned.
continue
_id = self.index_to_docstore_id[i]
doc = self.docstore.search(_id)
if not isinstance(doc, Document):
raise ValueError(f"Could not find document for id {_id}, got {doc}")
if filter is not None:
filter = {
key: [value] if not isinstance(value, list) else value
for key, value in filter.items()
}
if all(doc.metadata.get(key) in value for key, value in filter.items()):
docs.append((doc, scores[0][j]))
else:
docs.append((doc, scores[0][j]))
score_threshold = kwargs.get("score_threshold")
if score_threshold is not None:
cmp = (
operator.ge
if self.distance_strategy
in (DistanceStrategy.MAX_INNER_PRODUCT, DistanceStrategy.JACCARD)
else operator.le
)
docs = [
(doc, similarity)
for doc, similarity in docs
if cmp(similarity, score_threshold)
]
return docs[:k]
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of documents most similar to the query text with
L2 distance in float. Lower score represents more similarity.
"""
embedding = self.embedding.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
fetch_k=fetch_k,
**kwargs,
)
return docs
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of Documents most similar to the embedding.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding,
k,
filter=filter,
fetch_k=fetch_k,
**kwargs,
)
return [doc for doc, _ in docs_and_scores]
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter: (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
fetch_k: (Optional[int]) Number of Documents to fetch before filtering.
Defaults to 20.
Returns:
List of Documents most similar to the query.
"""
docs_and_scores = self.similarity_search_with_score(
query, k, filter=filter, fetch_k=fetch_k, **kwargs
)
return [doc for doc, _ in docs_and_scores]
@classmethod
def __from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
normalize_L2: bool = False,
**kwargs: Any,
) -> ScaNN:
scann = dependable_scann_import()
distance_strategy = kwargs.get(
"distance_strategy", DistanceStrategy.EUCLIDEAN_DISTANCE
)
scann_config = kwargs.get("scann_config", None)
vector = np.array(embeddings, dtype=np.float32)
if normalize_L2:
vector = normalize(vector)
if scann_config is not None:
index = scann.scann_ops_pybind.create_searcher(vector, scann_config)
else:
if distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
index = (
scann.scann_ops_pybind.builder(vector, 1, "dot_product")
.score_brute_force()
.build()
)
else:
# Default to L2, currently other metric types not initialized.
index = (
scann.scann_ops_pybind.builder(vector, 1, "squared_l2")
.score_brute_force()
.build()
)
documents = []
if ids is None:
ids = [str(uuid.uuid4()) for _ in texts]
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
documents.append(Document(page_content=text, metadata=metadata))
index_to_id = dict(enumerate(ids))
if len(index_to_id) != len(documents):
raise Exception(
f"{len(index_to_id)} ids provided for {len(documents)} documents."
" Each document should have an id."
)
docstore = InMemoryDocstore(dict(zip(index_to_id.values(), documents)))
return cls(
embedding,
index,
docstore,
index_to_id,
normalize_L2=normalize_L2,
**kwargs,
)
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> ScaNN:
"""Construct ScaNN wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the ScaNN database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import ScaNN
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
scann = ScaNN.from_texts(texts, embeddings)
"""
embeddings = embedding.embed_documents(texts)
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> ScaNN:
"""Construct ScaNN wrapper from raw documents.
This is a user friendly interface that:
1. Embeds documents.
2. Creates an in memory docstore
3. Initializes the ScaNN database
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain_community.vectorstores import ScaNN
from langchain_community.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
scann = ScaNN.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls.__from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
**kwargs,
)
def save_local(self, folder_path: str, index_name: str = "index") -> None:
"""Save ScaNN index, docstore, and index_to_docstore_id to disk.
Args:
folder_path: folder path to save index, docstore,
and index_to_docstore_id to.
"""
path = Path(folder_path)
scann_path = path / "{index_name}.scann".format(index_name=index_name)
scann_path.mkdir(exist_ok=True, parents=True)
# save index separately since it is not picklable
self.index.serialize(str(scann_path))
# save docstore and index_to_docstore_id
with open(path / "{index_name}.pkl".format(index_name=index_name), "wb") as f:
pickle.dump((self.docstore, self.index_to_docstore_id), f)
@classmethod
def load_local(
cls,
folder_path: str,
embedding: Embeddings,
index_name: str = "index",
**kwargs: Any,
) -> ScaNN:
"""Load ScaNN index, docstore, and index_to_docstore_id from disk.
Args:
folder_path: folder path to load index, docstore,
and index_to_docstore_id from.
embeddings: Embeddings to use when generating queries
index_name: for saving with a specific index file name
"""
path = Path(folder_path)
scann_path = path / "{index_name}.scann".format(index_name=index_name)
scann_path.mkdir(exist_ok=True, parents=True)
# load index separately since it is not picklable
scann = dependable_scann_import()
index = scann.scann_ops_pybind.load_searcher(str(scann_path))
# load docstore and index_to_docstore_id
with open(path / "{index_name}.pkl".format(index_name=index_name), "rb") as f:
docstore, index_to_docstore_id = pickle.load(f)
return cls(embedding, index, docstore, index_to_docstore_id, **kwargs)
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn is not None:
return self.override_relevance_score_fn
# Default strategy is to rely on distance strategy provided in
# vectorstore constructor
if self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self._max_inner_product_relevance_score_fn
elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
# Default behavior is to use euclidean distance relevancy
return self._euclidean_relevance_score_fn
else:
raise ValueError(
"Unknown distance strategy, must be cosine, max_inner_product,"
" or euclidean"
)
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
filter: Optional[Dict[str, Any]] = None,
fetch_k: int = 20,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs and their similarity scores on a scale from 0 to 1."""
# Pop score threshold so that only relevancy scores, not raw scores, are
# filtered.
score_threshold = kwargs.pop("score_threshold", None)
relevance_score_fn = self._select_relevance_score_fn()
if relevance_score_fn is None:
raise ValueError(
"normalize_score_fn must be provided to"
" ScaNN constructor to normalize scores"
)
docs_and_scores = self.similarity_search_with_score(
query,
k=k,
filter=filter,
fetch_k=fetch_k,
**kwargs,
)
docs_and_rel_scores = [
(doc, relevance_score_fn(score)) for doc, score in docs_and_scores
]
if score_threshold is not None:
docs_and_rel_scores = [
(doc, similarity)
for doc, similarity in docs_and_rel_scores
if similarity >= score_threshold
]
return docs_and_rel_scores
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~retrievers~docarray.py | from enum import Enum
from typing import Any, Dict, List, Optional, Union
import numpy as np
from libs.core.langchain_core.callbacks import CallbackManagerForRetrieverRun
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.retrievers import BaseRetriever
from langchain_community.vectorstores.utils import maximal_marginal_relevance
class SearchType(str, Enum):
"""Enumerator of the types of search to perform."""
similarity = "similarity"
mmr = "mmr"
class DocArrayRetriever(BaseRetriever):
"""`DocArray Document Indices` retriever.
Currently, it supports 5 backends:
InMemoryExactNNIndex, HnswDocumentIndex, QdrantDocumentIndex,
ElasticDocIndex, and WeaviateDocumentIndex.
Args:
index: One of the above-mentioned index instances
embeddings: Embedding model to represent text as vectors
search_field: Field to consider for searching in the documents.
Should be an embedding/vector/tensor.
content_field: Field that represents the main content in your document schema.
Will be used as a `page_content`. Everything else will go into `metadata`.
search_type: Type of search to perform (similarity / mmr)
filters: Filters applied for document retrieval.
top_k: Number of documents to return
"""
index: Any
embeddings: Embeddings
search_field: str
content_field: str
search_type: SearchType = SearchType.similarity
top_k: int = 1
filters: Optional[Any] = None
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""Get documents relevant for a query.
Args:
query: string to find relevant documents for
Returns:
List of relevant documents
"""
query_emb = np.array(self.embeddings.embed_query(query))
if self.search_type == SearchType.similarity:
results = self._similarity_search(query_emb)
elif self.search_type == SearchType.mmr:
results = self._mmr_search(query_emb)
else:
raise ValueError(
f"Search type {self.search_type} does not exist. "
f"Choose either 'similarity' or 'mmr'."
)
return results
def _search(
self, query_emb: np.ndarray, top_k: int
) -> List[Union[Dict[str, Any], Any]]:
"""
Perform a search using the query embedding and return top_k documents.
Args:
query_emb: Query represented as an embedding
top_k: Number of documents to return
Returns:
A list of top_k documents matching the query
"""
from docarray.index import ElasticDocIndex, WeaviateDocumentIndex
filter_args = {}
search_field = self.search_field
if isinstance(self.index, WeaviateDocumentIndex):
filter_args["where_filter"] = self.filters
search_field = ""
elif isinstance(self.index, ElasticDocIndex):
filter_args["query"] = self.filters
else:
filter_args["filter_query"] = self.filters
if self.filters:
query = (
self.index.build_query() # get empty query object
.find(
query=query_emb, search_field=search_field
) # add vector similarity search
.filter(**filter_args) # add filter search
.build(limit=top_k) # build the query
)
# execute the combined query and return the results
docs = self.index.execute_query(query)
if hasattr(docs, "documents"):
docs = docs.documents
docs = docs[:top_k]
else:
docs = self.index.find(
query=query_emb, search_field=search_field, limit=top_k
).documents
return docs
def _similarity_search(self, query_emb: np.ndarray) -> List[Document]:
"""
Perform a similarity search.
Args:
query_emb: Query represented as an embedding
Returns:
A list of documents most similar to the query
"""
docs = self._search(query_emb=query_emb, top_k=self.top_k)
results = [self._docarray_to_langchain_doc(doc) for doc in docs]
return results
def _mmr_search(self, query_emb: np.ndarray) -> List[Document]:
"""
Perform a maximal marginal relevance (mmr) search.
Args:
query_emb: Query represented as an embedding
Returns:
A list of diverse documents related to the query
"""
docs = self._search(query_emb=query_emb, top_k=20)
mmr_selected = maximal_marginal_relevance(
query_emb,
[
doc[self.search_field]
if isinstance(doc, dict)
else getattr(doc, self.search_field)
for doc in docs
],
k=self.top_k,
)
results = [self._docarray_to_langchain_doc(docs[idx]) for idx in mmr_selected]
return results
def _docarray_to_langchain_doc(self, doc: Union[Dict[str, Any], Any]) -> Document:
"""
Convert a DocArray document (which also might be a dict)
to a langchain document format.
DocArray document can contain arbitrary fields, so the mapping is done
in the following way:
page_content <-> content_field
metadata <-> all other fields excluding
tensors and embeddings (so float, int, string)
Args:
doc: DocArray document
Returns:
Document in langchain format
Raises:
ValueError: If the document doesn't contain the content field
"""
fields = doc.keys() if isinstance(doc, dict) else doc.__fields__
if self.content_field not in fields:
raise ValueError(
f"Document does not contain the content field - {self.content_field}."
)
lc_doc = Document(
page_content=doc[self.content_field]
if isinstance(doc, dict)
else getattr(doc, self.content_field)
)
for name in fields:
value = doc[name] if isinstance(doc, dict) else getattr(doc, name)
if (
isinstance(value, (str, int, float, bool))
and name != self.content_field
):
lc_doc.metadata[name] = value
return lc_doc
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~docstore~arbitrary_fn.py | from typing import Callable, Union
from libs.core.langchain_core.documents import Document
from langchain_community.docstore.base import Docstore
class DocstoreFn(Docstore):
"""Langchain Docstore via arbitrary lookup function.
This is useful when:
* it's expensive to construct an InMemoryDocstore/dict
* you retrieve documents from remote sources
* you just want to reuse existing objects
"""
def __init__(
self,
lookup_fn: Callable[[str], Union[Document, str]],
):
self._lookup_fn = lookup_fn
def search(self, search: str) -> Document:
"""Search for a document.
Args:
search: search string
Returns:
Document if found, else error message.
"""
r = self._lookup_fn(search)
if isinstance(r, str):
# NOTE: assume the search string is the source ID
return Document(page_content=r, metadata={"source": search})
elif isinstance(r, Document):
return r
raise ValueError(f"Unexpected type of document {type(r)}")
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~edenai.py | """Wrapper around EdenAI's Generation API."""
import logging
from typing import Any, Dict, List, Literal, Optional
from aiohttp import ClientSession
from libs.core.langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, Field, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
from langchain_community.llms.utils import enforce_stop_tokens
from langchain_community.utilities.requests import Requests
logger = logging.getLogger(__name__)
class EdenAI(LLM):
"""Wrapper around edenai models.
To use, you should have
the environment variable ``EDENAI_API_KEY`` set with your API token.
You can find your token here: https://app.edenai.run/admin/account/settings
`feature` and `subfeature` are required, but any other model parameters can also be
passed in with the format params={model_param: value, ...}
for api reference check edenai documentation: http://docs.edenai.co.
"""
base_url: str = "https://api.edenai.run/v2"
edenai_api_key: Optional[str] = None
feature: Literal["text", "image"] = "text"
"""Which generative feature to use, use text by default"""
subfeature: Literal["generation"] = "generation"
"""Subfeature of above feature, use generation by default"""
provider: str
"""Generative provider to use (eg: openai,stabilityai,cohere,google etc.)"""
model: Optional[str] = None
"""
model name for above provider (eg: 'gpt-3.5-turbo-instruct' for openai)
available models are shown on https://docs.edenai.co/ under 'available providers'
"""
# Optional parameters to add depending of chosen feature
# see api reference for more infos
temperature: Optional[float] = Field(default=None, ge=0, le=1) # for text
max_tokens: Optional[int] = Field(default=None, ge=0) # for text
resolution: Optional[Literal["256x256", "512x512", "1024x1024"]] = None # for image
params: Dict[str, Any] = Field(default_factory=dict)
"""
DEPRECATED: use temperature, max_tokens, resolution directly
optional parameters to pass to api
"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""extra parameters"""
stop_sequences: Optional[List[str]] = None
"""Stop sequences to use."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key exists in environment."""
values["edenai_api_key"] = get_from_dict_or_env(
values, "edenai_api_key", "EDENAI_API_KEY"
)
return values
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@property
def _llm_type(self) -> str:
"""Return type of model."""
return "edenai"
def _format_output(self, output: dict) -> str:
if self.feature == "text":
return output[self.provider]["generated_text"]
else:
return output[self.provider]["items"][0]["image"]
@staticmethod
def get_user_agent() -> str:
from langchain_community import __version__
return f"langchain/{__version__}"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to EdenAI's text generation endpoint.
Args:
prompt: The prompt to pass into the model.
Returns:
json formatted str response.
"""
stops = None
if self.stop_sequences is not None and stop is not None:
raise ValueError(
"stop sequences found in both the input and default params."
)
elif self.stop_sequences is not None:
stops = self.stop_sequences
else:
stops = stop
url = f"{self.base_url}/{self.feature}/{self.subfeature}"
headers = {
"Authorization": f"Bearer {self.edenai_api_key}",
"User-Agent": self.get_user_agent(),
}
payload: Dict[str, Any] = {
"providers": self.provider,
"text": prompt,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"resolution": self.resolution,
**self.params,
**kwargs,
"num_images": 1, # always limit to 1 (ignored for text)
}
# filter None values to not pass them to the http payload
payload = {k: v for k, v in payload.items() if v is not None}
if self.model is not None:
payload["settings"] = {self.provider: self.model}
request = Requests(headers=headers)
response = request.post(url=url, data=payload)
if response.status_code >= 500:
raise Exception(f"EdenAI Server: Error {response.status_code}")
elif response.status_code >= 400:
raise ValueError(f"EdenAI received an invalid payload: {response.text}")
elif response.status_code != 200:
raise Exception(
f"EdenAI returned an unexpected response with status "
f"{response.status_code}: {response.text}"
)
data = response.json()
provider_response = data[self.provider]
if provider_response.get("status") == "fail":
err_msg = provider_response.get("error", {}).get("message")
raise Exception(err_msg)
output = self._format_output(data)
if stops is not None:
output = enforce_stop_tokens(output, stops)
return output
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call EdenAi model to get predictions based on the prompt.
Args:
prompt: The prompt to pass into the model.
stop: A list of stop words (optional).
run_manager: A callback manager for async interaction with LLMs.
Returns:
The string generated by the model.
"""
stops = None
if self.stop_sequences is not None and stop is not None:
raise ValueError(
"stop sequences found in both the input and default params."
)
elif self.stop_sequences is not None:
stops = self.stop_sequences
else:
stops = stop
url = f"{self.base_url}/{self.feature}/{self.subfeature}"
headers = {
"Authorization": f"Bearer {self.edenai_api_key}",
"User-Agent": self.get_user_agent(),
}
payload: Dict[str, Any] = {
"providers": self.provider,
"text": prompt,
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"resolution": self.resolution,
**self.params,
**kwargs,
"num_images": 1, # always limit to 1 (ignored for text)
}
# filter `None` values to not pass them to the http payload as null
payload = {k: v for k, v in payload.items() if v is not None}
if self.model is not None:
payload["settings"] = {self.provider: self.model}
async with ClientSession() as session:
async with session.post(url, json=payload, headers=headers) as response:
if response.status >= 500:
raise Exception(f"EdenAI Server: Error {response.status}")
elif response.status >= 400:
raise ValueError(
f"EdenAI received an invalid payload: {response.text}"
)
elif response.status != 200:
raise Exception(
f"EdenAI returned an unexpected response with status "
f"{response.status}: {response.text}"
)
response_json = await response.json()
provider_response = response_json[self.provider]
if provider_response.get("status") == "fail":
err_msg = provider_response.get("error", {}).get("message")
raise Exception(err_msg)
output = self._format_output(response_json)
if stops is not None:
output = enforce_stop_tokens(output, stops)
return output
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~telegram.py | from __future__ import annotations
import asyncio
import json
from pathlib import Path
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import pandas as pd
from telethon.hints import EntityLike
def concatenate_rows(row: dict) -> str:
"""Combine message information in a readable format ready to be used."""
date = row["date"]
sender = row["from"]
text = row["text"]
return f"{sender} on {date}: {text}\n\n"
class TelegramChatFileLoader(BaseLoader):
"""Load from `Telegram chat` dump."""
def __init__(self, path: str):
"""Initialize with a path."""
self.file_path = path
def load(self) -> List[Document]:
"""Load documents."""
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
text = "".join(
concatenate_rows(message)
for message in d["messages"]
if message["type"] == "message" and isinstance(message["text"], str)
)
metadata = {"source": str(p)}
return [Document(page_content=text, metadata=metadata)]
def text_to_docs(text: Union[str, List[str]]) -> List[Document]:
"""Convert a string or list of strings to a list of Documents with metadata."""
from langchain.text_splitter import RecursiveCharacterTextSplitter
if isinstance(text, str):
# Take a single string as one page
text = [text]
page_docs = [Document(page_content=page) for page in text]
# Add page numbers as metadata
for i, doc in enumerate(page_docs):
doc.metadata["page"] = i + 1
# Split pages into chunks
doc_chunks = []
for doc in page_docs:
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=800,
separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
chunk_overlap=20,
)
chunks = text_splitter.split_text(doc.page_content)
for i, chunk in enumerate(chunks):
doc = Document(
page_content=chunk, metadata={"page": doc.metadata["page"], "chunk": i}
)
# Add sources a metadata
doc.metadata["source"] = f"{doc.metadata['page']}-{doc.metadata['chunk']}"
doc_chunks.append(doc)
return doc_chunks
class TelegramChatApiLoader(BaseLoader):
"""Load `Telegram` chat json directory dump."""
def __init__(
self,
chat_entity: Optional[EntityLike] = None,
api_id: Optional[int] = None,
api_hash: Optional[str] = None,
username: Optional[str] = None,
file_path: str = "telegram_data.json",
):
"""Initialize with API parameters.
Args:
chat_entity: The chat entity to fetch data from.
api_id: The API ID.
api_hash: The API hash.
username: The username.
file_path: The file path to save the data to. Defaults to
"telegram_data.json".
"""
self.chat_entity = chat_entity
self.api_id = api_id
self.api_hash = api_hash
self.username = username
self.file_path = file_path
async def fetch_data_from_telegram(self) -> None:
"""Fetch data from Telegram API and save it as a JSON file."""
from telethon.sync import TelegramClient
data = []
async with TelegramClient(self.username, self.api_id, self.api_hash) as client:
async for message in client.iter_messages(self.chat_entity):
is_reply = message.reply_to is not None
reply_to_id = message.reply_to.reply_to_msg_id if is_reply else None
data.append(
{
"sender_id": message.sender_id,
"text": message.text,
"date": message.date.isoformat(),
"message.id": message.id,
"is_reply": is_reply,
"reply_to_id": reply_to_id,
}
)
with open(self.file_path, "w", encoding="utf-8") as f:
json.dump(data, f, ensure_ascii=False, indent=4)
def _get_message_threads(self, data: pd.DataFrame) -> dict:
"""Create a dictionary of message threads from the given data.
Args:
data (pd.DataFrame): A DataFrame containing the conversation \
data with columns:
- message.sender_id
- text
- date
- message.id
- is_reply
- reply_to_id
Returns:
dict: A dictionary where the key is the parent message ID and \
the value is a list of message IDs in ascending order.
"""
def find_replies(parent_id: int, reply_data: pd.DataFrame) -> List[int]:
"""
Recursively find all replies to a given parent message ID.
Args:
parent_id (int): The parent message ID.
reply_data (pd.DataFrame): A DataFrame containing reply messages.
Returns:
list: A list of message IDs that are replies to the parent message ID.
"""
# Find direct replies to the parent message ID
direct_replies = reply_data[reply_data["reply_to_id"] == parent_id][
"message.id"
].tolist()
# Recursively find replies to the direct replies
all_replies = []
for reply_id in direct_replies:
all_replies += [reply_id] + find_replies(reply_id, reply_data)
return all_replies
# Filter out parent messages
parent_messages = data[~data["is_reply"]]
# Filter out reply messages and drop rows with NaN in 'reply_to_id'
reply_messages = data[data["is_reply"]].dropna(subset=["reply_to_id"])
# Convert 'reply_to_id' to integer
reply_messages["reply_to_id"] = reply_messages["reply_to_id"].astype(int)
# Create a dictionary of message threads with parent message IDs as keys and \
# lists of reply message IDs as values
message_threads = {
parent_id: [parent_id] + find_replies(parent_id, reply_messages)
for parent_id in parent_messages["message.id"]
}
return message_threads
def _combine_message_texts(
self, message_threads: Dict[int, List[int]], data: pd.DataFrame
) -> str:
"""
Combine the message texts for each parent message ID based \
on the list of message threads.
Args:
message_threads (dict): A dictionary where the key is the parent message \
ID and the value is a list of message IDs in ascending order.
data (pd.DataFrame): A DataFrame containing the conversation data:
- message.sender_id
- text
- date
- message.id
- is_reply
- reply_to_id
Returns:
str: A combined string of message texts sorted by date.
"""
combined_text = ""
# Iterate through sorted parent message IDs
for parent_id, message_ids in message_threads.items():
# Get the message texts for the message IDs and sort them by date
message_texts = (
data[data["message.id"].isin(message_ids)]
.sort_values(by="date")["text"]
.tolist()
)
message_texts = [str(elem) for elem in message_texts]
# Combine the message texts
combined_text += " ".join(message_texts) + ".\n"
return combined_text.strip()
def load(self) -> List[Document]:
"""Load documents."""
if self.chat_entity is not None:
try:
import nest_asyncio
nest_asyncio.apply()
asyncio.run(self.fetch_data_from_telegram())
except ImportError:
raise ImportError(
"""`nest_asyncio` package not found.
please install with `pip install nest_asyncio`
"""
)
p = Path(self.file_path)
with open(p, encoding="utf8") as f:
d = json.load(f)
try:
import pandas as pd
except ImportError:
raise ImportError(
"""`pandas` package not found.
please install with `pip install pandas`
"""
)
normalized_messages = pd.json_normalize(d)
df = pd.DataFrame(normalized_messages)
message_threads = self._get_message_threads(df)
combined_texts = self._combine_message_texts(message_threads, df)
return text_to_docs(combined_texts)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~vertexai.py | import logging
import re
import string
import threading
from concurrent.futures import ThreadPoolExecutor, wait
from typing import Any, Dict, List, Literal, Optional, Tuple
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.language_models.llms import create_base_retry_decorator
from libs.core.langchain_core.pydantic_v1 import root_validator
from langchain_community.llms.vertexai import _VertexAICommon
from langchain_community.utilities.vertexai import raise_vertex_import_error
logger = logging.getLogger(__name__)
_MAX_TOKENS_PER_BATCH = 20000
_MAX_BATCH_SIZE = 250
_MIN_BATCH_SIZE = 5
class VertexAIEmbeddings(_VertexAICommon, Embeddings):
"""Google Cloud VertexAI embedding models."""
# Instance context
instance: Dict[str, Any] = {} #: :meta private:
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validates that the python package exists in environment."""
cls._try_init_vertexai(values)
if values["model_name"] == "textembedding-gecko-default":
logger.warning(
"Model_name will become a required arg for VertexAIEmbeddings "
"starting from Feb-01-2024. Currently the default is set to "
"textembedding-gecko@001"
)
values["model_name"] = "textembedding-gecko@001"
try:
from vertexai.language_models import TextEmbeddingModel
except ImportError:
raise_vertex_import_error()
values["client"] = TextEmbeddingModel.from_pretrained(values["model_name"])
return values
def __init__(
self,
# the default value would be removed after Feb-01-2024
model_name: str = "textembedding-gecko-default",
project: Optional[str] = None,
location: str = "us-central1",
request_parallelism: int = 5,
max_retries: int = 6,
credentials: Optional[Any] = None,
**kwargs: Any,
):
"""Initialize the sentence_transformer."""
super().__init__(
project=project,
location=location,
credentials=credentials,
request_parallelism=request_parallelism,
max_retries=max_retries,
model_name=model_name,
**kwargs,
)
self.instance["max_batch_size"] = kwargs.get("max_batch_size", _MAX_BATCH_SIZE)
self.instance["batch_size"] = self.instance["max_batch_size"]
self.instance["min_batch_size"] = kwargs.get("min_batch_size", _MIN_BATCH_SIZE)
self.instance["min_good_batch_size"] = self.instance["min_batch_size"]
self.instance["lock"] = threading.Lock()
self.instance["batch_size_validated"] = False
self.instance["task_executor"] = ThreadPoolExecutor(
max_workers=request_parallelism
)
self.instance[
"embeddings_task_type_supported"
] = not self.client._endpoint_name.endswith("/textembedding-gecko@001")
@staticmethod
def _split_by_punctuation(text: str) -> List[str]:
"""Splits a string by punctuation and whitespace characters."""
split_by = string.punctuation + "\t\n "
pattern = f"([{split_by}])"
# Using re.split to split the text based on the pattern
return [segment for segment in re.split(pattern, text) if segment]
@staticmethod
def _prepare_batches(texts: List[str], batch_size: int) -> List[List[str]]:
"""Splits texts in batches based on current maximum batch size
and maximum tokens per request.
"""
text_index = 0
texts_len = len(texts)
batch_token_len = 0
batches: List[List[str]] = []
current_batch: List[str] = []
if texts_len == 0:
return []
while text_index < texts_len:
current_text = texts[text_index]
# Number of tokens per a text is conservatively estimated
# as 2 times number of words, punctuation and whitespace characters.
# Using `count_tokens` API will make batching too expensive.
# Utilizing a tokenizer, would add a dependency that would not
# necessarily be reused by the application using this class.
current_text_token_cnt = (
len(VertexAIEmbeddings._split_by_punctuation(current_text)) * 2
)
end_of_batch = False
if current_text_token_cnt > _MAX_TOKENS_PER_BATCH:
# Current text is too big even for a single batch.
# Such request will fail, but we still make a batch
# so that the app can get the error from the API.
if len(current_batch) > 0:
# Adding current batch if not empty.
batches.append(current_batch)
current_batch = [current_text]
text_index += 1
end_of_batch = True
elif (
batch_token_len + current_text_token_cnt > _MAX_TOKENS_PER_BATCH
or len(current_batch) == batch_size
):
end_of_batch = True
else:
if text_index == texts_len - 1:
# Last element - even though the batch may be not big,
# we still need to make it.
end_of_batch = True
batch_token_len += current_text_token_cnt
current_batch.append(current_text)
text_index += 1
if end_of_batch:
batches.append(current_batch)
current_batch = []
batch_token_len = 0
return batches
def _get_embeddings_with_retry(
self, texts: List[str], embeddings_type: Optional[str] = None
) -> List[List[float]]:
"""Makes a Vertex AI model request with retry logic."""
from google.api_core.exceptions import (
Aborted,
DeadlineExceeded,
ResourceExhausted,
ServiceUnavailable,
)
errors = [
ResourceExhausted,
ServiceUnavailable,
Aborted,
DeadlineExceeded,
]
retry_decorator = create_base_retry_decorator(
error_types=errors, max_retries=self.max_retries
)
@retry_decorator
def _completion_with_retry(texts_to_process: List[str]) -> Any:
if embeddings_type and self.instance["embeddings_task_type_supported"]:
from vertexai.language_models import TextEmbeddingInput
requests = [
TextEmbeddingInput(text=t, task_type=embeddings_type)
for t in texts_to_process
]
else:
requests = texts_to_process
embeddings = self.client.get_embeddings(requests)
return [embs.values for embs in embeddings]
return _completion_with_retry(texts)
def _prepare_and_validate_batches(
self, texts: List[str], embeddings_type: Optional[str] = None
) -> Tuple[List[List[float]], List[List[str]]]:
"""Prepares text batches with one-time validation of batch size.
Batch size varies between GCP regions and individual project quotas.
# Returns embeddings of the first text batch that went through,
# and text batches for the rest of the texts.
"""
from google.api_core.exceptions import InvalidArgument
batches = VertexAIEmbeddings._prepare_batches(
texts, self.instance["batch_size"]
)
# If batch size if less or equal to one that went through before,
# then keep batches as they are.
if len(batches[0]) <= self.instance["min_good_batch_size"]:
return [], batches
with self.instance["lock"]:
# If largest possible batch size was validated
# while waiting for the lock, then check for rebuilding
# our batches, and return.
if self.instance["batch_size_validated"]:
if len(batches[0]) <= self.instance["batch_size"]:
return [], batches
else:
return [], VertexAIEmbeddings._prepare_batches(
texts, self.instance["batch_size"]
)
# Figure out largest possible batch size by trying to push
# batches and lowering their size in half after every failure.
first_batch = batches[0]
first_result = []
had_failure = False
while True:
try:
first_result = self._get_embeddings_with_retry(
first_batch, embeddings_type
)
break
except InvalidArgument:
had_failure = True
first_batch_len = len(first_batch)
if first_batch_len == self.instance["min_batch_size"]:
raise
first_batch_len = max(
self.instance["min_batch_size"], int(first_batch_len / 2)
)
first_batch = first_batch[:first_batch_len]
first_batch_len = len(first_batch)
self.instance["min_good_batch_size"] = max(
self.instance["min_good_batch_size"], first_batch_len
)
# If had a failure and recovered
# or went through with the max size, then it's a legit batch size.
if had_failure or first_batch_len == self.instance["max_batch_size"]:
self.instance["batch_size"] = first_batch_len
self.instance["batch_size_validated"] = True
# If batch size was updated,
# rebuild batches with the new batch size
# (texts that went through are excluded here).
if first_batch_len != self.instance["max_batch_size"]:
batches = VertexAIEmbeddings._prepare_batches(
texts[first_batch_len:], self.instance["batch_size"]
)
else:
# Still figuring out max batch size.
batches = batches[1:]
# Returning embeddings of the first text batch that went through,
# and text batches for the rest of texts.
return first_result, batches
def embed(
self,
texts: List[str],
batch_size: int = 0,
embeddings_task_type: Optional[
Literal[
"RETRIEVAL_QUERY",
"RETRIEVAL_DOCUMENT",
"SEMANTIC_SIMILARITY",
"CLASSIFICATION",
"CLUSTERING",
]
] = None,
) -> List[List[float]]:
"""Embed a list of strings.
Args:
texts: List[str] The list of strings to embed.
batch_size: [int] The batch size of embeddings to send to the model.
If zero, then the largest batch size will be detected dynamically
at the first request, starting from 250, down to 5.
embeddings_task_type: [str] optional embeddings task type,
one of the following
RETRIEVAL_QUERY - Text is a query
in a search/retrieval setting.
RETRIEVAL_DOCUMENT - Text is a document
in a search/retrieval setting.
SEMANTIC_SIMILARITY - Embeddings will be used
for Semantic Textual Similarity (STS).
CLASSIFICATION - Embeddings will be used for classification.
CLUSTERING - Embeddings will be used for clustering.
Returns:
List of embeddings, one for each text.
"""
if len(texts) == 0:
return []
embeddings: List[List[float]] = []
first_batch_result: List[List[float]] = []
if batch_size > 0:
# Fixed batch size.
batches = VertexAIEmbeddings._prepare_batches(texts, batch_size)
else:
# Dynamic batch size, starting from 250 at the first call.
first_batch_result, batches = self._prepare_and_validate_batches(
texts, embeddings_task_type
)
# First batch result may have some embeddings already.
# In such case, batches have texts that were not processed yet.
embeddings.extend(first_batch_result)
tasks = []
for batch in batches:
tasks.append(
self.instance["task_executor"].submit(
self._get_embeddings_with_retry,
texts=batch,
embeddings_type=embeddings_task_type,
)
)
if len(tasks) > 0:
wait(tasks)
for t in tasks:
embeddings.extend(t.result())
return embeddings
def embed_documents(
self, texts: List[str], batch_size: int = 0
) -> List[List[float]]:
"""Embed a list of documents.
Args:
texts: List[str] The list of texts to embed.
batch_size: [int] The batch size of embeddings to send to the model.
If zero, then the largest batch size will be detected dynamically
at the first request, starting from 250, down to 5.
Returns:
List of embeddings, one for each text.
"""
return self.embed(texts, batch_size, "RETRIEVAL_DOCUMENT")
def embed_query(self, text: str) -> List[float]:
"""Embed a text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
embeddings = self.embed([text], 1, "RETRIEVAL_QUERY")
return embeddings[0]
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~document_loaders~test_tensorflow_datasets.py | """Integration tests for the TensorFlow Dataset Loader."""
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.pydantic_v1 import ValidationError
from langchain_community.document_loaders.tensorflow_datasets import (
TensorflowDatasetLoader,
)
if TYPE_CHECKING:
import tensorflow as tf # noqa: E402
def decode_to_str(item: tf.Tensor) -> str:
return item.numpy().decode("utf-8")
def mlqaen_example_to_document(example: dict) -> Document:
return Document(
page_content=decode_to_str(example["context"]),
metadata={
"id": decode_to_str(example["id"]),
"title": decode_to_str(example["title"]),
"question": decode_to_str(example["question"]),
"answer": decode_to_str(example["answers"]["text"][0]),
},
)
MAX_DOCS = 10
@pytest.fixture
def tfds_client() -> TensorflowDatasetLoader:
return TensorflowDatasetLoader(
dataset_name="mlqa/en",
split_name="test",
load_max_docs=MAX_DOCS,
sample_to_document_function=mlqaen_example_to_document,
)
def test_load_success(tfds_client: TensorflowDatasetLoader) -> None:
"""Test that returns the correct answer"""
output = tfds_client.load()
assert isinstance(output, list)
assert len(output) == MAX_DOCS
assert isinstance(output[0], Document)
assert len(output[0].page_content) > 0
assert isinstance(output[0].page_content, str)
assert isinstance(output[0].metadata, dict)
def test_lazy_load_success(tfds_client: TensorflowDatasetLoader) -> None:
"""Test that returns the correct answer"""
output = list(tfds_client.lazy_load())
assert isinstance(output, list)
assert len(output) == MAX_DOCS
assert isinstance(output[0], Document)
assert len(output[0].page_content) > 0
assert isinstance(output[0].page_content, str)
assert isinstance(output[0].metadata, dict)
def test_load_fail_wrong_dataset_name() -> None:
"""Test that fails to load"""
with pytest.raises(ValidationError) as exc_info:
TensorflowDatasetLoader(
dataset_name="wrong_dataset_name",
split_name="test",
load_max_docs=MAX_DOCS,
sample_to_document_function=mlqaen_example_to_document,
)
assert "the dataset name is spelled correctly" in str(exc_info.value)
def test_load_fail_wrong_split_name() -> None:
"""Test that fails to load"""
with pytest.raises(ValidationError) as exc_info:
TensorflowDatasetLoader(
dataset_name="mlqa/en",
split_name="wrong_split_name",
load_max_docs=MAX_DOCS,
sample_to_document_function=mlqaen_example_to_document,
)
assert "Unknown split" in str(exc_info.value)
def test_load_fail_no_func() -> None:
"""Test that fails to load"""
with pytest.raises(ValidationError) as exc_info:
TensorflowDatasetLoader(
dataset_name="mlqa/en",
split_name="test",
load_max_docs=MAX_DOCS,
)
assert "Please provide a function" in str(exc_info.value)
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~chains~combine_documents~reduce.py | """Combine many documents together by recursively reducing them."""
from __future__ import annotations
from typing import Any, Callable, List, Optional, Protocol, Tuple
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.pydantic_v1 import Extra
from langchain.callbacks.manager import Callbacks
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
class CombineDocsProtocol(Protocol):
"""Interface for the combine_docs method."""
def __call__(self, docs: List[Document], **kwargs: Any) -> str:
"""Interface for the combine_docs method."""
class AsyncCombineDocsProtocol(Protocol):
"""Interface for the combine_docs method."""
async def __call__(self, docs: List[Document], **kwargs: Any) -> str:
"""Async interface for the combine_docs method."""
def split_list_of_docs(
docs: List[Document], length_func: Callable, token_max: int, **kwargs: Any
) -> List[List[Document]]:
"""Split Documents into subsets that each meet a cumulative length constraint.
Args:
docs: The full list of Documents.
length_func: Function for computing the cumulative length of a set of Documents.
token_max: The maximum cumulative length of any subset of Documents.
**kwargs: Arbitrary additional keyword params to pass to each call of the
length_func.
Returns:
A List[List[Document]].
"""
new_result_doc_list = []
_sub_result_docs = []
for doc in docs:
_sub_result_docs.append(doc)
_num_tokens = length_func(_sub_result_docs, **kwargs)
if _num_tokens > token_max:
if len(_sub_result_docs) == 1:
raise ValueError(
"A single document was longer than the context length,"
" we cannot handle this."
)
new_result_doc_list.append(_sub_result_docs[:-1])
_sub_result_docs = _sub_result_docs[-1:]
new_result_doc_list.append(_sub_result_docs)
return new_result_doc_list
def collapse_docs(
docs: List[Document],
combine_document_func: CombineDocsProtocol,
**kwargs: Any,
) -> Document:
"""Execute a collapse function on a set of documents and merge their metadatas.
Args:
docs: A list of Documents to combine.
combine_document_func: A function that takes in a list of Documents and
optionally addition keyword parameters and combines them into a single
string.
**kwargs: Arbitrary additional keyword params to pass to the
combine_document_func.
Returns:
A single Document with the output of combine_document_func for the page content
and the combined metadata's of all the input documents. All metadata values
are strings, and where there are overlapping keys across documents the
values are joined by ", ".
"""
result = combine_document_func(docs, **kwargs)
combined_metadata = {k: str(v) for k, v in docs[0].metadata.items()}
for doc in docs[1:]:
for k, v in doc.metadata.items():
if k in combined_metadata:
combined_metadata[k] += f", {v}"
else:
combined_metadata[k] = str(v)
return Document(page_content=result, metadata=combined_metadata)
async def acollapse_docs(
docs: List[Document],
combine_document_func: AsyncCombineDocsProtocol,
**kwargs: Any,
) -> Document:
"""Execute a collapse function on a set of documents and merge their metadatas.
Args:
docs: A list of Documents to combine.
combine_document_func: A function that takes in a list of Documents and
optionally addition keyword parameters and combines them into a single
string.
**kwargs: Arbitrary additional keyword params to pass to the
combine_document_func.
Returns:
A single Document with the output of combine_document_func for the page content
and the combined metadata's of all the input documents. All metadata values
are strings, and where there are overlapping keys across documents the
values are joined by ", ".
"""
result = await combine_document_func(docs, **kwargs)
combined_metadata = {k: str(v) for k, v in docs[0].metadata.items()}
for doc in docs[1:]:
for k, v in doc.metadata.items():
if k in combined_metadata:
combined_metadata[k] += f", {v}"
else:
combined_metadata[k] = str(v)
return Document(page_content=result, metadata=combined_metadata)
class ReduceDocumentsChain(BaseCombineDocumentsChain):
"""Combine documents by recursively reducing them.
This involves
- combine_documents_chain
- collapse_documents_chain
`combine_documents_chain` is ALWAYS provided. This is final chain that is called.
We pass all previous results to this chain, and the output of this chain is
returned as a final result.
`collapse_documents_chain` is used if the documents passed in are too many to all
be passed to `combine_documents_chain` in one go. In this case,
`collapse_documents_chain` is called recursively on as big of groups of documents
as are allowed.
Example:
.. code-block:: python
from langchain.chains import (
StuffDocumentsChain, LLMChain, ReduceDocumentsChain
)
from libs.core.langchain_core.prompts import PromptTemplate
from langchain.llms import OpenAI
# This controls how each document will be formatted. Specifically,
# it will be passed to `format_document` - see that function for more
# details.
document_prompt = PromptTemplate(
input_variables=["page_content"],
template="{page_content}"
)
document_variable_name = "context"
llm = OpenAI()
# The prompt here should take as an input variable the
# `document_variable_name`
prompt = PromptTemplate.from_template(
"Summarize this content: {context}"
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
combine_documents_chain = StuffDocumentsChain(
llm_chain=llm_chain,
document_prompt=document_prompt,
document_variable_name=document_variable_name
)
chain = ReduceDocumentsChain(
combine_documents_chain=combine_documents_chain,
)
# If we wanted to, we could also pass in collapse_documents_chain
# which is specifically aimed at collapsing documents BEFORE
# the final call.
prompt = PromptTemplate.from_template(
"Collapse this content: {context}"
)
llm_chain = LLMChain(llm=llm, prompt=prompt)
collapse_documents_chain = StuffDocumentsChain(
llm_chain=llm_chain,
document_prompt=document_prompt,
document_variable_name=document_variable_name
)
chain = ReduceDocumentsChain(
combine_documents_chain=combine_documents_chain,
collapse_documents_chain=collapse_documents_chain,
)
"""
combine_documents_chain: BaseCombineDocumentsChain
"""Final chain to call to combine documents.
This is typically a StuffDocumentsChain."""
collapse_documents_chain: Optional[BaseCombineDocumentsChain] = None
"""Chain to use to collapse documents if needed until they can all fit.
If None, will use the combine_documents_chain.
This is typically a StuffDocumentsChain."""
token_max: int = 3000
"""The maximum number of tokens to group documents into. For example, if
set to 3000 then documents will be grouped into chunks of no greater than
3000 tokens before trying to combine them into a smaller chunk."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def _collapse_chain(self) -> BaseCombineDocumentsChain:
if self.collapse_documents_chain is not None:
return self.collapse_documents_chain
else:
return self.combine_documents_chain
def combine_docs(
self,
docs: List[Document],
token_max: Optional[int] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[str, dict]:
"""Combine multiple documents recursively.
Args:
docs: List of documents to combine, assumed that each one is less than
`token_max`.
token_max: Recursively creates groups of documents less than this number
of tokens.
callbacks: Callbacks to be passed through
**kwargs: additional parameters to be passed to LLM calls (like other
input variables besides the documents)
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
result_docs, extra_return_dict = self._collapse(
docs, token_max=token_max, callbacks=callbacks, **kwargs
)
return self.combine_documents_chain.combine_docs(
docs=result_docs, callbacks=callbacks, **kwargs
)
async def acombine_docs(
self,
docs: List[Document],
token_max: Optional[int] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[str, dict]:
"""Async combine multiple documents recursively.
Args:
docs: List of documents to combine, assumed that each one is less than
`token_max`.
token_max: Recursively creates groups of documents less than this number
of tokens.
callbacks: Callbacks to be passed through
**kwargs: additional parameters to be passed to LLM calls (like other
input variables besides the documents)
Returns:
The first element returned is the single string output. The second
element returned is a dictionary of other keys to return.
"""
result_docs, extra_return_dict = await self._acollapse(
docs, token_max=token_max, callbacks=callbacks, **kwargs
)
return await self.combine_documents_chain.acombine_docs(
docs=result_docs, callbacks=callbacks, **kwargs
)
def _collapse(
self,
docs: List[Document],
token_max: Optional[int] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[List[Document], dict]:
result_docs = docs
length_func = self.combine_documents_chain.prompt_length
num_tokens = length_func(result_docs, **kwargs)
def _collapse_docs_func(docs: List[Document], **kwargs: Any) -> str:
return self._collapse_chain.run(
input_documents=docs, callbacks=callbacks, **kwargs
)
_token_max = token_max or self.token_max
while num_tokens is not None and num_tokens > _token_max:
new_result_doc_list = split_list_of_docs(
result_docs, length_func, _token_max, **kwargs
)
result_docs = []
for docs in new_result_doc_list:
new_doc = collapse_docs(docs, _collapse_docs_func, **kwargs)
result_docs.append(new_doc)
num_tokens = length_func(result_docs, **kwargs)
return result_docs, {}
async def _acollapse(
self,
docs: List[Document],
token_max: Optional[int] = None,
callbacks: Callbacks = None,
**kwargs: Any,
) -> Tuple[List[Document], dict]:
result_docs = docs
length_func = self.combine_documents_chain.prompt_length
num_tokens = length_func(result_docs, **kwargs)
async def _collapse_docs_func(docs: List[Document], **kwargs: Any) -> str:
return await self._collapse_chain.arun(
input_documents=docs, callbacks=callbacks, **kwargs
)
_token_max = token_max or self.token_max
while num_tokens is not None and num_tokens > _token_max:
new_result_doc_list = split_list_of_docs(
result_docs, length_func, _token_max, **kwargs
)
result_docs = []
for docs in new_result_doc_list:
new_doc = await acollapse_docs(docs, _collapse_docs_func, **kwargs)
result_docs.append(new_doc)
num_tokens = length_func(result_docs, **kwargs)
return result_docs, {}
@property
def _chain_type(self) -> str:
return "reduce_documents_chain"
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~fauna.py | from typing import Iterator, List, Optional, Sequence
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class FaunaLoader(BaseLoader):
"""Load from `FaunaDB`.
Attributes:
query (str): The FQL query string to execute.
page_content_field (str): The field that contains the content of each page.
secret (str): The secret key for authenticating to FaunaDB.
metadata_fields (Optional[Sequence[str]]):
Optional list of field names to include in metadata.
"""
def __init__(
self,
query: str,
page_content_field: str,
secret: str,
metadata_fields: Optional[Sequence[str]] = None,
):
self.query = query
self.page_content_field = page_content_field
self.secret = secret
self.metadata_fields = metadata_fields
def load(self) -> List[Document]:
return list(self.lazy_load())
def lazy_load(self) -> Iterator[Document]:
try:
from fauna import Page, fql
from fauna.client import Client
from fauna.encoding import QuerySuccess
except ImportError:
raise ImportError(
"Could not import fauna python package. "
"Please install it with `pip install fauna`."
)
# Create Fauna Client
client = Client(secret=self.secret)
# Run FQL Query
response: QuerySuccess = client.query(fql(self.query))
page: Page = response.data
for result in page:
if result is not None:
document_dict = dict(result.items())
page_content = ""
for key, value in document_dict.items():
if key == self.page_content_field:
page_content = value
document: Document = Document(
page_content=page_content,
metadata={"id": result.id, "ts": result.ts},
)
yield document
if page.after is not None:
yield Document(
page_content="Next Page Exists",
metadata={"after": page.after},
)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~slack~get_channel.py | import json
import logging
from typing import Optional
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from langchain_community.tools.slack.base import SlackBaseTool
class SlackGetChannel(SlackBaseTool):
"""Tool that gets Slack channel information."""
name: str = "get_channelid_name_dict"
description: str = "Use this tool to get channelid-name dict."
def _run(
self,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
try:
logging.getLogger(__name__)
result = self.client.conversations_list()
channels = result["channels"]
filtered_result = [
{key: channel[key] for key in ("id", "name", "created", "num_members")}
for channel in channels
if "id" in channel
and "name" in channel
and "created" in channel
and "num_members" in channel
]
return json.dumps(filtered_result)
except Exception as e:
return "Error creating conversation: {}".format(e)
| [
"Use this tool to get channelid-name dict."
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~mastodon.py | from __future__ import annotations
import os
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
if TYPE_CHECKING:
import mastodon
def _dependable_mastodon_import() -> mastodon:
try:
import mastodon
except ImportError:
raise ImportError(
"Mastodon.py package not found, "
"please install it with `pip install Mastodon.py`"
)
return mastodon
class MastodonTootsLoader(BaseLoader):
"""Load the `Mastodon` 'toots'."""
def __init__(
self,
mastodon_accounts: Sequence[str],
number_toots: Optional[int] = 100,
exclude_replies: bool = False,
access_token: Optional[str] = None,
api_base_url: str = "https://mastodon.social",
):
"""Instantiate Mastodon toots loader.
Args:
mastodon_accounts: The list of Mastodon accounts to query.
number_toots: How many toots to pull for each account. Defaults to 100.
exclude_replies: Whether to exclude reply toots from the load.
Defaults to False.
access_token: An access token if toots are loaded as a Mastodon app. Can
also be specified via the environment variables "MASTODON_ACCESS_TOKEN".
api_base_url: A Mastodon API base URL to talk to, if not using the default.
Defaults to "https://mastodon.social".
"""
mastodon = _dependable_mastodon_import()
access_token = access_token or os.environ.get("MASTODON_ACCESS_TOKEN")
self.api = mastodon.Mastodon(
access_token=access_token, api_base_url=api_base_url
)
self.mastodon_accounts = mastodon_accounts
self.number_toots = number_toots
self.exclude_replies = exclude_replies
def load(self) -> List[Document]:
"""Load toots into documents."""
results: List[Document] = []
for account in self.mastodon_accounts:
user = self.api.account_lookup(account)
toots = self.api.account_statuses(
user.id,
only_media=False,
pinned=False,
exclude_replies=self.exclude_replies,
exclude_reblogs=True,
limit=self.number_toots,
)
docs = self._format_toots(toots, user)
results.extend(docs)
return results
def _format_toots(
self, toots: List[Dict[str, Any]], user_info: dict
) -> Iterable[Document]:
"""Format toots into documents.
Adding user info, and selected toot fields into the metadata.
"""
for toot in toots:
metadata = {
"created_at": toot["created_at"],
"user_info": user_info,
"is_reply": toot["in_reply_to_id"] is not None,
}
yield Document(
page_content=toot["content"],
metadata=metadata,
)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~url_selenium.py | """Loader that uses Selenium to load a page, then uses unstructured to load the html.
"""
import logging
from typing import TYPE_CHECKING, List, Literal, Optional, Union
if TYPE_CHECKING:
from selenium.webdriver import Chrome, Firefox
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
logger = logging.getLogger(__name__)
class SeleniumURLLoader(BaseLoader):
"""Load `HTML` pages with `Selenium` and parse with `Unstructured`.
This is useful for loading pages that require javascript to render.
Attributes:
urls (List[str]): List of URLs to load.
continue_on_failure (bool): If True, continue loading other URLs on failure.
browser (str): The browser to use, either 'chrome' or 'firefox'.
binary_location (Optional[str]): The location of the browser binary.
executable_path (Optional[str]): The path to the browser executable.
headless (bool): If True, the browser will run in headless mode.
arguments [List[str]]: List of arguments to pass to the browser.
"""
def __init__(
self,
urls: List[str],
continue_on_failure: bool = True,
browser: Literal["chrome", "firefox"] = "chrome",
binary_location: Optional[str] = None,
executable_path: Optional[str] = None,
headless: bool = True,
arguments: List[str] = [],
):
"""Load a list of URLs using Selenium and unstructured."""
try:
import selenium # noqa:F401
except ImportError:
raise ImportError(
"selenium package not found, please install it with "
"`pip install selenium`"
)
try:
import unstructured # noqa:F401
except ImportError:
raise ImportError(
"unstructured package not found, please install it with "
"`pip install unstructured`"
)
self.urls = urls
self.continue_on_failure = continue_on_failure
self.browser = browser
self.binary_location = binary_location
self.executable_path = executable_path
self.headless = headless
self.arguments = arguments
def _get_driver(self) -> Union["Chrome", "Firefox"]:
"""Create and return a WebDriver instance based on the specified browser.
Raises:
ValueError: If an invalid browser is specified.
Returns:
Union[Chrome, Firefox]: A WebDriver instance for the specified browser.
"""
if self.browser.lower() == "chrome":
from selenium.webdriver import Chrome
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.chrome.service import Service
chrome_options = ChromeOptions()
for arg in self.arguments:
chrome_options.add_argument(arg)
if self.headless:
chrome_options.add_argument("--headless")
chrome_options.add_argument("--no-sandbox")
if self.binary_location is not None:
chrome_options.binary_location = self.binary_location
if self.executable_path is None:
return Chrome(options=chrome_options)
return Chrome(
options=chrome_options,
service=Service(executable_path=self.executable_path),
)
elif self.browser.lower() == "firefox":
from selenium.webdriver import Firefox
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.firefox.service import Service
firefox_options = FirefoxOptions()
for arg in self.arguments:
firefox_options.add_argument(arg)
if self.headless:
firefox_options.add_argument("--headless")
if self.binary_location is not None:
firefox_options.binary_location = self.binary_location
if self.executable_path is None:
return Firefox(options=firefox_options)
return Firefox(
options=firefox_options,
service=Service(executable_path=self.executable_path),
)
else:
raise ValueError("Invalid browser specified. Use 'chrome' or 'firefox'.")
def _build_metadata(self, url: str, driver: Union["Chrome", "Firefox"]) -> dict:
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
"""Build metadata based on the contents of the webpage"""
metadata = {
"source": url,
"title": "No title found.",
"description": "No description found.",
"language": "No language found.",
}
if title := driver.title:
metadata["title"] = title
try:
if description := driver.find_element(
By.XPATH, '//meta[@name="description"]'
):
metadata["description"] = (
description.get_attribute("content") or "No description found."
)
except NoSuchElementException:
pass
try:
if html_tag := driver.find_element(By.TAG_NAME, "html"):
metadata["language"] = (
html_tag.get_attribute("lang") or "No language found."
)
except NoSuchElementException:
pass
return metadata
def load(self) -> List[Document]:
"""Load the specified URLs using Selenium and create Document instances.
Returns:
List[Document]: A list of Document instances with loaded content.
"""
from unstructured.partition.html import partition_html
docs: List[Document] = list()
driver = self._get_driver()
for url in self.urls:
try:
driver.get(url)
page_content = driver.page_source
elements = partition_html(text=page_content)
text = "\n\n".join([str(el) for el in elements])
metadata = self._build_metadata(url, driver)
docs.append(Document(page_content=text, metadata=metadata))
except Exception as e:
if self.continue_on_failure:
logger.error(f"Error fetching or processing {url}, exception: {e}")
else:
raise e
driver.quit()
return docs
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~chat_models~baidu_qianfan_endpoint.py | from __future__ import annotations
import logging
from typing import Any, AsyncIterator, Dict, Iterator, List, Mapping, Optional, cast
from libs.core.langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.chat_models import BaseChatModel
from libs.core.langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ChatMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
from libs.core.langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
from libs.core.langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
logger = logging.getLogger(__name__)
def convert_message_to_dict(message: BaseMessage) -> dict:
"""Convert a message to a dictionary that can be passed to the API."""
message_dict: Dict[str, Any]
if isinstance(message, ChatMessage):
message_dict = {"role": message.role, "content": message.content}
elif isinstance(message, HumanMessage):
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "function_call" in message.additional_kwargs:
message_dict["function_call"] = message.additional_kwargs["function_call"]
# If function call only, content is None not empty string
if message_dict["content"] == "":
message_dict["content"] = None
elif isinstance(message, FunctionMessage):
message_dict = {
"role": "function",
"content": message.content,
"name": message.name,
}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
def _convert_dict_to_message(_dict: Mapping[str, Any]) -> AIMessage:
content = _dict.get("result", "") or ""
if _dict.get("function_call"):
additional_kwargs = {"function_call": dict(_dict["function_call"])}
if "thoughts" in additional_kwargs["function_call"]:
# align to api sample, which affects the llm function_call output
additional_kwargs["function_call"].pop("thoughts")
else:
additional_kwargs = {}
return AIMessage(
content=content,
additional_kwargs={**_dict.get("body", {}), **additional_kwargs},
)
class QianfanChatEndpoint(BaseChatModel):
"""Baidu Qianfan chat models.
To use, you should have the ``qianfan`` python package installed, and
the environment variable ``qianfan_ak`` and ``qianfan_sk`` set with your
API key and Secret Key.
ak, sk are required parameters
which you could get from https://cloud.baidu.com/product/wenxinworkshop
Example:
.. code-block:: python
from langchain_community.chat_models import QianfanChatEndpoint
qianfan_chat = QianfanChatEndpoint(model="ERNIE-Bot",
endpoint="your_endpoint", qianfan_ak="your_ak", qianfan_sk="your_sk")
"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
client: Any
qianfan_ak: Optional[SecretStr] = None
qianfan_sk: Optional[SecretStr] = None
streaming: Optional[bool] = False
"""Whether to stream the results or not."""
request_timeout: Optional[int] = 60
"""request timeout for chat http requests"""
top_p: Optional[float] = 0.8
temperature: Optional[float] = 0.95
penalty_score: Optional[float] = 1
"""Model params, only supported in ERNIE-Bot and ERNIE-Bot-turbo.
In the case of other model, passing these params will not affect the result.
"""
model: str = "ERNIE-Bot-turbo"
"""Model name.
you could get from https://cloud.baidu.com/doc/WENXINWORKSHOP/s/Nlks5zkzu
preset models are mapping to an endpoint.
`model` will be ignored if `endpoint` is set.
Default is ERNIE-Bot-turbo.
"""
endpoint: Optional[str] = None
"""Endpoint of the Qianfan LLM, required if custom model used."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
values["qianfan_ak"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"qianfan_ak",
"QIANFAN_AK",
default="",
)
)
values["qianfan_sk"] = convert_to_secret_str(
get_from_dict_or_env(
values,
"qianfan_sk",
"QIANFAN_SK",
default="",
)
)
params = {
"model": values["model"],
"stream": values["streaming"],
}
if values["qianfan_ak"].get_secret_value() != "":
params["ak"] = values["qianfan_ak"].get_secret_value()
if values["qianfan_sk"].get_secret_value() != "":
params["sk"] = values["qianfan_sk"].get_secret_value()
if values["endpoint"] is not None and values["endpoint"] != "":
params["endpoint"] = values["endpoint"]
try:
import qianfan
values["client"] = qianfan.ChatCompletion(**params)
except ImportError:
raise ValueError(
"qianfan package not found, please install it with "
"`pip install qianfan`"
)
return values
@property
def _identifying_params(self) -> Dict[str, Any]:
return {
**{"endpoint": self.endpoint, "model": self.model},
**super()._identifying_params,
}
@property
def _llm_type(self) -> str:
"""Return type of chat_model."""
return "baidu-qianfan-chat"
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Qianfan API."""
normal_params = {
"model": self.model,
"endpoint": self.endpoint,
"stream": self.streaming,
"request_timeout": self.request_timeout,
"top_p": self.top_p,
"temperature": self.temperature,
"penalty_score": self.penalty_score,
}
return {**normal_params, **self.model_kwargs}
def _convert_prompt_msg_params(
self,
messages: List[BaseMessage],
**kwargs: Any,
) -> Dict[str, Any]:
"""
Converts a list of messages into a dictionary containing the message content
and default parameters.
Args:
messages (List[BaseMessage]): The list of messages.
**kwargs (Any): Optional arguments to add additional parameters to the
resulting dictionary.
Returns:
Dict[str, Any]: A dictionary containing the message content and default
parameters.
"""
messages_dict: Dict[str, Any] = {
"messages": [
convert_message_to_dict(m)
for m in messages
if not isinstance(m, SystemMessage)
]
}
for i in [i for i, m in enumerate(messages) if isinstance(m, SystemMessage)]:
if "system" not in messages_dict:
messages_dict["system"] = ""
messages_dict["system"] += cast(str, messages[i].content) + "\n"
return {
**messages_dict,
**self._default_params,
**kwargs,
}
def _generate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
"""Call out to an qianfan models endpoint for each generation with a prompt.
Args:
messages: The messages to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = qianfan_model("Tell me a joke.")
"""
if self.streaming:
completion = ""
for chunk in self._stream(messages, stop, run_manager, **kwargs):
completion += chunk.text
lc_msg = AIMessage(content=completion, additional_kwargs={})
gen = ChatGeneration(
message=lc_msg,
generation_info=dict(finish_reason="stop"),
)
return ChatResult(
generations=[gen],
llm_output={"token_usage": {}, "model_name": self.model},
)
params = self._convert_prompt_msg_params(messages, **kwargs)
response_payload = self.client.do(**params)
lc_msg = _convert_dict_to_message(response_payload)
gen = ChatGeneration(
message=lc_msg,
generation_info={
"finish_reason": "stop",
**response_payload.get("body", {}),
},
)
token_usage = response_payload.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model}
return ChatResult(generations=[gen], llm_output=llm_output)
async def _agenerate(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> ChatResult:
if self.streaming:
completion = ""
token_usage = {}
async for chunk in self._astream(messages, stop, run_manager, **kwargs):
completion += chunk.text
lc_msg = AIMessage(content=completion, additional_kwargs={})
gen = ChatGeneration(
message=lc_msg,
generation_info=dict(finish_reason="stop"),
)
return ChatResult(
generations=[gen],
llm_output={"token_usage": {}, "model_name": self.model},
)
params = self._convert_prompt_msg_params(messages, **kwargs)
response_payload = await self.client.ado(**params)
lc_msg = _convert_dict_to_message(response_payload)
generations = []
gen = ChatGeneration(
message=lc_msg,
generation_info={
"finish_reason": "stop",
**response_payload.get("body", {}),
},
)
generations.append(gen)
token_usage = response_payload.get("usage", {})
llm_output = {"token_usage": token_usage, "model_name": self.model}
return ChatResult(generations=generations, llm_output=llm_output)
def _stream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
params = self._convert_prompt_msg_params(messages, **kwargs)
for res in self.client.do(**params):
if res:
msg = _convert_dict_to_message(res)
chunk = ChatGenerationChunk(
text=res["result"],
message=AIMessageChunk(
content=msg.content,
role="assistant",
additional_kwargs=msg.additional_kwargs,
),
)
yield chunk
if run_manager:
run_manager.on_llm_new_token(chunk.text, chunk=chunk)
async def _astream(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> AsyncIterator[ChatGenerationChunk]:
params = self._convert_prompt_msg_params(messages, **kwargs)
async for res in await self.client.ado(**params):
if res:
msg = _convert_dict_to_message(res)
chunk = ChatGenerationChunk(
text=res["result"],
message=AIMessageChunk(
content=msg.content,
role="assistant",
additional_kwargs=msg.additional_kwargs,
),
)
yield chunk
if run_manager:
await run_manager.on_llm_new_token(chunk.text, chunk=chunk)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~fastembed.py | from typing import Any, Dict, List, Literal, Optional
import numpy as np
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, root_validator
class FastEmbedEmbeddings(BaseModel, Embeddings):
"""Qdrant FastEmbedding models.
FastEmbed is a lightweight, fast, Python library built for embedding generation.
See more documentation at:
* https://github.com/qdrant/fastembed/
* https://qdrant.github.io/fastembed/
To use this class, you must install the `fastembed` Python package.
`pip install fastembed`
Example:
from langchain_community.embeddings import FastEmbedEmbeddings
fastembed = FastEmbedEmbeddings()
"""
model_name: str = "BAAI/bge-small-en-v1.5"
"""Name of the FastEmbedding model to use
Defaults to "BAAI/bge-small-en-v1.5"
Find the list of supported models at
https://qdrant.github.io/fastembed/examples/Supported_Models/
"""
max_length: int = 512
"""The maximum number of tokens. Defaults to 512.
Unknown behavior for values > 512.
"""
cache_dir: Optional[str]
"""The path to the cache directory.
Defaults to `local_cache` in the parent directory
"""
threads: Optional[int]
"""The number of threads single onnxruntime session can use.
Defaults to None
"""
doc_embed_type: Literal["default", "passage"] = "default"
"""Type of embedding to use for documents
"default": Uses FastEmbed's default embedding method
"passage": Prefixes the text with "passage" before embedding.
"""
_model: Any # : :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that FastEmbed has been installed."""
try:
from fastembed.embedding import FlagEmbedding
model_name = values.get("model_name")
max_length = values.get("max_length")
cache_dir = values.get("cache_dir")
threads = values.get("threads")
values["_model"] = FlagEmbedding(
model_name=model_name,
max_length=max_length,
cache_dir=cache_dir,
threads=threads,
)
except ImportError as ie:
raise ImportError(
"Could not import 'fastembed' Python package. "
"Please install it with `pip install fastembed`."
) from ie
return values
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Generate embeddings for documents using FastEmbed.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings: List[np.ndarray]
if self.doc_embed_type == "passage":
embeddings = self._model.passage_embed(texts)
else:
embeddings = self._model.embed(texts)
return [e.tolist() for e in embeddings]
def embed_query(self, text: str) -> List[float]:
"""Generate query embeddings using FastEmbed.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
query_embeddings: np.ndarray = next(self._model.query_embed(text))
return query_embeddings.tolist()
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~agents~output_parsers~self_ask.py | from typing import Sequence, Union
from libs.core.langchain_core.agents import AgentAction, AgentFinish
from libs.core.langchain_core.exceptions import OutputParserException
from langchain.agents.agent import AgentOutputParser
class SelfAskOutputParser(AgentOutputParser):
"""Parses self-ask style LLM calls.
Expects output to be in one of two formats.
If the output signals that an action should be taken,
should be in the below format. This will result in an AgentAction
being returned.
```
Thoughts go here...
Follow up: what is the temperature in SF?
```
If the output signals that a final answer should be given,
should be in the below format. This will result in an AgentFinish
being returned.
```
Thoughts go here...
So the final answer is: The temperature is 100 degrees
```
"""
followups: Sequence[str] = ("Follow up:", "Followup:")
finish_string: str = "So the final answer is: "
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
last_line = text.split("\n")[-1]
if not any([follow in last_line for follow in self.followups]):
if self.finish_string not in last_line:
raise OutputParserException(f"Could not parse output: {text}")
return AgentFinish({"output": last_line[len(self.finish_string) :]}, text)
after_colon = text.split(":")[-1].strip()
return AgentAction("Intermediate Answer", after_colon, text)
@property
def _type(self) -> str:
return "self_ask"
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~self_hosted_hugging_face.py | import importlib.util
import logging
from typing import Any, Callable, List, Mapping, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.pydantic_v1 import Extra
from langchain_community.llms.self_hosted import SelfHostedPipeline
from langchain_community.llms.utils import enforce_stop_tokens
DEFAULT_MODEL_ID = "gpt2"
DEFAULT_TASK = "text-generation"
VALID_TASKS = ("text2text-generation", "text-generation", "summarization")
logger = logging.getLogger(__name__)
def _generate_text(
pipeline: Any,
prompt: str,
*args: Any,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> str:
"""Inference function to send to the remote hardware.
Accepts a Hugging Face pipeline (or more likely,
a key pointing to such a pipeline on the cluster's object store)
and returns generated text.
"""
response = pipeline(prompt, *args, **kwargs)
if pipeline.task == "text-generation":
# Text generation return includes the starter text.
text = response[0]["generated_text"][len(prompt) :]
elif pipeline.task == "text2text-generation":
text = response[0]["generated_text"]
elif pipeline.task == "summarization":
text = response[0]["summary_text"]
else:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
def _load_transformer(
model_id: str = DEFAULT_MODEL_ID,
task: str = DEFAULT_TASK,
device: int = 0,
model_kwargs: Optional[dict] = None,
) -> Any:
"""Inference function to send to the remote hardware.
Accepts a huggingface model_id and returns a pipeline for the task.
"""
from transformers import AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer
from transformers import pipeline as hf_pipeline
_model_kwargs = model_kwargs or {}
tokenizer = AutoTokenizer.from_pretrained(model_id, **_model_kwargs)
try:
if task == "text-generation":
model = AutoModelForCausalLM.from_pretrained(model_id, **_model_kwargs)
elif task in ("text2text-generation", "summarization"):
model = AutoModelForSeq2SeqLM.from_pretrained(model_id, **_model_kwargs)
else:
raise ValueError(
f"Got invalid task {task}, "
f"currently only {VALID_TASKS} are supported"
)
except ImportError as e:
raise ValueError(
f"Could not load the {task} model due to missing dependencies."
) from e
if importlib.util.find_spec("torch") is not None:
import torch
cuda_device_count = torch.cuda.device_count()
if device < -1 or (device >= cuda_device_count):
raise ValueError(
f"Got device=={device}, "
f"device is required to be within [-1, {cuda_device_count})"
)
if device < 0 and cuda_device_count > 0:
logger.warning(
"Device has %d GPUs available. "
"Provide device={deviceId} to `from_model_id` to use available"
"GPUs for execution. deviceId is -1 for CPU and "
"can be a positive integer associated with CUDA device id.",
cuda_device_count,
)
pipeline = hf_pipeline(
task=task,
model=model,
tokenizer=tokenizer,
device=device,
model_kwargs=_model_kwargs,
)
if pipeline.task not in VALID_TASKS:
raise ValueError(
f"Got invalid task {pipeline.task}, "
f"currently only {VALID_TASKS} are supported"
)
return pipeline
class SelfHostedHuggingFaceLLM(SelfHostedPipeline):
"""HuggingFace Pipeline API to run on self-hosted remote hardware.
Supported hardware includes auto-launched instances on AWS, GCP, Azure,
and Lambda, as well as servers specified
by IP address and SSH credentials (such as on-prem, or another cloud
like Paperspace, Coreweave, etc.).
To use, you should have the ``runhouse`` python package installed.
Only supports `text-generation`, `text2text-generation` and `summarization` for now.
Example using from_model_id:
.. code-block:: python
from langchain_community.llms import SelfHostedHuggingFaceLLM
import runhouse as rh
gpu = rh.cluster(name="rh-a10x", instance_type="A100:1")
hf = SelfHostedHuggingFaceLLM(
model_id="google/flan-t5-large", task="text2text-generation",
hardware=gpu
)
Example passing fn that generates a pipeline (bc the pipeline is not serializable):
.. code-block:: python
from langchain_community.llms import SelfHostedHuggingFaceLLM
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import runhouse as rh
def get_pipeline():
model_id = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
pipe = pipeline(
"text-generation", model=model, tokenizer=tokenizer
)
return pipe
hf = SelfHostedHuggingFaceLLM(
model_load_fn=get_pipeline, model_id="gpt2", hardware=gpu)
"""
model_id: str = DEFAULT_MODEL_ID
"""Hugging Face model_id to load the model."""
task: str = DEFAULT_TASK
"""Hugging Face task ("text-generation", "text2text-generation" or
"summarization")."""
device: int = 0
"""Device to use for inference. -1 for CPU, 0 for GPU, 1 for second GPU, etc."""
model_kwargs: Optional[dict] = None
"""Keyword arguments to pass to the model."""
hardware: Any
"""Remote hardware to send the inference function to."""
model_reqs: List[str] = ["./", "transformers", "torch"]
"""Requirements to install on hardware to inference the model."""
model_load_fn: Callable = _load_transformer
"""Function to load the model remotely on the server."""
inference_fn: Callable = _generate_text #: :meta private:
"""Inference function to send to the remote hardware."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def __init__(self, **kwargs: Any):
"""Construct the pipeline remotely using an auxiliary function.
The load function needs to be importable to be imported
and run on the server, i.e. in a module and not a REPL or closure.
Then, initialize the remote inference function.
"""
load_fn_kwargs = {
"model_id": kwargs.get("model_id", DEFAULT_MODEL_ID),
"task": kwargs.get("task", DEFAULT_TASK),
"device": kwargs.get("device", 0),
"model_kwargs": kwargs.get("model_kwargs", None),
}
super().__init__(load_fn_kwargs=load_fn_kwargs, **kwargs)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_id": self.model_id},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
return "selfhosted_huggingface_pipeline"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
return self.client(
pipeline=self.pipeline_ref, prompt=prompt, stop=stop, **kwargs
)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~cohere.py | from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from libs.core.langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.load.serializable import Serializable
from libs.core.langchain_core.pydantic_v1 import Extra, Field, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
def _create_retry_decorator(llm: Cohere) -> Callable[[Any], Any]:
import cohere
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(llm.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(retry_if_exception_type(cohere.error.CohereError)),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def completion_with_retry(llm: Cohere, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return llm.client.generate(**kwargs)
return _completion_with_retry(**kwargs)
def acompletion_with_retry(llm: Cohere, **kwargs: Any) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm)
@retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
return await llm.async_client.generate(**kwargs)
return _completion_with_retry(**kwargs)
class BaseCohere(Serializable):
"""Base class for Cohere models."""
client: Any #: :meta private:
async_client: Any #: :meta private:
model: Optional[str] = Field(default=None)
"""Model name to use."""
temperature: float = 0.75
"""A non-negative float that tunes the degree of randomness in generation."""
cohere_api_key: Optional[str] = None
stop: Optional[List[str]] = None
streaming: bool = Field(default=False)
"""Whether to stream the results."""
user_agent: str = "langchain"
"""Identifier for the application making the request."""
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
import cohere
except ImportError:
raise ImportError(
"Could not import cohere python package. "
"Please install it with `pip install cohere`."
)
else:
cohere_api_key = get_from_dict_or_env(
values, "cohere_api_key", "COHERE_API_KEY"
)
client_name = values["user_agent"]
values["client"] = cohere.Client(cohere_api_key, client_name=client_name)
values["async_client"] = cohere.AsyncClient(
cohere_api_key, client_name=client_name
)
return values
class Cohere(LLM, BaseCohere):
"""Cohere large language models.
To use, you should have the ``cohere`` python package installed, and the
environment variable ``COHERE_API_KEY`` set with your API key, or pass
it as a named parameter to the constructor.
Example:
.. code-block:: python
from langchain_community.llms import Cohere
cohere = Cohere(model="gptd-instruct-tft", cohere_api_key="my-api-key")
"""
max_tokens: int = 256
"""Denotes the number of tokens to predict per generation."""
k: int = 0
"""Number of most likely tokens to consider at each step."""
p: int = 1
"""Total probability mass of tokens to consider at each step."""
frequency_penalty: float = 0.0
"""Penalizes repeated tokens according to frequency. Between 0 and 1."""
presence_penalty: float = 0.0
"""Penalizes repeated tokens. Between 0 and 1."""
truncate: Optional[str] = None
"""Specify how the client handles inputs longer than the maximum token
length: Truncate from START, END or NONE"""
max_retries: int = 10
"""Maximum number of retries to make when generating."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling Cohere API."""
return {
"max_tokens": self.max_tokens,
"temperature": self.temperature,
"k": self.k,
"p": self.p,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"truncate": self.truncate,
}
@property
def lc_secrets(self) -> Dict[str, str]:
return {"cohere_api_key": "COHERE_API_KEY"}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "cohere"
def _invocation_params(self, stop: Optional[List[str]], **kwargs: Any) -> dict:
params = self._default_params
if self.stop is not None and stop is not None:
raise ValueError("`stop` found in both the input and default params.")
elif self.stop is not None:
params["stop_sequences"] = self.stop
else:
params["stop_sequences"] = stop
return {**params, **kwargs}
def _process_response(self, response: Any, stop: Optional[List[str]]) -> str:
text = response.generations[0].text
# If stop tokens are provided, Cohere's endpoint returns them.
# In order to make this consistent with other endpoints, we strip them.
if stop:
text = enforce_stop_tokens(text, stop)
return text
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call out to Cohere's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = cohere("Tell me a joke.")
"""
params = self._invocation_params(stop, **kwargs)
response = completion_with_retry(
self, model=self.model, prompt=prompt, **params
)
_stop = params.get("stop_sequences")
return self._process_response(response, _stop)
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Async call out to Cohere's generate endpoint.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = await cohere("Tell me a joke.")
"""
params = self._invocation_params(stop, **kwargs)
response = await acompletion_with_retry(
self, model=self.model, prompt=prompt, **params
)
_stop = params.get("stop_sequences")
return self._process_response(response, _stop)
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~llms~test_gooseai.py | """Test GooseAI"""
import pytest
from libs.core.langchain_core.pydantic_v1 import SecretStr
from pytest import MonkeyPatch
from langchain_community.llms.gooseai import GooseAI
from langchain_community.utils.openai import is_openai_v1
def _openai_v1_installed() -> bool:
try:
return is_openai_v1()
except Exception as _:
return False
@pytest.mark.requires("openai")
def test_api_key_is_secret_string() -> None:
llm = GooseAI(gooseai_api_key="secret-api-key")
assert isinstance(llm.gooseai_api_key, SecretStr)
assert llm.gooseai_api_key.get_secret_value() == "secret-api-key"
@pytest.mark.skipif(
_openai_v1_installed(), reason="GooseAI currently only works with openai<1"
)
@pytest.mark.requires("openai")
def test_api_key_masked_when_passed_via_constructor() -> None:
llm = GooseAI(gooseai_api_key="secret-api-key")
assert str(llm.gooseai_api_key) == "**********"
assert "secret-api-key" not in repr(llm.gooseai_api_key)
assert "secret-api-key" not in repr(llm)
@pytest.mark.skipif(
_openai_v1_installed(), reason="GooseAI currently only works with openai<1"
)
@pytest.mark.requires("openai")
def test_api_key_masked_when_passed_from_env() -> None:
with MonkeyPatch.context() as mp:
mp.setenv("GOOSEAI_API_KEY", "secret-api-key")
llm = GooseAI()
assert str(llm.gooseai_api_key) == "**********"
assert "secret-api-key" not in repr(llm.gooseai_api_key)
assert "secret-api-key" not in repr(llm)
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~document_loaders~test_github.py | import pytest
from libs.core.langchain_core.documents import Document
from pytest_mock import MockerFixture
from langchain_community.document_loaders.github import GitHubIssuesLoader
def test_initialization() -> None:
loader = GitHubIssuesLoader(repo="repo", access_token="access_token")
assert loader.repo == "repo"
assert loader.access_token == "access_token"
assert loader.headers == {
"Accept": "application/vnd.github+json",
"Authorization": "Bearer access_token",
}
def test_initialization_ghe() -> None:
loader = GitHubIssuesLoader(
repo="repo",
access_token="access_token",
github_api_url="https://github.example.com/api/v3",
)
assert loader.repo == "repo"
assert loader.access_token == "access_token"
assert loader.github_api_url == "https://github.example.com/api/v3"
assert loader.headers == {
"Accept": "application/vnd.github+json",
"Authorization": "Bearer access_token",
}
def test_invalid_initialization() -> None:
# Invalid parameter
with pytest.raises(ValueError):
GitHubIssuesLoader(invalid="parameter")
# Invalid value for valid parameter
with pytest.raises(ValueError):
GitHubIssuesLoader(state="invalid_state")
# Invalid type for labels
with pytest.raises(ValueError):
GitHubIssuesLoader(labels="not_a_list")
# Invalid date format for since
with pytest.raises(ValueError):
GitHubIssuesLoader(since="not_a_date")
def test_load(mocker: MockerFixture) -> None:
mocker.patch(
"requests.get", return_value=mocker.MagicMock(json=lambda: [], links=None)
)
loader = GitHubIssuesLoader(repo="repo", access_token="access_token")
documents = loader.load()
assert documents == []
def test_parse_issue() -> None:
issue = {
"html_url": "https://github.com/repo/issue/1",
"title": "Example Issue 1",
"user": {"login": "username1"},
"created_at": "2023-01-01T00:00:00Z",
"comments": 1,
"state": "open",
"labels": [{"name": "bug"}],
"assignee": {"login": "username2"},
"milestone": {"title": "v1.0"},
"locked": "False",
"number": "1",
"body": "This is an example issue 1",
}
expected_document = Document(
page_content=issue["body"], # type: ignore
metadata={
"url": issue["html_url"],
"title": issue["title"],
"creator": issue["user"]["login"], # type: ignore
"created_at": issue["created_at"],
"comments": issue["comments"],
"state": issue["state"],
"labels": [label["name"] for label in issue["labels"]], # type: ignore
"assignee": issue["assignee"]["login"], # type: ignore
"milestone": issue["milestone"]["title"], # type: ignore
"locked": issue["locked"],
"number": issue["number"],
"is_pull_request": False,
},
)
loader = GitHubIssuesLoader(repo="repo", access_token="access_token")
document = loader.parse_issue(issue)
assert document == expected_document
def test_url() -> None:
# No parameters
loader = GitHubIssuesLoader(repo="repo", access_token="access_token")
assert loader.url == "https://api.github.com/repos/repo/issues?"
# parameters: state, sort
loader = GitHubIssuesLoader(
repo="repo", access_token="access_token", state="open", sort="created"
)
assert (
loader.url == "https://api.github.com/repos/repo/issues?state=open&sort=created"
)
# parameters: milestone, state, assignee, creator, mentioned, labels, sort,
# direction, since
loader = GitHubIssuesLoader(
repo="repo",
access_token="access_token",
milestone="*",
state="closed",
assignee="user1",
creator="user2",
mentioned="user3",
labels=["bug", "ui", "@high"],
sort="comments",
direction="asc",
since="2023-05-26T00:00:00Z",
)
assert loader.url == (
"https://api.github.com/repos/repo/issues?milestone=*&state=closed"
"&assignee=user1&creator=user2&mentioned=user3&labels=bug,ui,@high"
"&sort=comments&direction=asc&since=2023-05-26T00:00:00Z"
)
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~unit_tests~chat_models~test_javelin_ai_gateway.py | """Test `Javelin AI Gateway` chat models"""
import pytest
from libs.core.langchain_core.pydantic_v1 import SecretStr
from langchain_community.chat_models import ChatJavelinAIGateway
@pytest.mark.requires("javelin_sdk")
def test_api_key_is_secret_string() -> None:
llm = ChatJavelinAIGateway(
gateway_uri="<javelin-ai-gateway-uri>",
route="<javelin-ai-gateway-chat-route>",
javelin_api_key="secret-api-key",
params={"temperature": 0.1},
)
assert isinstance(llm.javelin_api_key, SecretStr)
assert llm.javelin_api_key.get_secret_value() == "secret-api-key"
@pytest.mark.requires("javelin_sdk")
def test_api_key_masked_when_passed_via_constructor() -> None:
llm = ChatJavelinAIGateway(
gateway_uri="<javelin-ai-gateway-uri>",
route="<javelin-ai-gateway-chat-route>",
javelin_api_key="secret-api-key",
params={"temperature": 0.1},
)
assert str(llm.javelin_api_key) == "**********"
assert "secret-api-key" not in repr(llm.javelin_api_key)
assert "secret-api-key" not in repr(llm)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~azure_cognitive_services~speech2text.py | from __future__ import annotations
import logging
import time
from typing import Any, Dict, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import root_validator
from libs.core.langchain_core.tools import BaseTool
from libs.core.langchain_core.utils import get_from_dict_or_env
from langchain_community.tools.azure_cognitive_services.utils import (
detect_file_src_type,
download_audio_from_url,
)
logger = logging.getLogger(__name__)
class AzureCogsSpeech2TextTool(BaseTool):
"""Tool that queries the Azure Cognitive Services Speech2Text API.
In order to set this up, follow instructions at:
https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/get-started-speech-to-text?pivots=programming-language-python
"""
azure_cogs_key: str = "" #: :meta private:
azure_cogs_region: str = "" #: :meta private:
speech_language: str = "en-US" #: :meta private:
speech_config: Any #: :meta private:
name: str = "azure_cognitive_services_speech2text"
description: str = (
"A wrapper around Azure Cognitive Services Speech2Text. "
"Useful for when you need to transcribe audio to text. "
"Input should be a url to an audio file."
)
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and endpoint exists in environment."""
azure_cogs_key = get_from_dict_or_env(
values, "azure_cogs_key", "AZURE_COGS_KEY"
)
azure_cogs_region = get_from_dict_or_env(
values, "azure_cogs_region", "AZURE_COGS_REGION"
)
try:
import azure.cognitiveservices.speech as speechsdk
values["speech_config"] = speechsdk.SpeechConfig(
subscription=azure_cogs_key, region=azure_cogs_region
)
except ImportError:
raise ImportError(
"azure-cognitiveservices-speech is not installed. "
"Run `pip install azure-cognitiveservices-speech` to install."
)
return values
def _continuous_recognize(self, speech_recognizer: Any) -> str:
done = False
text = ""
def stop_cb(evt: Any) -> None:
"""callback that stop continuous recognition"""
speech_recognizer.stop_continuous_recognition_async()
nonlocal done
done = True
def retrieve_cb(evt: Any) -> None:
"""callback that retrieves the intermediate recognition results"""
nonlocal text
text += evt.result.text
# retrieve text on recognized events
speech_recognizer.recognized.connect(retrieve_cb)
# stop continuous recognition on either session stopped or canceled events
speech_recognizer.session_stopped.connect(stop_cb)
speech_recognizer.canceled.connect(stop_cb)
# Start continuous speech recognition
speech_recognizer.start_continuous_recognition_async()
while not done:
time.sleep(0.5)
return text
def _speech2text(self, audio_path: str, speech_language: str) -> str:
try:
import azure.cognitiveservices.speech as speechsdk
except ImportError:
pass
audio_src_type = detect_file_src_type(audio_path)
if audio_src_type == "local":
audio_config = speechsdk.AudioConfig(filename=audio_path)
elif audio_src_type == "remote":
tmp_audio_path = download_audio_from_url(audio_path)
audio_config = speechsdk.AudioConfig(filename=tmp_audio_path)
else:
raise ValueError(f"Invalid audio path: {audio_path}")
self.speech_config.speech_recognition_language = speech_language
speech_recognizer = speechsdk.SpeechRecognizer(self.speech_config, audio_config)
return self._continuous_recognize(speech_recognizer)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool."""
try:
text = self._speech2text(query, self.speech_language)
return text
except Exception as e:
raise RuntimeError(f"Error while running AzureCogsSpeech2TextTool: {e}")
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~utilities~twilio.py | """Util that calls Twilio."""
from typing import Any, Dict, Optional
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
class TwilioAPIWrapper(BaseModel):
"""Messaging Client using Twilio.
To use, you should have the ``twilio`` python package installed,
and the environment variables ``TWILIO_ACCOUNT_SID``, ``TWILIO_AUTH_TOKEN``, and
``TWILIO_FROM_NUMBER``, or pass `account_sid`, `auth_token`, and `from_number` as
named parameters to the constructor.
Example:
.. code-block:: python
from langchain_community.utilities.twilio import TwilioAPIWrapper
twilio = TwilioAPIWrapper(
account_sid="ACxxx",
auth_token="xxx",
from_number="+10123456789"
)
twilio.run('test', '+12484345508')
"""
client: Any #: :meta private:
account_sid: Optional[str] = None
"""Twilio account string identifier."""
auth_token: Optional[str] = None
"""Twilio auth token."""
from_number: Optional[str] = None
"""A Twilio phone number in [E.164](https://www.twilio.com/docs/glossary/what-e164)
format, an
[alphanumeric sender ID](https://www.twilio.com/docs/sms/send-messages#use-an-alphanumeric-sender-id),
or a [Channel Endpoint address](https://www.twilio.com/docs/sms/channels#channel-addresses)
that is enabled for the type of message you want to send. Phone numbers or
[short codes](https://www.twilio.com/docs/sms/api/short-code) purchased from
Twilio also work here. You cannot, for example, spoof messages from a private
cell phone number. If you are using `messaging_service_sid`, this parameter
must be empty.
""" # noqa: E501
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = False
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
try:
from twilio.rest import Client
except ImportError:
raise ImportError(
"Could not import twilio python package. "
"Please install it with `pip install twilio`."
)
account_sid = get_from_dict_or_env(values, "account_sid", "TWILIO_ACCOUNT_SID")
auth_token = get_from_dict_or_env(values, "auth_token", "TWILIO_AUTH_TOKEN")
values["from_number"] = get_from_dict_or_env(
values, "from_number", "TWILIO_FROM_NUMBER"
)
values["client"] = Client(account_sid, auth_token)
return values
def run(self, body: str, to: str) -> str:
"""Run body through Twilio and respond with message sid.
Args:
body: The text of the message you want to send. Can be up to 1,600
characters in length.
to: The destination phone number in
[E.164](https://www.twilio.com/docs/glossary/what-e164) format for
SMS/MMS or
[Channel user address](https://www.twilio.com/docs/sms/channels#channel-addresses)
for other 3rd-party channels.
""" # noqa: E501
message = self.client.messages.create(to, from_=self.from_number, body=body)
return message.sid
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~s3_directory.py | from __future__ import annotations
from typing import TYPE_CHECKING, List, Optional, Union
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.document_loaders.s3_file import S3FileLoader
if TYPE_CHECKING:
import botocore
class S3DirectoryLoader(BaseLoader):
"""Load from `Amazon AWS S3` directory."""
def __init__(
self,
bucket: str,
prefix: str = "",
*,
region_name: Optional[str] = None,
api_version: Optional[str] = None,
use_ssl: Optional[bool] = True,
verify: Union[str, bool, None] = None,
endpoint_url: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
boto_config: Optional[botocore.client.Config] = None,
):
"""Initialize with bucket and key name.
:param bucket: The name of the S3 bucket.
:param prefix: The prefix of the S3 key. Defaults to "".
:param region_name: The name of the region associated with the client.
A client is associated with a single region.
:param api_version: The API version to use. By default, botocore will
use the latest API version when creating a client. You only need
to specify this parameter if you want to use a previous API version
of the client.
:param use_ssl: Whether to use SSL. By default, SSL is used.
Note that not all services support non-ssl connections.
:param verify: Whether to verify SSL certificates.
By default SSL certificates are verified. You can provide the
following values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
uses. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
:param endpoint_url: The complete URL to use for the constructed
client. Normally, botocore will automatically construct the
appropriate URL to use when communicating with a service. You can
specify a complete URL (including the "http/https" scheme) to
override this behavior. If this value is provided, then
``use_ssl`` is ignored.
:param aws_access_key_id: The access key to use when creating
the client. This is entirely optional, and if not provided,
the credentials configured for the session will automatically
be used. You only need to provide this argument if you want
to override the credentials used for this specific client.
:param aws_secret_access_key: The secret key to use when creating
the client. Same semantics as aws_access_key_id above.
:param aws_session_token: The session token to use when creating
the client. Same semantics as aws_access_key_id above.
:type boto_config: botocore.client.Config
:param boto_config: Advanced boto3 client configuration options. If a value
is specified in the client config, its value will take precedence
over environment variables and configuration values, but not over
a value passed explicitly to the method. If a default config
object is set on the session, the config object used when creating
the client will be the result of calling ``merge()`` on the
default config with the config provided to this call.
"""
self.bucket = bucket
self.prefix = prefix
self.region_name = region_name
self.api_version = api_version
self.use_ssl = use_ssl
self.verify = verify
self.endpoint_url = endpoint_url
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.aws_session_token = aws_session_token
self.boto_config = boto_config
def load(self) -> List[Document]:
"""Load documents."""
try:
import boto3
except ImportError:
raise ImportError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
s3 = boto3.resource(
"s3",
region_name=self.region_name,
api_version=self.api_version,
use_ssl=self.use_ssl,
verify=self.verify,
endpoint_url=self.endpoint_url,
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
config=self.boto_config,
)
bucket = s3.Bucket(self.bucket)
docs = []
for obj in bucket.objects.filter(Prefix=self.prefix):
loader = S3FileLoader(
self.bucket,
obj.key,
region_name=self.region_name,
api_version=self.api_version,
use_ssl=self.use_ssl,
verify=self.verify,
endpoint_url=self.endpoint_url,
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
aws_session_token=self.aws_session_token,
boto_config=self.boto_config,
)
docs.extend(loader.load())
return docs
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~airbyte.py | from typing import Any, Callable, Iterator, List, Mapping, Optional
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.utils.utils import guard_import
from langchain_community.document_loaders.base import BaseLoader
RecordHandler = Callable[[Any, Optional[str]], Document]
class AirbyteCDKLoader(BaseLoader):
"""Load with an `Airbyte` source connector implemented using the `CDK`."""
def __init__(
self,
config: Mapping[str, Any],
source_class: Any,
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
source_class: The source connector class.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
from airbyte_cdk.models.airbyte_protocol import AirbyteRecordMessage
from airbyte_cdk.sources.embedded.base_integration import (
BaseEmbeddedIntegration,
)
from airbyte_cdk.sources.embedded.runner import CDKRunner
class CDKIntegration(BaseEmbeddedIntegration):
"""A wrapper around the CDK integration."""
def _handle_record(
self, record: AirbyteRecordMessage, id: Optional[str]
) -> Document:
if record_handler:
return record_handler(record, id)
return Document(page_content="", metadata=record.data)
self._integration = CDKIntegration(
config=config,
runner=CDKRunner(source=source_class(), name=source_class.__name__),
)
self._stream_name = stream_name
self._state = state
def load(self) -> List[Document]:
return list(self.lazy_load())
def lazy_load(self) -> Iterator[Document]:
return self._integration._load_data(
stream_name=self._stream_name, state=self._state
)
@property
def last_state(self) -> Any:
return self._integration.last_state
class AirbyteHubspotLoader(AirbyteCDKLoader):
"""Load from `Hubspot` using an `Airbyte` source connector."""
def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import(
"source_hubspot", pip_name="airbyte-source-hubspot"
).SourceHubspot
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
class AirbyteStripeLoader(AirbyteCDKLoader):
"""Load from `Stripe` using an `Airbyte` source connector."""
def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import(
"source_stripe", pip_name="airbyte-source-stripe"
).SourceStripe
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
class AirbyteTypeformLoader(AirbyteCDKLoader):
"""Load from `Typeform` using an `Airbyte` source connector."""
def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import(
"source_typeform", pip_name="airbyte-source-typeform"
).SourceTypeform
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
class AirbyteZendeskSupportLoader(AirbyteCDKLoader):
"""Load from `Zendesk Support` using an `Airbyte` source connector."""
def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import(
"source_zendesk_support", pip_name="airbyte-source-zendesk-support"
).SourceZendeskSupport
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
class AirbyteShopifyLoader(AirbyteCDKLoader):
"""Load from `Shopify` using an `Airbyte` source connector."""
def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import(
"source_shopify", pip_name="airbyte-source-shopify"
).SourceShopify
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
class AirbyteSalesforceLoader(AirbyteCDKLoader):
"""Load from `Salesforce` using an `Airbyte` source connector."""
def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import(
"source_salesforce", pip_name="airbyte-source-salesforce"
).SourceSalesforce
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
class AirbyteGongLoader(AirbyteCDKLoader):
"""Load from `Gong` using an `Airbyte` source connector."""
def __init__(
self,
config: Mapping[str, Any],
stream_name: str,
record_handler: Optional[RecordHandler] = None,
state: Optional[Any] = None,
) -> None:
"""Initializes the loader.
Args:
config: The config to pass to the source connector.
stream_name: The name of the stream to load.
record_handler: A function that takes in a record and an optional id and
returns a Document. If None, the record will be used as the document.
Defaults to None.
state: The state to pass to the source connector. Defaults to None.
"""
source_class = guard_import(
"source_gong", pip_name="airbyte-source-gong"
).SourceGong
super().__init__(
config=config,
source_class=source_class,
stream_name=stream_name,
record_handler=record_handler,
state=state,
)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~document_loaders~conllu.py | import csv
from typing import List
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
class CoNLLULoader(BaseLoader):
"""Load `CoNLL-U` files."""
def __init__(self, file_path: str):
"""Initialize with a file path."""
self.file_path = file_path
def load(self) -> List[Document]:
"""Load from a file path."""
with open(self.file_path, encoding="utf8") as f:
tsv = list(csv.reader(f, delimiter="\t"))
# If len(line) > 1, the line is not a comment
lines = [line for line in tsv if len(line) > 1]
text = ""
for i, line in enumerate(lines):
# Do not add a space after a punctuation mark or at the end of the sentence
if line[9] == "SpaceAfter=No" or i == len(lines) - 1:
text += line[1]
else:
text += line[1] + " "
metadata = {"source": self.file_path}
return [Document(page_content=text, metadata=metadata)]
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~retrievers~web_research.py | import logging
import re
from typing import List, Optional
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.prompts import BasePromptTemplate, PromptTemplate
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from libs.core.langchain_core.retrievers import BaseRetriever
from libs.core.langchain_core.vectorstores import VectorStore
from langchain.callbacks.manager import (
AsyncCallbackManagerForRetrieverRun,
CallbackManagerForRetrieverRun,
)
from langchain.chains import LLMChain
from langchain.chains.prompt_selector import ConditionalPromptSelector
from langchain.document_loaders import AsyncHtmlLoader
from langchain.document_transformers import Html2TextTransformer
from langchain.llms import LlamaCpp
from langchain.llms.base import BaseLLM
from langchain.output_parsers.pydantic import PydanticOutputParser
from langchain.text_splitter import RecursiveCharacterTextSplitter, TextSplitter
from langchain.utilities import GoogleSearchAPIWrapper
logger = logging.getLogger(__name__)
class SearchQueries(BaseModel):
"""Search queries to research for the user's goal."""
queries: List[str] = Field(
..., description="List of search queries to look up on Google"
)
DEFAULT_LLAMA_SEARCH_PROMPT = PromptTemplate(
input_variables=["question"],
template="""<<SYS>> \n You are an assistant tasked with improving Google search \
results. \n <</SYS>> \n\n [INST] Generate THREE Google search queries that \
are similar to this question. The output should be a numbered list of questions \
and each should have a question mark at the end: \n\n {question} [/INST]""",
)
DEFAULT_SEARCH_PROMPT = PromptTemplate(
input_variables=["question"],
template="""You are an assistant tasked with improving Google search \
results. Generate THREE Google search queries that are similar to \
this question. The output should be a numbered list of questions and each \
should have a question mark at the end: {question}""",
)
class LineList(BaseModel):
"""List of questions."""
lines: List[str] = Field(description="Questions")
class QuestionListOutputParser(PydanticOutputParser):
"""Output parser for a list of numbered questions."""
def __init__(self) -> None:
super().__init__(pydantic_object=LineList)
def parse(self, text: str) -> LineList:
lines = re.findall(r"\d+\..*?(?:\n|$)", text)
return LineList(lines=lines)
class WebResearchRetriever(BaseRetriever):
"""`Google Search API` retriever."""
# Inputs
vectorstore: VectorStore = Field(
..., description="Vector store for storing web pages"
)
llm_chain: LLMChain
search: GoogleSearchAPIWrapper = Field(..., description="Google Search API Wrapper")
num_search_results: int = Field(1, description="Number of pages per Google search")
text_splitter: TextSplitter = Field(
RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=50),
description="Text splitter for splitting web pages into chunks",
)
url_database: List[str] = Field(
default_factory=list, description="List of processed URLs"
)
@classmethod
def from_llm(
cls,
vectorstore: VectorStore,
llm: BaseLLM,
search: GoogleSearchAPIWrapper,
prompt: Optional[BasePromptTemplate] = None,
num_search_results: int = 1,
text_splitter: RecursiveCharacterTextSplitter = RecursiveCharacterTextSplitter(
chunk_size=1500, chunk_overlap=150
),
) -> "WebResearchRetriever":
"""Initialize from llm using default template.
Args:
vectorstore: Vector store for storing web pages
llm: llm for search question generation
search: GoogleSearchAPIWrapper
prompt: prompt to generating search questions
num_search_results: Number of pages per Google search
text_splitter: Text splitter for splitting web pages into chunks
Returns:
WebResearchRetriever
"""
if not prompt:
QUESTION_PROMPT_SELECTOR = ConditionalPromptSelector(
default_prompt=DEFAULT_SEARCH_PROMPT,
conditionals=[
(lambda llm: isinstance(llm, LlamaCpp), DEFAULT_LLAMA_SEARCH_PROMPT)
],
)
prompt = QUESTION_PROMPT_SELECTOR.get_prompt(llm)
# Use chat model prompt
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
output_parser=QuestionListOutputParser(),
)
return cls(
vectorstore=vectorstore,
llm_chain=llm_chain,
search=search,
num_search_results=num_search_results,
text_splitter=text_splitter,
)
def clean_search_query(self, query: str) -> str:
# Some search tools (e.g., Google) will
# fail to return results if query has a
# leading digit: 1. "LangCh..."
# Check if the first character is a digit
if query[0].isdigit():
# Find the position of the first quote
first_quote_pos = query.find('"')
if first_quote_pos != -1:
# Extract the part of the string after the quote
query = query[first_quote_pos + 1 :]
# Remove the trailing quote if present
if query.endswith('"'):
query = query[:-1]
return query.strip()
def search_tool(self, query: str, num_search_results: int = 1) -> List[dict]:
"""Returns num_search_results pages per Google search."""
query_clean = self.clean_search_query(query)
result = self.search.results(query_clean, num_search_results)
return result
def _get_relevant_documents(
self,
query: str,
*,
run_manager: CallbackManagerForRetrieverRun,
) -> List[Document]:
"""Search Google for documents related to the query input.
Args:
query: user query
Returns:
Relevant documents from all various urls.
"""
# Get search questions
logger.info("Generating questions for Google Search ...")
result = self.llm_chain({"question": query})
logger.info(f"Questions for Google Search (raw): {result}")
questions = getattr(result["text"], "lines", [])
logger.info(f"Questions for Google Search: {questions}")
# Get urls
logger.info("Searching for relevant urls...")
urls_to_look = []
for query in questions:
# Google search
search_results = self.search_tool(query, self.num_search_results)
logger.info("Searching for relevant urls...")
logger.info(f"Search results: {search_results}")
for res in search_results:
if res.get("link", None):
urls_to_look.append(res["link"])
# Relevant urls
urls = set(urls_to_look)
# Check for any new urls that we have not processed
new_urls = list(urls.difference(self.url_database))
logger.info(f"New URLs to load: {new_urls}")
# Load, split, and add new urls to vectorstore
if new_urls:
loader = AsyncHtmlLoader(new_urls, ignore_load_errors=True)
html2text = Html2TextTransformer()
logger.info("Indexing new urls...")
docs = loader.load()
docs = list(html2text.transform_documents(docs))
docs = self.text_splitter.split_documents(docs)
self.vectorstore.add_documents(docs)
self.url_database.extend(new_urls)
# Search for relevant splits
# TODO: make this async
logger.info("Grabbing most relevant splits from urls...")
docs = []
for query in questions:
docs.extend(self.vectorstore.similarity_search(query))
# Get unique docs
unique_documents_dict = {
(doc.page_content, tuple(sorted(doc.metadata.items()))): doc for doc in docs
}
unique_documents = list(unique_documents_dict.values())
return unique_documents
async def _aget_relevant_documents(
self,
query: str,
*,
run_manager: AsyncCallbackManagerForRetrieverRun,
) -> List[Document]:
raise NotImplementedError
| [
"You are an assistant tasked with improving Google search results. Generate THREE Google search queries that are similar to this question. The output should be a numbered list of questions and each should have a question mark at the end: {question}",
"question",
"<<SYS>> \n You are an assistant tasked with improving Google search results. \n <</SYS>> \n\n [INST] Generate THREE Google search queries that are similar to this question. The output should be a numbered list of questions and each should have a question mark at the end: \n\n {question} [/INST]"
] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~tools~office365~send_event.py | """Util that sends calendar events in Office 365.
Free, but setup is required. See link below.
https://learn.microsoft.com/en-us/graph/auth/
"""
from datetime import datetime as dt
from typing import List, Optional, Type
from libs.core.langchain_core.callbacks import CallbackManagerForToolRun
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from langchain_community.tools.office365.base import O365BaseTool
from langchain_community.tools.office365.utils import UTC_FORMAT
class SendEventSchema(BaseModel):
"""Input for CreateEvent Tool."""
body: str = Field(
...,
description="The message body to include in the event.",
)
attendees: List[str] = Field(
...,
description="The list of attendees for the event.",
)
subject: str = Field(
...,
description="The subject of the event.",
)
start_datetime: str = Field(
description=" The start datetime for the event in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC).",
)
end_datetime: str = Field(
description=" The end datetime for the event in the following format: "
' YYYY-MM-DDTHH:MM:SS±hh:mm, where "T" separates the date and time '
" components, and the time zone offset is specified as ±hh:mm. "
' For example: "2023-06-09T10:30:00+03:00" represents June 9th, '
" 2023, at 10:30 AM in a time zone with a positive offset of 3 "
" hours from Coordinated Universal Time (UTC).",
)
class O365SendEvent(O365BaseTool):
"""Tool for sending calendar events in Office 365."""
name: str = "send_event"
description: str = (
"Use this tool to create and send an event with the provided event fields."
)
args_schema: Type[SendEventSchema] = SendEventSchema
def _run(
self,
body: str,
attendees: List[str],
subject: str,
start_datetime: str,
end_datetime: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
# Get calendar object
schedule = self.account.schedule()
calendar = schedule.get_default_calendar()
event = calendar.new_event()
event.body = body
event.subject = subject
event.start = dt.strptime(start_datetime, UTC_FORMAT)
event.end = dt.strptime(end_datetime, UTC_FORMAT)
for attendee in attendees:
event.attendees.add(attendee)
# TO-DO: Look into PytzUsageWarning
event.save()
output = "Event sent: " + str(event)
return output
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~vectorstores~vespa.py | from __future__ import annotations
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, Union
from libs.core.langchain_core.documents import Document
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.vectorstores import VectorStore, VectorStoreRetriever
class VespaStore(VectorStore):
"""
`Vespa` vector store.
To use, you should have the python client library ``pyvespa`` installed.
Example:
.. code-block:: python
from langchain_community.vectorstores import VespaStore
from langchain_community.embeddings.openai import OpenAIEmbeddings
from vespa.application import Vespa
# Create a vespa client dependent upon your application,
# e.g. either connecting to Vespa Cloud or a local deployment
# such as Docker. Please refer to the PyVespa documentation on
# how to initialize the client.
vespa_app = Vespa(url="...", port=..., application_package=...)
# You need to instruct LangChain on which fields to use for embeddings
vespa_config = dict(
page_content_field="text",
embedding_field="embedding",
input_field="query_embedding",
metadata_fields=["date", "rating", "author"]
)
embedding_function = OpenAIEmbeddings()
vectorstore = VespaStore(vespa_app, embedding_function, **vespa_config)
"""
def __init__(
self,
app: Any,
embedding_function: Optional[Embeddings] = None,
page_content_field: Optional[str] = None,
embedding_field: Optional[str] = None,
input_field: Optional[str] = None,
metadata_fields: Optional[List[str]] = None,
) -> None:
"""
Initialize with a PyVespa client.
"""
try:
from vespa.application import Vespa
except ImportError:
raise ImportError(
"Could not import Vespa python package. "
"Please install it with `pip install pyvespa`."
)
if not isinstance(app, Vespa):
raise ValueError(
f"app should be an instance of vespa.application.Vespa, got {type(app)}"
)
self._vespa_app = app
self._embedding_function = embedding_function
self._page_content_field = page_content_field
self._embedding_field = embedding_field
self._input_field = input_field
self._metadata_fields = metadata_fields
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""
Add texts to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
ids: Optional list of ids associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = None
if self._embedding_function is not None:
embeddings = self._embedding_function.embed_documents(list(texts))
if ids is None:
ids = [str(f"{i+1}") for i, _ in enumerate(texts)]
batch = []
for i, text in enumerate(texts):
fields: Dict[str, Union[str, List[float]]] = {}
if self._page_content_field is not None:
fields[self._page_content_field] = text
if self._embedding_field is not None and embeddings is not None:
fields[self._embedding_field] = embeddings[i]
if metadatas is not None and self._metadata_fields is not None:
for metadata_field in self._metadata_fields:
if metadata_field in metadatas[i]:
fields[metadata_field] = metadatas[i][metadata_field]
batch.append({"id": ids[i], "fields": fields})
results = self._vespa_app.feed_batch(batch)
for result in results:
if not (str(result.status_code).startswith("2")):
raise RuntimeError(
f"Could not add document to Vespa. "
f"Error code: {result.status_code}. "
f"Message: {result.json['message']}"
)
return ids
def delete(self, ids: Optional[List[str]] = None, **kwargs: Any) -> Optional[bool]:
if ids is None:
return False
batch = [{"id": id} for id in ids]
result = self._vespa_app.delete_batch(batch)
return sum([0 if r.status_code == 200 else 1 for r in result]) == 0
def _create_query(
self, query_embedding: List[float], k: int = 4, **kwargs: Any
) -> Dict:
hits = k
doc_embedding_field = self._embedding_field
input_embedding_field = self._input_field
ranking_function = kwargs["ranking"] if "ranking" in kwargs else "default"
filter = kwargs["filter"] if "filter" in kwargs else None
approximate = kwargs["approximate"] if "approximate" in kwargs else False
approximate = "true" if approximate else "false"
yql = "select * from sources * where "
yql += f"{{targetHits: {hits}, approximate: {approximate}}}"
yql += f"nearestNeighbor({doc_embedding_field}, {input_embedding_field})"
if filter is not None:
yql += f" and {filter}"
query = {
"yql": yql,
f"input.query({input_embedding_field})": query_embedding,
"ranking": ranking_function,
"hits": hits,
}
return query
def similarity_search_by_vector_with_score(
self, query_embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
"""
Performs similarity search from a embeddings vector.
Args:
query_embedding: Embeddings vector to search for.
k: Number of results to return.
custom_query: Use this custom query instead default query (kwargs)
kwargs: other vector store specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
if "custom_query" in kwargs:
query = kwargs["custom_query"]
else:
query = self._create_query(query_embedding, k, **kwargs)
try:
response = self._vespa_app.query(body=query)
except Exception as e:
raise RuntimeError(
f"Could not retrieve data from Vespa: "
f"{e.args[0][0]['summary']}. "
f"Error: {e.args[0][0]['message']}"
)
if not str(response.status_code).startswith("2"):
raise RuntimeError(
f"Could not retrieve data from Vespa. "
f"Error code: {response.status_code}. "
f"Message: {response.json['message']}"
)
root = response.json["root"]
if "errors" in root:
import json
raise RuntimeError(json.dumps(root["errors"]))
if response is None or response.hits is None:
return []
docs = []
for child in response.hits:
page_content = child["fields"][self._page_content_field]
score = child["relevance"]
metadata = {"id": child["id"]}
if self._metadata_fields is not None:
for field in self._metadata_fields:
metadata[field] = child["fields"].get(field)
doc = Document(page_content=page_content, metadata=metadata)
docs.append((doc, score))
return docs
def similarity_search_by_vector(
self, embedding: List[float], k: int = 4, **kwargs: Any
) -> List[Document]:
results = self.similarity_search_by_vector_with_score(embedding, k, **kwargs)
return [r[0] for r in results]
def similarity_search_with_score(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Tuple[Document, float]]:
query_emb = []
if self._embedding_function is not None:
query_emb = self._embedding_function.embed_query(query)
return self.similarity_search_by_vector_with_score(query_emb, k, **kwargs)
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
results = self.similarity_search_with_score(query, k, **kwargs)
return [r[0] for r in results]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
raise NotImplementedError("MMR search not implemented")
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> List[Document]:
raise NotImplementedError("MMR search by vector not implemented")
@classmethod
def from_texts(
cls: Type[VespaStore],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> VespaStore:
vespa = cls(embedding_function=embedding, **kwargs)
vespa.add_texts(texts=texts, metadatas=metadatas, ids=ids)
return vespa
def as_retriever(self, **kwargs: Any) -> VectorStoreRetriever:
return super().as_retriever(**kwargs)
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~retrievers~docarray~fixtures.py | from __future__ import annotations
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, Generator, Tuple
import numpy as np
import pytest
from libs.core.langchain_core.pydantic_v1 import Field
if TYPE_CHECKING:
from docarray.index import (
ElasticDocIndex,
HnswDocumentIndex,
InMemoryExactNNIndex,
QdrantDocumentIndex,
WeaviateDocumentIndex,
)
from docarray.typing import NdArray
from qdrant_client.http import models as rest
from langchain_community.embeddings import FakeEmbeddings
@pytest.fixture
def init_weaviate() -> (
Generator[
Tuple[WeaviateDocumentIndex, Dict[str, Any], FakeEmbeddings],
None,
None,
]
):
"""
cd tests/integration_tests/vectorstores/docker-compose
docker compose -f weaviate.yml up
"""
from docarray import BaseDoc
from docarray.index import (
WeaviateDocumentIndex,
)
class WeaviateDoc(BaseDoc):
# When initializing the Weaviate index, denote the field
# you want to search on with `is_embedding=True`
title: str
title_embedding: NdArray[32] = Field(is_embedding=True) # type: ignore
other_emb: NdArray[32] # type: ignore
year: int
embeddings = FakeEmbeddings(size=32)
# initialize WeaviateDocumentIndex
dbconfig = WeaviateDocumentIndex.DBConfig(host="http://localhost:8080")
weaviate_db = WeaviateDocumentIndex[WeaviateDoc](
db_config=dbconfig, index_name="docarray_retriever"
)
# index data
weaviate_db.index(
[
WeaviateDoc(
title=f"My document {i}",
title_embedding=np.array(embeddings.embed_query(f"fake emb {i}")),
other_emb=np.array(embeddings.embed_query(f"other fake emb {i}")),
year=i,
)
for i in range(100)
]
)
# build a filter query
filter_query = {"path": ["year"], "operator": "LessThanEqual", "valueInt": "90"}
yield weaviate_db, filter_query, embeddings
weaviate_db._client.schema.delete_all()
@pytest.fixture
def init_elastic() -> (
Generator[Tuple[ElasticDocIndex, Dict[str, Any], FakeEmbeddings], None, None]
):
"""
cd tests/integration_tests/vectorstores/docker-compose
docker-compose -f elasticsearch.yml up
"""
from docarray import BaseDoc
from docarray.index import (
ElasticDocIndex,
)
class MyDoc(BaseDoc):
title: str
title_embedding: NdArray[32] # type: ignore
other_emb: NdArray[32] # type: ignore
year: int
embeddings = FakeEmbeddings(size=32)
# initialize ElasticDocIndex
elastic_db = ElasticDocIndex[MyDoc](
hosts="http://localhost:9200", index_name="docarray_retriever"
)
# index data
elastic_db.index(
[
MyDoc(
title=f"My document {i}",
title_embedding=np.array(embeddings.embed_query(f"fake emb {i}")),
other_emb=np.array(embeddings.embed_query(f"other fake emb {i}")),
year=i,
)
for i in range(100)
]
)
# build a filter query
filter_query = {"range": {"year": {"lte": 90}}}
yield elastic_db, filter_query, embeddings
elastic_db._client.indices.delete(index="docarray_retriever")
@pytest.fixture
def init_qdrant() -> Tuple[QdrantDocumentIndex, rest.Filter, FakeEmbeddings]:
from docarray import BaseDoc
from docarray.index import QdrantDocumentIndex
class MyDoc(BaseDoc):
title: str
title_embedding: NdArray[32] # type: ignore
other_emb: NdArray[32] # type: ignore
year: int
embeddings = FakeEmbeddings(size=32)
# initialize QdrantDocumentIndex
qdrant_config = QdrantDocumentIndex.DBConfig(path=":memory:")
qdrant_db = QdrantDocumentIndex[MyDoc](qdrant_config)
# index data
qdrant_db.index(
[
MyDoc(
title=f"My document {i}",
title_embedding=np.array(embeddings.embed_query(f"fake emb {i}")),
other_emb=np.array(embeddings.embed_query(f"other fake emb {i}")),
year=i,
)
for i in range(100)
]
)
# build a filter query
filter_query = rest.Filter(
must=[
rest.FieldCondition(
key="year",
range=rest.Range(
gte=10,
lt=90,
),
)
]
)
return qdrant_db, filter_query, embeddings
@pytest.fixture
def init_in_memory() -> Tuple[InMemoryExactNNIndex, Dict[str, Any], FakeEmbeddings]:
from docarray import BaseDoc
from docarray.index import InMemoryExactNNIndex
class MyDoc(BaseDoc):
title: str
title_embedding: NdArray[32] # type: ignore
other_emb: NdArray[32] # type: ignore
year: int
embeddings = FakeEmbeddings(size=32)
# initialize InMemoryExactNNIndex
in_memory_db = InMemoryExactNNIndex[MyDoc]()
# index data
in_memory_db.index(
[
MyDoc(
title=f"My document {i}",
title_embedding=np.array(embeddings.embed_query(f"fake emb {i}")),
other_emb=np.array(embeddings.embed_query(f"other fake emb {i}")),
year=i,
)
for i in range(100)
]
)
# build a filter query
filter_query = {"year": {"$lte": 90}}
return in_memory_db, filter_query, embeddings
@pytest.fixture
def init_hnsw(
tmp_path: Path,
) -> Tuple[HnswDocumentIndex, Dict[str, Any], FakeEmbeddings]:
from docarray import BaseDoc
from docarray.index import (
HnswDocumentIndex,
)
class MyDoc(BaseDoc):
title: str
title_embedding: NdArray[32] # type: ignore
other_emb: NdArray[32] # type: ignore
year: int
embeddings = FakeEmbeddings(size=32)
# initialize InMemoryExactNNIndex
hnsw_db = HnswDocumentIndex[MyDoc](work_dir=tmp_path)
# index data
hnsw_db.index(
[
MyDoc(
title=f"My document {i}",
title_embedding=np.array(embeddings.embed_query(f"fake emb {i}")),
other_emb=np.array(embeddings.embed_query(f"other fake emb {i}")),
year=i,
)
for i in range(100)
]
)
# build a filter query
filter_query = {"year": {"$lte": 90}}
return hnsw_db, filter_query, embeddings
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~graphs~neo4j_graph.py | from typing import Any, Dict, List, Optional
from libs.core.langchain_core.utils import get_from_env
from langchain_community.graphs.graph_document import GraphDocument
from langchain_community.graphs.graph_store import GraphStore
node_properties_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP" AND elementType = "node"
WITH label AS nodeLabels, collect({property:property, type:type}) AS properties
RETURN {labels: nodeLabels, properties: properties} AS output
"""
rel_properties_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE NOT type = "RELATIONSHIP" AND elementType = "relationship"
WITH label AS nodeLabels, collect({property:property, type:type}) AS properties
RETURN {type: nodeLabels, properties: properties} AS output
"""
rel_query = """
CALL apoc.meta.data()
YIELD label, other, elementType, type, property
WHERE type = "RELATIONSHIP" AND elementType = "node"
UNWIND other AS other_node
RETURN {start: label, type: property, end: toString(other_node)} AS output
"""
class Neo4jGraph(GraphStore):
"""Neo4j wrapper for graph operations.
*Security note*: Make sure that the database connection uses credentials
that are narrowly-scoped to only include necessary permissions.
Failure to do so may result in data corruption or loss, since the calling
code may attempt commands that would result in deletion, mutation
of data if appropriately prompted or reading sensitive data if such
data is present in the database.
The best way to guard against such negative outcomes is to (as appropriate)
limit the permissions granted to the credentials used with this tool.
See https://python.langchain.com/docs/security for more information.
"""
def __init__(
self,
url: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
database: str = "neo4j",
) -> None:
"""Create a new Neo4j graph wrapper instance."""
try:
import neo4j
except ImportError:
raise ValueError(
"Could not import neo4j python package. "
"Please install it with `pip install neo4j`."
)
url = get_from_env("url", "NEO4J_URI", url)
username = get_from_env("username", "NEO4J_USERNAME", username)
password = get_from_env("password", "NEO4J_PASSWORD", password)
database = get_from_env("database", "NEO4J_DATABASE", database)
self._driver = neo4j.GraphDatabase.driver(url, auth=(username, password))
self._database = database
self.schema: str = ""
self.structured_schema: Dict[str, Any] = {}
# Verify connection
try:
self._driver.verify_connectivity()
except neo4j.exceptions.ServiceUnavailable:
raise ValueError(
"Could not connect to Neo4j database. "
"Please ensure that the url is correct"
)
except neo4j.exceptions.AuthError:
raise ValueError(
"Could not connect to Neo4j database. "
"Please ensure that the username and password are correct"
)
# Set schema
try:
self.refresh_schema()
except neo4j.exceptions.ClientError:
raise ValueError(
"Could not use APOC procedures. "
"Please ensure the APOC plugin is installed in Neo4j and that "
"'apoc.meta.data()' is allowed in Neo4j configuration "
)
@property
def get_schema(self) -> str:
"""Returns the schema of the Graph"""
return self.schema
@property
def get_structured_schema(self) -> Dict[str, Any]:
"""Returns the structured schema of the Graph"""
return self.structured_schema
def query(self, query: str, params: dict = {}) -> List[Dict[str, Any]]:
"""Query Neo4j database."""
from neo4j.exceptions import CypherSyntaxError
with self._driver.session(database=self._database) as session:
try:
data = session.run(query, params)
return [r.data() for r in data]
except CypherSyntaxError as e:
raise ValueError(f"Generated Cypher Statement is not valid\n{e}")
def refresh_schema(self) -> None:
"""
Refreshes the Neo4j graph schema information.
"""
node_properties = [el["output"] for el in self.query(node_properties_query)]
rel_properties = [el["output"] for el in self.query(rel_properties_query)]
relationships = [el["output"] for el in self.query(rel_query)]
self.structured_schema = {
"node_props": {el["labels"]: el["properties"] for el in node_properties},
"rel_props": {el["type"]: el["properties"] for el in rel_properties},
"relationships": relationships,
}
# Format node properties
formatted_node_props = []
for el in node_properties:
props_str = ", ".join(
[f"{prop['property']}: {prop['type']}" for prop in el["properties"]]
)
formatted_node_props.append(f"{el['labels']} {{{props_str}}}")
# Format relationship properties
formatted_rel_props = []
for el in rel_properties:
props_str = ", ".join(
[f"{prop['property']}: {prop['type']}" for prop in el["properties"]]
)
formatted_rel_props.append(f"{el['type']} {{{props_str}}}")
# Format relationships
formatted_rels = [
f"(:{el['start']})-[:{el['type']}]->(:{el['end']})" for el in relationships
]
self.schema = "\n".join(
[
"Node properties are the following:",
",".join(formatted_node_props),
"Relationship properties are the following:",
",".join(formatted_rel_props),
"The relationships are the following:",
",".join(formatted_rels),
]
)
def add_graph_documents(
self, graph_documents: List[GraphDocument], include_source: bool = False
) -> None:
"""
Take GraphDocument as input as uses it to construct a graph.
"""
for document in graph_documents:
include_docs_query = (
"CREATE (d:Document) "
"SET d.text = $document.page_content "
"SET d += $document.metadata "
"WITH d "
)
# Import nodes
self.query(
(
f"{include_docs_query if include_source else ''}"
"UNWIND $data AS row "
"CALL apoc.merge.node([row.type], {id: row.id}, "
"row.properties, {}) YIELD node "
f"{'MERGE (d)-[:MENTIONS]->(node) ' if include_source else ''}"
"RETURN distinct 'done' AS result"
),
{
"data": [el.__dict__ for el in document.nodes],
"document": document.source.__dict__,
},
)
# Import relationships
self.query(
"UNWIND $data AS row "
"CALL apoc.merge.node([row.source_label], {id: row.source},"
"{}, {}) YIELD node as source "
"CALL apoc.merge.node([row.target_label], {id: row.target},"
"{}, {}) YIELD node as target "
"CALL apoc.merge.relationship(source, row.type, "
"{}, row.properties, target) YIELD rel "
"RETURN distinct 'done'",
{
"data": [
{
"source": el.source.id,
"source_label": el.source.type,
"target": el.target.id,
"target_label": el.target.type,
"type": el.type.replace(" ", "_").upper(),
"properties": el.properties,
}
for el in document.relationships
]
},
)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~embeddings~localai.py | from __future__ import annotations
import logging
import warnings
from typing import (
Any,
Callable,
Dict,
List,
Literal,
Optional,
Sequence,
Set,
Tuple,
Union,
)
from libs.core.langchain_core.embeddings import Embeddings
from libs.core.langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env, get_pydantic_field_names
from tenacity import (
AsyncRetrying,
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
logger = logging.getLogger(__name__)
def _create_retry_decorator(embeddings: LocalAIEmbeddings) -> Callable[[Any], Any]:
import openai
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
return retry(
reraise=True,
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def _async_retry_decorator(embeddings: LocalAIEmbeddings) -> Any:
import openai
min_seconds = 4
max_seconds = 10
# Wait 2^x * 1 second between each retry starting with
# 4 seconds, then up to 10 seconds, then 10 seconds afterwards
async_retrying = AsyncRetrying(
reraise=True,
stop=stop_after_attempt(embeddings.max_retries),
wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
retry=(
retry_if_exception_type(openai.error.Timeout)
| retry_if_exception_type(openai.error.APIError)
| retry_if_exception_type(openai.error.APIConnectionError)
| retry_if_exception_type(openai.error.RateLimitError)
| retry_if_exception_type(openai.error.ServiceUnavailableError)
),
before_sleep=before_sleep_log(logger, logging.WARNING),
)
def wrap(func: Callable) -> Callable:
async def wrapped_f(*args: Any, **kwargs: Any) -> Callable:
async for _ in async_retrying:
return await func(*args, **kwargs)
raise AssertionError("this is unreachable")
return wrapped_f
return wrap
# https://stackoverflow.com/questions/76469415/getting-embeddings-of-length-1-from-langchain-openaiembeddings
def _check_response(response: dict) -> dict:
if any(len(d["embedding"]) == 1 for d in response["data"]):
import openai
raise openai.error.APIError("LocalAI API returned an empty embedding")
return response
def embed_with_retry(embeddings: LocalAIEmbeddings, **kwargs: Any) -> Any:
"""Use tenacity to retry the embedding call."""
retry_decorator = _create_retry_decorator(embeddings)
@retry_decorator
def _embed_with_retry(**kwargs: Any) -> Any:
response = embeddings.client.create(**kwargs)
return _check_response(response)
return _embed_with_retry(**kwargs)
async def async_embed_with_retry(embeddings: LocalAIEmbeddings, **kwargs: Any) -> Any:
"""Use tenacity to retry the embedding call."""
@_async_retry_decorator(embeddings)
async def _async_embed_with_retry(**kwargs: Any) -> Any:
response = await embeddings.client.acreate(**kwargs)
return _check_response(response)
return await _async_embed_with_retry(**kwargs)
class LocalAIEmbeddings(BaseModel, Embeddings):
"""LocalAI embedding models.
Since LocalAI and OpenAI have 1:1 compatibility between APIs, this class
uses the ``openai`` Python package's ``openai.Embedding`` as its client.
Thus, you should have the ``openai`` python package installed, and defeat
the environment variable ``OPENAI_API_KEY`` by setting to a random string.
You also need to specify ``OPENAI_API_BASE`` to point to your LocalAI
service endpoint.
Example:
.. code-block:: python
from langchain_community.embeddings import LocalAIEmbeddings
openai = LocalAIEmbeddings(
openai_api_key="random-string",
openai_api_base="http://localhost:8080"
)
"""
client: Any #: :meta private:
model: str = "text-embedding-ada-002"
deployment: str = model
openai_api_version: Optional[str] = None
openai_api_base: Optional[str] = None
# to support explicit proxy for LocalAI
openai_proxy: Optional[str] = None
embedding_ctx_length: int = 8191
"""The maximum number of tokens to embed at once."""
openai_api_key: Optional[str] = None
openai_organization: Optional[str] = None
allowed_special: Union[Literal["all"], Set[str]] = set()
disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all"
chunk_size: int = 1000
"""Maximum number of texts to embed in each batch"""
max_retries: int = 6
"""Maximum number of retries to make when generating."""
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout in seconds for the LocalAI request."""
headers: Any = None
show_progress_bar: bool = False
"""Whether to show a progress bar when embedding."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = get_pydantic_field_names(cls)
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
if field_name not in all_required_field_names:
warnings.warn(
f"""WARNING! {field_name} is not default parameter.
{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
invalid_model_kwargs = all_required_field_names.intersection(extra.keys())
if invalid_model_kwargs:
raise ValueError(
f"Parameters {invalid_model_kwargs} should be specified explicitly. "
f"Instead they were passed in as part of `model_kwargs` parameter."
)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["openai_api_key"] = get_from_dict_or_env(
values, "openai_api_key", "OPENAI_API_KEY"
)
values["openai_api_base"] = get_from_dict_or_env(
values,
"openai_api_base",
"OPENAI_API_BASE",
default="",
)
values["openai_proxy"] = get_from_dict_or_env(
values,
"openai_proxy",
"OPENAI_PROXY",
default="",
)
default_api_version = ""
values["openai_api_version"] = get_from_dict_or_env(
values,
"openai_api_version",
"OPENAI_API_VERSION",
default=default_api_version,
)
values["openai_organization"] = get_from_dict_or_env(
values,
"openai_organization",
"OPENAI_ORGANIZATION",
default="",
)
try:
import openai
values["client"] = openai.Embedding
except ImportError:
raise ImportError(
"Could not import openai python package. "
"Please install it with `pip install openai`."
)
return values
@property
def _invocation_params(self) -> Dict:
openai_args = {
"model": self.model,
"request_timeout": self.request_timeout,
"headers": self.headers,
"api_key": self.openai_api_key,
"organization": self.openai_organization,
"api_base": self.openai_api_base,
"api_version": self.openai_api_version,
**self.model_kwargs,
}
if self.openai_proxy:
import openai
openai.proxy = {
"http": self.openai_proxy,
"https": self.openai_proxy,
} # type: ignore[assignment] # noqa: E501
return openai_args
def _embedding_func(self, text: str, *, engine: str) -> List[float]:
"""Call out to LocalAI's embedding endpoint."""
# handle large input text
if self.model.endswith("001"):
# See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return embed_with_retry(
self,
input=[text],
**self._invocation_params,
)["data"][0]["embedding"]
async def _aembedding_func(self, text: str, *, engine: str) -> List[float]:
"""Call out to LocalAI's embedding endpoint."""
# handle large input text
if self.model.endswith("001"):
# See: https://github.com/openai/openai-python/issues/418#issuecomment-1525939500
# replace newlines, which can negatively affect performance.
text = text.replace("\n", " ")
return (
await async_embed_with_retry(
self,
input=[text],
**self._invocation_params,
)
)["data"][0]["embedding"]
def embed_documents(
self, texts: List[str], chunk_size: Optional[int] = 0
) -> List[List[float]]:
"""Call out to LocalAI's embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text.
"""
# call _embedding_func for each text
return [self._embedding_func(text, engine=self.deployment) for text in texts]
async def aembed_documents(
self, texts: List[str], chunk_size: Optional[int] = 0
) -> List[List[float]]:
"""Call out to LocalAI's embedding endpoint async for embedding search docs.
Args:
texts: The list of texts to embed.
chunk_size: The chunk size of embeddings. If None, will use the chunk size
specified by the class.
Returns:
List of embeddings, one for each text.
"""
embeddings = []
for text in texts:
response = await self._aembedding_func(text, engine=self.deployment)
embeddings.append(response)
return embeddings
def embed_query(self, text: str) -> List[float]:
"""Call out to LocalAI's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
embedding = self._embedding_func(text, engine=self.deployment)
return embedding
async def aembed_query(self, text: str) -> List[float]:
"""Call out to LocalAI's embedding endpoint async for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
embedding = await self._aembedding_func(text, engine=self.deployment)
return embedding
| [] |
2024-01-10 | mth93/langchain | libs~langchain~langchain~agents~agent_toolkits~vectorstore~toolkit.py | """Toolkit for interacting with a vector store."""
from typing import List
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field
from libs.core.langchain_core.vectorstores import VectorStore
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.llms.openai import OpenAI
from langchain.tools import BaseTool
from langchain.tools.vectorstore.tool import (
VectorStoreQATool,
VectorStoreQAWithSourcesTool,
)
class VectorStoreInfo(BaseModel):
"""Information about a VectorStore."""
vectorstore: VectorStore = Field(exclude=True)
name: str
description: str
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
class VectorStoreToolkit(BaseToolkit):
"""Toolkit for interacting with a Vector Store."""
vectorstore_info: VectorStoreInfo = Field(exclude=True)
llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
description = VectorStoreQATool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
)
qa_tool = VectorStoreQATool(
name=self.vectorstore_info.name,
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
description = VectorStoreQAWithSourcesTool.get_description(
self.vectorstore_info.name, self.vectorstore_info.description
)
qa_with_sources_tool = VectorStoreQAWithSourcesTool(
name=f"{self.vectorstore_info.name}_with_sources",
description=description,
vectorstore=self.vectorstore_info.vectorstore,
llm=self.llm,
)
return [qa_tool, qa_with_sources_tool]
class VectorStoreRouterToolkit(BaseToolkit):
"""Toolkit for routing between Vector Stores."""
vectorstores: List[VectorStoreInfo] = Field(exclude=True)
llm: BaseLanguageModel = Field(default_factory=lambda: OpenAI(temperature=0))
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
tools: List[BaseTool] = []
for vectorstore_info in self.vectorstores:
description = VectorStoreQATool.get_description(
vectorstore_info.name, vectorstore_info.description
)
qa_tool = VectorStoreQATool(
name=vectorstore_info.name,
description=description,
vectorstore=vectorstore_info.vectorstore,
llm=self.llm,
)
tools.append(qa_tool)
return tools
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~integration_tests~memory~test_singlestoredb.py | import json
from libs.core.langchain_core.messages import message_to_dict
from langchain.memory import ConversationBufferMemory, SingleStoreDBChatMessageHistory
# Replace these with your mongodb connection string
TEST_SINGLESTOREDB_URL = "root:pass@localhost:3306/db"
def test_memory_with_message_store() -> None:
"""Test the memory with a message store."""
# setup SingleStoreDB as a message store
message_history = SingleStoreDBChatMessageHistory(
session_id="test-session",
host=TEST_SINGLESTOREDB_URL,
)
memory = ConversationBufferMemory(
memory_key="baz", chat_memory=message_history, return_messages=True
)
# add some messages
memory.chat_memory.add_ai_message("This is me, the AI")
memory.chat_memory.add_user_message("This is me, the human")
# get the message history from the memory store and turn it into a json
messages = memory.chat_memory.messages
messages_json = json.dumps([message_to_dict(msg) for msg in messages])
assert "This is me, the AI" in messages_json
assert "This is me, the human" in messages_json
# remove the record from SingleStoreDB, so the next test run won't pick it up
memory.chat_memory.clear()
assert memory.chat_memory.messages == []
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~llms~test_tongyi.py | """Test Tongyi API wrapper."""
from libs.core.langchain_core.outputs import LLMResult
from langchain_community.llms.tongyi import Tongyi
def test_tongyi_call() -> None:
"""Test valid call to tongyi."""
llm = Tongyi()
output = llm("who are you")
assert isinstance(output, str)
def test_tongyi_generate() -> None:
"""Test valid call to tongyi."""
llm = Tongyi()
output = llm.generate(["who are you"])
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
def test_tongyi_generate_stream() -> None:
"""Test valid call to tongyi."""
llm = Tongyi(streaming=True)
output = llm.generate(["who are you"])
print(output)
assert isinstance(output, LLMResult)
assert isinstance(output.generations, list)
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~agent_toolkits~openapi~toolkit.py | """Requests toolkit."""
from __future__ import annotations
from typing import Any, List
from libs.core.langchain_core.language_models import BaseLanguageModel
from libs.core.langchain_core.tools import Tool
from langchain_community.agent_toolkits.base import BaseToolkit
from langchain_community.agent_toolkits.json.base import create_json_agent
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
from langchain_community.agent_toolkits.openapi.prompt import DESCRIPTION
from langchain_community.tools import BaseTool
from langchain_community.tools.json.tool import JsonSpec
from langchain_community.tools.requests.tool import (
RequestsDeleteTool,
RequestsGetTool,
RequestsPatchTool,
RequestsPostTool,
RequestsPutTool,
)
from langchain_community.utilities.requests import TextRequestsWrapper
class RequestsToolkit(BaseToolkit):
"""Toolkit for making REST requests.
*Security Note*: This toolkit contains tools to make GET, POST, PATCH, PUT,
and DELETE requests to an API.
Exercise care in who is allowed to use this toolkit. If exposing
to end users, consider that users will be able to make arbitrary
requests on behalf of the server hosting the code. For example,
users could ask the server to make a request to a private API
that is only accessible from the server.
Control access to who can submit issue requests using this toolkit and
what network access it has.
See https://python.langchain.com/docs/security for more information.
"""
requests_wrapper: TextRequestsWrapper
def get_tools(self) -> List[BaseTool]:
"""Return a list of tools."""
return [
RequestsGetTool(requests_wrapper=self.requests_wrapper),
RequestsPostTool(requests_wrapper=self.requests_wrapper),
RequestsPatchTool(requests_wrapper=self.requests_wrapper),
RequestsPutTool(requests_wrapper=self.requests_wrapper),
RequestsDeleteTool(requests_wrapper=self.requests_wrapper),
]
class OpenAPIToolkit(BaseToolkit):
"""Toolkit for interacting with an OpenAPI API.
*Security Note*: This toolkit contains tools that can read and modify
the state of a service; e.g., by creating, deleting, or updating,
reading underlying data.
For example, this toolkit can be used to delete data exposed via
an OpenAPI compliant API.
"""
json_agent: Any
requests_wrapper: TextRequestsWrapper
def get_tools(self) -> List[BaseTool]:
"""Get the tools in the toolkit."""
json_agent_tool = Tool(
name="json_explorer",
func=self.json_agent.run,
description=DESCRIPTION,
)
request_toolkit = RequestsToolkit(requests_wrapper=self.requests_wrapper)
return [*request_toolkit.get_tools(), json_agent_tool]
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
json_spec: JsonSpec,
requests_wrapper: TextRequestsWrapper,
**kwargs: Any,
) -> OpenAPIToolkit:
"""Create json agent from llm, then initialize."""
json_agent = create_json_agent(llm, JsonToolkit(spec=json_spec), **kwargs)
return cls(json_agent=json_agent, requests_wrapper=requests_wrapper)
| [] |
2024-01-10 | mth93/langchain | libs~community~tests~integration_tests~document_loaders~test_wikipedia.py | """Integration test for Wikipedia Document Loader."""
from typing import List
from libs.core.langchain_core.documents import Document
from langchain_community.document_loaders import WikipediaLoader
def assert_docs(docs: List[Document], all_meta: bool = False) -> None:
for doc in docs:
assert doc.page_content
assert doc.metadata
main_meta = {"title", "summary", "source"}
assert set(doc.metadata).issuperset(main_meta)
if all_meta:
assert len(set(doc.metadata)) > len(main_meta)
else:
assert len(set(doc.metadata)) == len(main_meta)
def test_load_success() -> None:
loader = WikipediaLoader(query="HUNTER X HUNTER")
docs = loader.load()
assert len(docs) > 1
assert len(docs) <= 25
assert_docs(docs, all_meta=False)
def test_load_success_all_meta() -> None:
load_max_docs = 5
load_all_available_meta = True
loader = WikipediaLoader(
query="HUNTER X HUNTER",
load_max_docs=load_max_docs,
load_all_available_meta=load_all_available_meta,
)
docs = loader.load()
assert len(docs) == load_max_docs
assert_docs(docs, all_meta=load_all_available_meta)
def test_load_success_more() -> None:
load_max_docs = 10
loader = WikipediaLoader(query="HUNTER X HUNTER", load_max_docs=load_max_docs)
docs = loader.load()
assert len(docs) == load_max_docs
assert_docs(docs, all_meta=False)
def test_load_no_result() -> None:
loader = WikipediaLoader(
"NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL_NORESULTCALL"
)
docs = loader.load()
assert not docs
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~bananadev.py | import logging
from typing import Any, Dict, List, Mapping, Optional
from libs.core.langchain_core.callbacks import CallbackManagerForLLMRun
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import Extra, Field, root_validator
from libs.core.langchain_core.utils import get_from_dict_or_env
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class Banana(LLM):
"""Banana large language models.
To use, you should have the ``banana-dev`` python package installed,
and the environment variable ``BANANA_API_KEY`` set with your API key.
This is the team API key available in the Banana dashboard.
Any parameters that are valid to be passed to the call can be passed
in, even if not explicitly saved on this class.
Example:
.. code-block:: python
from langchain_community.llms import Banana
banana = Banana(model_key="", model_url_slug="")
"""
model_key: str = ""
"""model key to use"""
model_url_slug: str = ""
"""model endpoint to use"""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not
explicitly specified."""
banana_api_key: Optional[str] = None
class Config:
"""Configuration for this pydantic config."""
extra = Extra.forbid
@root_validator(pre=True)
def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Build extra kwargs from additional params that were passed in."""
all_required_field_names = {field.alias for field in cls.__fields__.values()}
extra = values.get("model_kwargs", {})
for field_name in list(values):
if field_name not in all_required_field_names:
if field_name in extra:
raise ValueError(f"Found {field_name} supplied twice.")
logger.warning(
f"""{field_name} was transferred to model_kwargs.
Please confirm that {field_name} is what you intended."""
)
extra[field_name] = values.pop(field_name)
values["model_kwargs"] = extra
return values
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
banana_api_key = get_from_dict_or_env(
values, "banana_api_key", "BANANA_API_KEY"
)
values["banana_api_key"] = banana_api_key
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
**{"model_key": self.model_key},
**{"model_url_slug": self.model_url_slug},
**{"model_kwargs": self.model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "bananadev"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
"""Call to Banana endpoint."""
try:
from banana_dev import Client
except ImportError:
raise ImportError(
"Could not import banana-dev python package. "
"Please install it with `pip install banana-dev`."
)
params = self.model_kwargs or {}
params = {**params, **kwargs}
api_key = self.banana_api_key
model_key = self.model_key
model_url_slug = self.model_url_slug
model_inputs = {
# a json specific to your model.
"prompt": prompt,
**params,
}
model = Client(
# Found in main dashboard
api_key=api_key,
# Both found in model details page
model_key=model_key,
url=f"https://{model_url_slug}.run.banana.dev",
)
response, meta = model.call("/", model_inputs)
try:
text = response["outputs"]
except (KeyError, TypeError):
raise ValueError(
"Response should be of schema: {'outputs': 'text'}."
"\nTo fix this:"
"\n- fork the source repo of the Banana model"
"\n- modify app.py to return the above schema"
"\n- deploy that as a custom repo"
)
if stop is not None:
# I believe this is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~unit_tests~retrievers~test_multi_query.py | from typing import List
import pytest as pytest
from libs.core.langchain_core.documents import Document
from langchain.retrievers.multi_query import _unique_documents
@pytest.mark.parametrize(
"documents,expected",
[
([], []),
([Document(page_content="foo")], [Document(page_content="foo")]),
([Document(page_content="foo")] * 2, [Document(page_content="foo")]),
(
[Document(page_content="foo", metadata={"bar": "baz"})] * 2,
[Document(page_content="foo", metadata={"bar": "baz"})],
),
(
[Document(page_content="foo", metadata={"bar": [1, 2]})] * 2,
[Document(page_content="foo", metadata={"bar": [1, 2]})],
),
(
[Document(page_content="foo", metadata={"bar": {1, 2}})] * 2,
[Document(page_content="foo", metadata={"bar": {1, 2}})],
),
(
[
Document(page_content="foo", metadata={"bar": [1, 2]}),
Document(page_content="foo", metadata={"bar": [2, 1]}),
],
[
Document(page_content="foo", metadata={"bar": [1, 2]}),
Document(page_content="foo", metadata={"bar": [2, 1]}),
],
),
],
)
def test__unique_documents(documents: List[Document], expected: List[Document]) -> None:
assert _unique_documents(documents) == expected
| [] |
2024-01-10 | mth93/langchain | libs~community~langchain_community~llms~minimax.py | """Wrapper around Minimax APIs."""
from __future__ import annotations
import logging
from typing import (
Any,
Dict,
List,
Optional,
)
import requests
from libs.core.langchain_core.callbacks import (
CallbackManagerForLLMRun,
)
from libs.core.langchain_core.language_models.llms import LLM
from libs.core.langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
from libs.core.langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_community.llms.utils import enforce_stop_tokens
logger = logging.getLogger(__name__)
class _MinimaxEndpointClient(BaseModel):
"""An API client that talks to a Minimax llm endpoint."""
host: str
group_id: str
api_key: SecretStr
api_url: str
@root_validator(pre=True, allow_reuse=True)
def set_api_url(cls, values: Dict[str, Any]) -> Dict[str, Any]:
if "api_url" not in values:
host = values["host"]
group_id = values["group_id"]
api_url = f"{host}/v1/text/chatcompletion?GroupId={group_id}"
values["api_url"] = api_url
return values
def post(self, request: Any) -> Any:
headers = {"Authorization": f"Bearer {self.api_key.get_secret_value()}"}
response = requests.post(self.api_url, headers=headers, json=request)
# TODO: error handling and automatic retries
if not response.ok:
raise ValueError(f"HTTP {response.status_code} error: {response.text}")
if response.json()["base_resp"]["status_code"] > 0:
raise ValueError(
f"API {response.json()['base_resp']['status_code']}"
f" error: {response.json()['base_resp']['status_msg']}"
)
return response.json()["reply"]
class MinimaxCommon(BaseModel):
"""Common parameters for Minimax large language models."""
_client: _MinimaxEndpointClient
model: str = "abab5.5-chat"
"""Model name to use."""
max_tokens: int = 256
"""Denotes the number of tokens to predict per generation."""
temperature: float = 0.7
"""A non-negative float that tunes the degree of randomness in generation."""
top_p: float = 0.95
"""Total probability mass of tokens to consider at each step."""
model_kwargs: Dict[str, Any] = Field(default_factory=dict)
"""Holds any model parameters valid for `create` call not explicitly specified."""
minimax_api_host: Optional[str] = None
minimax_group_id: Optional[str] = None
minimax_api_key: Optional[SecretStr] = None
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment."""
values["minimax_api_key"] = convert_to_secret_str(
get_from_dict_or_env(values, "minimax_api_key", "MINIMAX_API_KEY")
)
values["minimax_group_id"] = get_from_dict_or_env(
values, "minimax_group_id", "MINIMAX_GROUP_ID"
)
# Get custom api url from environment.
values["minimax_api_host"] = get_from_dict_or_env(
values,
"minimax_api_host",
"MINIMAX_API_HOST",
default="https://api.minimax.chat",
)
values["_client"] = _MinimaxEndpointClient(
host=values["minimax_api_host"],
api_key=values["minimax_api_key"],
group_id=values["minimax_group_id"],
)
return values
@property
def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API."""
return {
"model": self.model,
"tokens_to_generate": self.max_tokens,
"temperature": self.temperature,
"top_p": self.top_p,
**self.model_kwargs,
}
@property
def _identifying_params(self) -> Dict[str, Any]:
"""Get the identifying parameters."""
return {**{"model": self.model}, **self._default_params}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "minimax"
class Minimax(MinimaxCommon, LLM):
"""Wrapper around Minimax large language models.
To use, you should have the environment variable
``MINIMAX_API_KEY`` and ``MINIMAX_GROUP_ID`` set with your API key,
or pass them as a named parameter to the constructor.
Example:
. code-block:: python
from langchain_community.llms.minimax import Minimax
minimax = Minimax(model="<model_name>", minimax_api_key="my-api-key",
minimax_group_id="my-group-id")
"""
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> str:
r"""Call out to Minimax's completion endpoint to chat
Args:
prompt: The prompt to pass into the model.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = minimax("Tell me a joke.")
"""
request = self._default_params
request["messages"] = [{"sender_type": "USER", "text": prompt}]
request.update(kwargs)
text = self._client.post(request)
if stop is not None:
# This is required since the stop tokens
# are not enforced by the model parameters
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | mth93/langchain | libs~langchain~tests~unit_tests~retrievers~test_parent_document.py | from typing import Any, List, Sequence
from libs.core.langchain_core.documents import Document
from langchain.retrievers import ParentDocumentRetriever
from langchain.storage import InMemoryStore
from langchain.text_splitter import CharacterTextSplitter
from tests.unit_tests.indexes.test_indexing import InMemoryVectorStore
class InMemoryVectorstoreWithSearch(InMemoryVectorStore):
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
res = self.store.get(query)
if res is None:
return []
return [res]
def add_documents(self, documents: Sequence[Document], **kwargs: Any) -> List[str]:
print(documents)
return super().add_documents(
documents, ids=[f"{i}" for i in range(len(documents))]
)
def test_parent_document_retriever_initialization() -> None:
vectorstore = InMemoryVectorstoreWithSearch()
store = InMemoryStore()
child_splitter = CharacterTextSplitter(chunk_size=400)
documents = [Document(page_content="test document")]
retriever = ParentDocumentRetriever(
vectorstore=vectorstore,
docstore=store,
child_splitter=child_splitter,
)
retriever.add_documents(documents)
results = retriever.invoke("0")
assert len(results) > 0
assert results[0].page_content == "test document"
| [] |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.