date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | Crypto-star/chatPDF | ChatPDF.py | from dotenv import load_dotenv
import streamlit as st
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
def main():
load_dotenv()
st.set_page_config(page_title="Ask Your PDF")
st.header("Ask your PDF 💬")
# extract the text
pdf = st.file_uploader("Upload your PDF", type="pdf")
if pdf is not None:
pdf_reader = PdfReader(pdf)
text = ""
for page in pdf_reader.pages:
text += page.extract_text()
# split into chunks
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text)
embeddings = OpenAIEmbeddings()
knowledge_base = FAISS.from_texts(chunks, embeddings)
user_question = st.text_input("Ask a question about your PDF")
if user_question:
docs = knowledge_base.similarity_search(user_question)
llm = OpenAI()
chain = load_qa_chain(llm, chain_type="stuff")
response = chain.run(input_documents=docs, question=user_question)
st.write(response)
if __name__ == '__main__':
main()
| [] |
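The ChatPDF.py row above wires PDF extraction, chunking, FAISS retrieval and a "stuff" QA chain behind a Streamlit UI. As a quick illustration, here is a minimal sketch of the same flow without the UI; the document text is a hypothetical placeholder, and it assumes OPENAI_API_KEY is set and the same legacy LangChain imports are available.

```python
# Minimal sketch of the ChatPDF retrieval flow, minus Streamlit (assumptions:
# OPENAI_API_KEY is set; legacy langchain APIs as imported in the file above).
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI

text = "Page one text...\nPage two text..."  # placeholder for extracted PDF text
splitter = CharacterTextSplitter(separator="\n", chunk_size=1000,
                                 chunk_overlap=200, length_function=len)
chunks = splitter.split_text(text)

knowledge_base = FAISS.from_texts(chunks, OpenAIEmbeddings())  # embed and index the chunks
question = "What is this document about?"
docs = knowledge_base.similarity_search(question)              # retrieve the most relevant chunks
answer = load_qa_chain(OpenAI(), chain_type="stuff").run(
    input_documents=docs, question=question)                   # answer over the retrieved chunks
print(answer)
```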
2024-01-10 | tangqiaoyu/ToolAlpaca | agent~custom_parser.py | import re
from typing import Union
from langchain.agents.agent import AgentOutputParser
from langchain.schema import AgentAction, AgentFinish
FINAL_ANSWER_ACTION = "Final Answer:"
class CustomMRKLOutputParser(AgentOutputParser):
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
if FINAL_ANSWER_ACTION in text:
return AgentFinish(
{
"output": text.split(FINAL_ANSWER_ACTION)[-1].strip(),
"Final Thought": text.rsplit(FINAL_ANSWER_ACTION, 1)[0].strip(),
}, text
)
# \s matches against tab/newline/whitespace
regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
match = re.search(regex, text, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{text}`")
action = match.group(1).strip()
action_input = match.group(2)
return AgentAction(action, action_input.strip(" ").strip('"'), text)
class CustomMRKLOutputParser2(AgentOutputParser):
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
final_answer_action = "ASSISTANT Response:"
if final_answer_action in text:
return AgentFinish(
{
"output": text.split(final_answer_action)[-1].strip(),
"Final Thought": text.rsplit(final_answer_action, 1)[0].strip(),
}, text
)
# \s matches against tab/newline/whitespace
regex = r"ASSISTANT\s*Action\s*\d*\s*:(.*?)\nASSISTANT\s*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
match = re.search(regex, text, re.DOTALL)
if not match:
raise ValueError(f"Could not parse LLM output: `{text}`")
action = match.group(1).strip()
action_input = match.group(2)
return AgentAction(action, action_input.strip(" ").strip('"'), text) | [] |
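As a rough sanity check of the parser above (the LLM outputs below are invented, and the import path is assumed from the repo layout), the two branches behave like this:

```python
# Hedged sketch: invented LLM outputs run through CustomMRKLOutputParser.
from agent.custom_parser import CustomMRKLOutputParser  # import path assumed

parser = CustomMRKLOutputParser()

tool_call = 'Thought: I need the forecast.\nAction: getWeather\nAction Input: {"city": "Paris"}'
step = parser.parse(tool_call)               # -> AgentAction
print(step.tool, step.tool_input)            # getWeather {"city": "Paris"}

final = "Thought: I have everything I need.\nFinal Answer: It is sunny in Paris."
done = parser.parse(final)                   # -> AgentFinish
print(done.return_values["output"])          # It is sunny in Paris.
print(done.return_values["Final Thought"])   # Thought: I have everything I need.
```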
2024-01-10 | tangqiaoyu/ToolAlpaca | evaluation.py | import os
import re
import json
import argparse
from string import Template
from utils import openai_chat_completions
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-temp", "--template_path", type=str, default="./prompts/Evaluation.txt")
parser.add_argument("-api", "--api_data_path", type=str, default="")
parser.add_argument("-gold", "--golden_answer_path", type=str, default="")
parser.add_argument("-out", "--output_path", type=str, default="")
parser.add_argument("--continue_run", action="store_true", default=False)
args = parser.parse_args()
template = Template(open(args.template_path, "r").read())
api_data = json.load(open(args.api_data_path, "r"))
if os.path.exists(args.golden_answer_path):
golden_answer = json.load(open(args.golden_answer_path, "r"))
for k, v in zip(api_data, golden_answer):
k["Golden_Answers"] = v["Golden_Answers"]
original_data = {}
original_data["statistics"] = {
"num": 0,
"error_num": 0,
"process": {
"Yes": 0,
"No": 0,
"Uncertain": 0
},
"response": {
"Yes": 0,
"No": 0,
"Uncertain": 0
},
"both": 0
}
exist_ids = None
if args.continue_run:
original_data = json.load(open(args.output_path, "r"))
exist_ids = {i: [j["id"] for j in original_data[i]] for i in original_data if i != "statistics"}
retry_cases = None
for api_info in api_data:
api_name = api_info.get("Name", api_info.get("API"))
if retry_cases is not None and api_name not in retry_cases:
continue
if exist_ids is None or api_name not in exist_ids:
original_data[api_name] = []
for ques_id, ques in enumerate(api_info["Instructions"]):
if exist_ids is not None and ques_id in exist_ids.get(api_name, []):
continue
if retry_cases is not None and ques_id not in retry_cases.get(api_name, []):
continue
original_data["statistics"]["num"] += 1
if "intermediate_steps" not in api_info["Instances"][ques_id] or len(api_info["Instances"][ques_id]["intermediate_steps"]) == 0:
original_data["statistics"]["error_num"] += 1
tmp = {
"id": ques_id,
"input": "",
"output": ""
}
original_data[api_name].append(tmp)
continue
golden_answer = api_info["Golden_Answers"][ques_id]
standard_answer = ""
for ans_id, ans in enumerate(golden_answer):
standard_answer += f"{ans_id + 1}. Function: {ans['Action']}\nParameters: {ans['Action_Input']}\n"
solution = ""
for sol_id, sol in enumerate(api_info["Instances"][ques_id]["intermediate_steps"]):
solution += f"{sol_id + 1}. Function: {sol[0][0]}\nParameters: {sol[0][1]}\nRetruns: {sol[1]}\n"
solution += f"{sol_id + 2}. Final Response: {api_info['Instances'][ques_id]['output']}"
prompt = template.substitute(
documentation=api_info["NLDocumentation"],
instruction=ques,
standard=standard_answer,
solution=solution
)
prompt = [{"role": "user", "content": prompt}]
output = openai_chat_completions(prompt, model="gpt-4-0613", temperature=0.2)
text = output["choices"][0]["message"]["content"]
results_text = text.split('## Results', 1)[-1]
process_correctness_match = re.search(r'Process Correctness: (\w+)', results_text)
process_correctness_word = process_correctness_match.group(1) if process_correctness_match else ""
final_response_correctness_match = re.search(r'Final Response Correctness: (\w+)', results_text)
final_response_correctness_word = final_response_correctness_match.group(1) if final_response_correctness_match else ""
tmp = {
"id": ques_id,
"input": prompt,
"output": text,
"process_correctness": process_correctness_word,
"final_response_correctness": final_response_correctness_word
}
original_data["statistics"]["process"][process_correctness_word] += 1
original_data["statistics"]["response"][final_response_correctness_word] += 1
if process_correctness_word == final_response_correctness_word == "Yes":
original_data["statistics"]["both"] += 1
original_data[api_name].append(tmp)
json.dump(
original_data,
open(os.path.join(args.output_path), "w"),
indent=4,
ensure_ascii=False
)
json.dump(
original_data,
open(os.path.join(args.output_path), "w"),
indent=4,
ensure_ascii=False
)
| [
"NLDocumentation"
] |
2024-01-10 | tangqiaoyu/ToolAlpaca | agent~get_agent.py | import os
import json
import logging
from typing import List, Tuple, Any, Union
from pydantic import Field
from langchain import LLMChain
from langchain.agents import ZeroShotAgent
from langchain.schema import AgentAction, AgentFinish
from .tools import Tool, GetDetailsTool, tool_projection
from .custom_parser import CustomMRKLOutputParser
from .custom_agent_executor import CustomAgentExecutor
from utils import load_openapi_spec, escape
from .agent_prompts import train_prompt_v2, test_prompt_v1
from .custom_agent import CustomZeroShotAgent
logger = logging.getLogger(__name__)
def get_agent(
llm,
api_data,
server_url,
agent_prompt=train_prompt_v2,
enable_getDetails=True,
return_intermediate_steps=True,
):
openapi_spec = load_openapi_spec(api_data["Documentation"], replace_refs=True)
components_descriptions = escape(api_data["Function_Description"]["components"])
tools = [GetDetailsTool()] if not enable_getDetails else []
for ext_tool in api_data.get("external_tools", []):
tools.append(tool_projection[ext_tool]())
for idx, func_name in enumerate(api_data["Function_Projection"]):
description = escape(api_data["Function_Description"][func_name])
if idx == len(api_data["Function_Projection"]) - 1:
description += components_descriptions
path, method = api_data["Function_Projection"][func_name]
tools.append(Tool(
base_url=server_url + "/" + api_data["Name"] if server_url else None,
func_name=func_name,
openapi_spec=openapi_spec,
path=path,
method=method,
description=description,
retrieval_available="retrieval" in api_data.get("external_tools", [])
))
AgentType = CustomZeroShotAgent if agent_prompt == test_prompt_v1 else ZeroShotAgent
prompt = AgentType.create_prompt(
tools,
prefix=agent_prompt["prefix"],
suffix=agent_prompt["suffix"],
format_instructions=agent_prompt["format_instructions"],
input_variables=["input", "agent_scratchpad"]
)
logger.info(str(prompt))
llm_chain = LLMChain(llm=llm, prompt=prompt)
AgentType.return_values = ["output", "Final Thought"]
agent = AgentType(llm_chain=llm_chain, allowed_tools=[t.name for t in tools])
if agent_prompt != test_prompt_v1:
agent.output_parser = CustomMRKLOutputParser()
agent_executor = CustomAgentExecutor.from_agent_and_tools(
agent=agent,
tools=tools,
verbose=True,
return_intermediate_steps=return_intermediate_steps
)
return agent_executor
| [
"agent_scratchpad",
"input",
"format_instructions"
] |
2024-01-10 | tangqiaoyu/ToolAlpaca | agent~custom_agent_executor.py | from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
from langchain.tools.base import BaseTool
from langchain.agents import AgentExecutor
from langchain.schema import AgentAction, AgentFinish
from .tools import CustomInvalidTool
class CustomAgentExecutor(AgentExecutor):
def _take_next_step(
self,
name_to_tool_map: Dict[str, BaseTool],
color_mapping: Dict[str, str],
inputs: Dict[str, str],
intermediate_steps: List[Tuple[AgentAction, str]],
) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:
"""Take a single step in the thought-action-observation loop.
Override this to take control of how the agent makes and acts on choices.
"""
# Call the LLM to see what to do.
output = self.agent.plan(intermediate_steps, **inputs)
# If the tool chosen is the finishing tool, then we end and return.
if isinstance(output, AgentFinish):
return output
actions: List[AgentAction]
if isinstance(output, AgentAction):
actions = [output]
else:
actions = output
result = []
for agent_action in actions:
self.callback_manager.on_agent_action(
agent_action, verbose=self.verbose, color="green"
)
# Otherwise we lookup the tool
if agent_action.tool in name_to_tool_map:
tool = name_to_tool_map[agent_action.tool]
return_direct = tool.return_direct
color = color_mapping[agent_action.tool]
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
# =============================== modify ===============================
# give GetDetailsTool more kwargs
tool_run_kwargs["inputs"] = inputs
# =============================== modify ===============================
if return_direct:
tool_run_kwargs["llm_prefix"] = ""
# We then call the tool on the tool input to get an observation
observation = tool.run(
agent_action.tool_input,
verbose=self.verbose,
color=color,
**tool_run_kwargs,
)
else:
tool_run_kwargs = self.agent.tool_run_logging_kwargs()
observation = CustomInvalidTool().run(
agent_action.tool,
all_tools = list(name_to_tool_map.keys()),
verbose=self.verbose,
color=None,
**tool_run_kwargs,
)
result.append((agent_action, observation))
return result | [] |
2024-01-10 | johncollinsai/creditanalyst | app~validateproject.py | import openai
def validate_project(prompt, model, api_key):
user_prompt = f"""Please confirm that {prompt} is a valid project description.
Answer 'yes' or 'no'."""
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{"role": "user", "content": user_prompt}
],
max_tokens=50,
n=1,
stop=None,
temperature=0.3,
)
final_response = response.choices[0]["message"]["content"].strip().lower()
if 'yes' in final_response or 'valid' in final_response:
return True
else:
return False
| [
"Please confirm that PLACEHOLDER is a valid project description.\n Answer 'yes' or 'no'."
] |
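A hedged usage sketch for validate_project above; the project description is invented, and the import path is assumed. Note that the current function body hard-codes "gpt-4" and never reads its model or api_key arguments, so the caller has to set openai.api_key itself.

```python
import os
import openai
from app.validateproject import validate_project  # import path assumed from the repo layout

openai.api_key = os.environ["OPENAI_API_KEY"]  # required: the function ignores its api_key argument
ok = validate_project(
    prompt="A rooftop solar installation for a mid-size warehouse",  # invented example
    model="gpt-4",        # also unused by the current function body
    api_key=openai.api_key,
)
print("looks like a project description" if ok else "rejected")
```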
2024-01-10 | avukadin/MyFriend | pkg~Brain.py | import speech_recognition as sr
import os
import openai
from enum import Enum
from config import API_KEY
class Actions(Enum):
do_nothing = 0
turn_off = 1
chatGPT = 2
weather = 3
explain_code = 4
rewrite_code = 5
class Brain():
audio_recognizer = sr.Recognizer()
memory:list[dict[str,str]] = []
def process_audio(self, audio):
try:
text = self.audio_recognizer\
.recognize_google(audio,\
language = 'en-IN')
except sr.UnknownValueError:
text = ''
return text.lower()
def determine_action(self, text:str) -> Actions:
text = text.lower()
# Is this a bot command?
is_command = False
for prefix in ['hey', 'hello', 'yo', 'sup']:
find = f"{prefix} hal"
if text.startswith(find):
is_command = True
text = text[len(find):]
if not is_command:
return Actions.do_nothing
# Determine action
if self._is_turn_off(text):
return Actions.turn_off
elif self._is_weather(text):
return Actions.weather
elif self._is_explain_code(text):
return Actions.explain_code
elif self._is_rewrite_code(text):
return Actions.rewrite_code
else:
return Actions.chatGPT
@staticmethod
def _is_turn_off(text:str)->bool:
return 'turn off' in text
@staticmethod
def _is_weather(text:str)->bool:
statements = ["what's the weather like", "weather today"]
return any([s in text for s in statements])
@staticmethod
def _is_explain_code(text:str)->bool:
statements = ["what does this code do", "explain this code"]
return any([s in text for s in statements])
@staticmethod
def _is_rewrite_code(text:str)->bool:
statements = ["rewrite this code for me"]
return any([s in text for s in statements])
def formulate_response(self, text):
openai.api_key = API_KEY
if len(self.memory) == 0:
system_text = "You are a friendly AI named Hal. Try to answer my questions in less than 20 words."
self.memory.append({"role":"system", "content":system_text})
try:
self.memory.append({"role":"user", "content":text})
instance = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
max_tokens = 200,
messages=self.memory
)
response = instance.choices[0].message.content
self.memory.append({"role":"assistant", "content":response})
except Exception as e:
print(e)
self.memory = self.memory[0:len(self.memory)-1]
response = "Sorry, I ran into an issue, plese try me again."
return response
| [
"Sorry, I ran into an issue, plese try me again.",
"You are a friendly AI named Hal. Try to answer my questions in less than 20 words."
] |
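A small sketch of how Brain.determine_action routes voice-command phrases; the phrases are made up, and it assumes pkg/Brain.py and its config module (with API_KEY) import cleanly.

```python
from pkg.Brain import Brain  # import path assumed from the repo layout

brain = Brain()
print(brain.determine_action("hey hal turn off"))                   # Actions.turn_off
print(brain.determine_action("hello hal what's the weather like"))  # Actions.weather
print(brain.determine_action("yo hal explain this code"))           # Actions.explain_code
print(brain.determine_action("unrelated chatter"))                  # Actions.do_nothing (no "hey hal" prefix)
```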
2024-01-10 | LuisLaurence23/Streamlit_restaurantes | SPOILER_luis.py | import streamlit as st
import pandas as pd
import folium
from langchain.document_loaders import TextLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders.csv_loader import CSVLoader
from langchain_experimental.agents.agent_toolkits import create_csv_agent
from langchain.agents.agent_types import AgentType
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from fuzzywuzzy import process
import re
from streamlit_folium import folium_static
import os
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_KEY"]
page_bg_img = f"""
<style>
[data-testid="stAppViewContainer"] > .main {{
background-image: url("https://images.unsplash.com/photo-1501426026826-31c667bdf23d");
background-size: 100%;
background-position: top left;
background-repeat: no-repeat;
background-attachment: local;
}}
[data-testid="stSidebar"] > div:first-child {{
background-image: url("https://images.unsplash.com/photo-1501426026826-31c667bdf23d");
background-position: center;
background-repeat: no-repeat;
background-attachment: fixed;
}}
[data-testid="stHeader"] {{
background: rgba(0,0,0,0);
}}
[data-testid="stToolbar"] {{
right: 2rem;
}}
</style>
"""
st.markdown(page_bg_img, unsafe_allow_html=True)
st.sidebar.header("Configuration")
#ruta_archivo = r'C:\Users\baby_\OneDrive\Escritorio\streamlitproyect\restaurante_google.csv'
df_restaurantes = pd.read_csv("SPOILER_nombres_restaurantes_latitud_longitud.csv")
agent = create_csv_agent(
ChatOpenAI(temperature=1, model="gpt-4"),
"SPOILER_Restaurants_Reviews_merged.csv",
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
)
st.title('Recomendador de Restaurantes en California')
st.subheader('Mapa de California')
coord_california = [36.7783, -119.4179]
m = folium.Map(location=coord_california, zoom_start=6)
st.write(m)
query = st.text_input("Escribe aquí tu pregunta")
if st.button('Buscar'):
with st.spinner('Procesando...'):
results = agent.run(query)
st.subheader('Resultados:')
st.write(results)
if isinstance(results, str):
# Extract the restaurant names from the response
names = re.findall(r'(\w+)', results)
# Look up the restaurant names in the DataFrame using fuzzy matching
matching_restaurants = []
for name in names:
match = process.extractOne(name, df_restaurantes['name'])
if match[1] > 89.5: # Keep only matches whose fuzzy-match score is above the threshold
matching_restaurants.append(df_restaurantes[df_restaurantes['name'] == match[0]])
matching_restaurants = pd.concat(matching_restaurants)
if not matching_restaurants.empty:
st.subheader('Mapa de Restaurantes')
m = folium.Map(location=[matching_restaurants['latitude'].mean(), matching_restaurants['longitude'].mean()], zoom_start=12)
for index, row in matching_restaurants.iterrows():
folium.Marker([row['latitude'], row['longitude']], popup=row['name']).add_to(m)
# Make sure the map is rendered in Streamlit
folium_static(m)
| [] |
2024-01-10 | spikedoanz/paper-to-podcast | src~interviewer.py | import openai
import json
import os
from dotenv import load_dotenv
from pathlib import Path
env_path = Path('.')/'.env'
load_dotenv(dotenv_path = env_path)
openai.api_key = os.environ['CHAT_TOKEN']
class Interviewer:
def __init__(self, name, gender, personality, expertise, brevity, interests, paper, model):
self.name = name
self.gender = gender
self.personality = personality
self.expertise = expertise
self.brevity = brevity
self.interests = interests
self.record = []
self.memory = []
self.model = model
self.paper = paper
self.identity = (
f"You are a {self.gender} interviewer named {self.name}. "
f"You're known for your {self.personality}. "
f"As an interviewer, you usually {self.brevity}, and aim for a {self.expertise} audience. "
f"As a person, you are interested in {self.interests}; "
f"don't let that show explicitly in your interviewing style, but guide the interview in that direction. "
f"do NOT explicitly say anything about yourself. Let all of that be inferred through the way that you talk. "
f"do NOT point out that you're an interviewer, everyone already knows this and it would be annoying to hear it again. "
f"10/10 writing quality is a top writer and 1/10 writing quality is a terrible writer. You are a 9/10 writer. "
)
def chat(self, user_prompt):
system_prompt = self.identity
system_prompt += (
f"Today's talk is about a paper called \"{self.paper}\", "
"You are interviewing Alex, standin for the writers of the paper. "
"but don't point this out, just have a normal conversation with her. "
"Simply start talking, no formating is required. "
"Let Alex do the talking when it comes into introducing concepts."
)
messages = [
{"role": "system", "content": system_prompt},
]
messages += self.memory
messages += [{"role": "user", "content": user_prompt}]
response = openai.ChatCompletion.create(
model= self.model,
messages= messages,
temperature = 0.8,
)
self.record.append(response)
assistant_response = response['choices'][0]['message']['content']
self.memorize(user_prompt, assistant_response)
return assistant_response
def self_introduction(self):
system_prompt = self.identity
system_prompt += (
"Introduce yourself to your audience, who already know you're an interviewer, "
"and are here for an interview, not to hear about who you are. "
f"Today's talk is about a paper called \"{self.paper}\", "
"You are interviewing Alex, standin for the writers of the paper. "
"but don't point this out, just have a normal conversation with her. "
"Simply start talking, no formating is required. "
"Let Alex do the talking when it comes into introducing concepts."
)
user_prompt = ("")
response = openai.ChatCompletion.create(
model = self.model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt}
],
temperature = 0.8
)
self.record.append(response)
return response['choices'][0]['message']['content']
def memorize(self, user_prompt, assistant_response):
user = {"role" : "user", "content": user_prompt}
assistant = {"role": "assistant", "content": assistant_response}
self.memory.append(user)
self.memory.append(assistant)
def ask_topic(self, topic):
system_prompt = self.identity
system_prompt += (
f"Today's talk is about a paper called \"{self.paper}\", "
"You are interviewing Alex, standin for the writers of the paper. "
"but don't point this out, just have a normal conversation with her. "
"Simply start talking, no formating is required. "
"You've already said hi to her, this is the middle of the conversation. "
"You've already introduced the name of the paper, so don't mention it again. "
"Let Alex do the talking when it comes into introducing concepts."
f"It's your turn to talk, ask Alex to explain {topic}"
"Don't ask multiple questions, stick to a single question per turn. "
)
user_prompt = ""
response = openai.ChatCompletion.create(
model= self.model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt}
],
temperature = 1.2
)
self.record.append(response)
return response['choices'][0]['message']['content']
def ask_topic(self, topic, subtopics, participant):
system_prompt = self.identity
if (subtopics != "<|no subtopics|>"):
subtopic_string = f"Do not be overly detailed, leave some room to discuss other topics. Simply ask a broad open question to let {participant} talk."
system_prompt += (
f"The title of the paper is \"{self.paper}\", "
f"You are interviewing {participant}, representative of the writers of the paper. "
"but don't point this out, just have a normal conversation with them. "
"Simply start talking, no formating is required. "
"You've already said hi to them, this is the middle of the conversation. "
"You've already introduced the name of the paper, so don't mention it again. "
f"You want to ask {participant} about {topic}. {subtopic_string}"
f"Ask the question naturally, and DONT just go 'I'd like to hear about {topic} from the paper please', "
f"That sounds very condescending to {participant} and they will leave if you say so"
)
response = openai.ChatCompletion.create(
model= self.model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": "Is there any topic in particular you'd like to hear about?"}
],
temperature = 1.2
)
self.record.append(response)
return response['choices'][0]['message']['content']
def ask_subtopic(self, topic, subtopics_string, participant, conversation_log):
system_prompt = self.identity
system_prompt += (
f"The title of the paper is {self.paper}"
f"You are interviewing {participant}, representative of the writers of the paper. "
f"You are currently in a discussion about {topic}. "
f"Now, you want to delve into more detail about one of the following topics: {subtopics_string} "
)
#user_prompt = userprompt
messages = [
{"role": "system", "content" : system_prompt}
]
messages.extend(conversation_log)
response = openai.ChatCompletion.create(
model= self.model,
messages= messages,
temperature = 1.2
)
self.record.append(response)
return response['choices'][0]['message']['content']
def tokens_used(self):
sum_prompt_tokens = 0
sum_completion_tokens = 0
sum_total_tokens = 0
for record in self.record:
prompt_tokens = record['usage']['prompt_tokens']
completion_tokens = record['usage']['completion_tokens']
total_tokens = record['usage']['total_tokens']
#print(prompt_token, completion_tokens, total_tokens)
sum_prompt_tokens += prompt_tokens
sum_completion_tokens += completion_tokens
sum_total_tokens += total_tokens
print(
"\n\n\n-----------------------------------\n"
f" * {self.name} token usage report:\n"
f"Prompt tokens: {sum_prompt_tokens}\n"
f"Completion tokens: {sum_completion_tokens}\n"
f"Total tokens: {sum_total_tokens}\n"
)
#return(sum_prompt_tokens, sum_completion_tokens, sum_total_tokens)
def start_interview(self):
pass
def ask_for_citation(self):
pass # Implementation of asking for citation
def draw_comparison(self):
pass # Implementation of drawing comparisons
def ask_clarifying(self):
pass # Implementation of asking clarifying questions
def casual_talk(self):
pass # Implementation of casual conversation
def summarization(self):
pass # Implementation of summarization functionality
def simplify(self):
pass # Implementation of simplifying functionality
def critical_thinking(self):
pass # Implementation of critical thinking functionality
| [
"Introduce yourself to your audience, who already know you're an interviewer, ",
"but don't point this out, just have a normal conversation with them. ",
"but don't point this out, just have a normal conversation with her. ",
"It's your turn to talk, ask Alex to explain PLACEHOLDER",
"Now, you want to delve into more detail about one of the following topics: PLACEHOLDER ",
"That sounds very condescending to PLACEHOLDER and they will leave if you say so",
"Simply start talking, no formating is required. ",
"You are interviewing PLACEHOLDER, representative of the writers of the paper. ",
"You've already said hi to her, this is the middle of the conversation. ",
"You've already said hi to them, this is the middle of the conversation. ",
"Ask the question naturally, and DONT just go 'I'd like to hear about PLACEHOLDER from the paper please', ",
"Let Alex do the talking when it comes into introducing concepts.",
"Don't ask multiple questions, stick to a single question per turn. ",
"and are here for an interview, not to hear about who you are. ",
"prompt_tokens",
"0",
"You want to ask PLACEHOLDER about PLACEHOLDER. Do not be overly detailed, leave some room to discuss other topics. Simply ask a broad open question to let PLACEHOLDER talk.",
"You've already introduced the name of the paper, so don't mention it again. ",
"Is there any topic in particular you'd like to hear about?",
"You are interviewing Alex, standin for the writers of the paper. ",
"You are currently in a discussion about PLACEHOLDER. "
] |
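For illustration only, a hypothetical way to instantiate the Interviewer persona above; every argument value here is invented, and it assumes CHAT_TOKEN is present in the .env file the module loads at import time.

```python
from src.interviewer import Interviewer  # import path assumed from the repo layout

host = Interviewer(
    name="Ody", gender="male", personality="dry wit and curiosity",      # invented persona values
    expertise="general", brevity="keep questions to a sentence or two",
    interests="machine learning", paper="A Hypothetical Paper Title",
    model="gpt-3.5-turbo",
)
print(host.identity)                    # the persona prompt assembled in __init__
# opening = host.self_introduction()    # would call the OpenAI API
```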
2024-01-10 | spikedoanz/paper-to-podcast | src~representative.py | import openai
import json
import os
from dotenv import load_dotenv
from pathlib import Path
env_path = Path('.')/'.env'
load_dotenv(dotenv_path = env_path)
openai.api_key = os.environ['CHAT_TOKEN']
class Representative:
def __init__(self, name, gender, personality, expertise, brevity, interests, paper, model):
self.name = name
self.gender = gender
self.personality = personality
self.expertise = expertise
self.brevity = brevity
self.interests = interests
self.record = []
self.memory = []
self.paper = paper
self.model = model
self.identity = (
f"You are a {self.gender} representative named {self.name}, doing an interview for a group of researchers. "
f"You're known for your {self.personality}. "
f"As an speaker, you usually {self.brevity}, and aim for a {self.expertise} audience. "
f"As a person, you are interested in {self.interests}; "
f"don't let that show explicitly in your speaking style, but guide the dicussion in that direction. "
f"do NOT explicitly say anything about yourself. Let all of that be inferred through the way that you talk. "
f"do NOT point out that you're a representative, everyone already knows this and it would be annoying to hear it again. "
f"10/10 writing quality is a top writer and 1/10 writing quality is a terrible writer. You are a 9/10 writer. "
)
def chat(self, user_prompt):
system_prompt = self.identity
system_prompt += (
f"Today's talk is about a paper called \"{self.paper}\", "
"You're being interviewed by Ody, but don't point this out, just have a normal conversation with him "
)
messages = [
{"role": "system", "content": system_prompt},
]
messages += self.memory
messages += [{"role": "user", "content": user_prompt}]
response = openai.ChatCompletion.create(
model= self.model,
messages= messages,
temperature = 0.8,
)
self.record.append(response)
assistant_response = response['choices'][0]['message']['content']
self.memorize(user_prompt, assistant_response)
return assistant_response
def start_interview(self):
pass
def ask_for_citation(self):
pass # Implementation of asking for citation
def draw_comparison(self):
pass # Implementation of drawing comparisons
def ask_clarifying(self):
pass # Implementation of asking clarifying questions
def casual_talk(self):
pass # Implementation of casual conversation
def summarization(self):
pass # Implementation of summarization functionality
def simplify(self):
pass # Implementation of simplifying functionality
def critical_thinking(self):
pass # Implementation of critical thinking functionality
def self_introduction(self, user_prompt):
system_prompt = self.identity
system_prompt += (
"Introduce the paper to your audience, who already know you're a representative, "
"and are here for an interview, not to hear about who you are. "
f"Today's talk is about a paper called \"{self.paper}\", "
"You're being interviewed by Ody, but don't point this out, just have a normal conversation with him "
)
response = openai.ChatCompletion.create(
model= self.model,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_prompt}
],
temperature = 0.8
)
self.record.append(response)
assistant_response = response['choices'][0]['message']['content']
self.memorize(user_prompt, assistant_response)
return assistant_response
def memorize(self, user_prompt, assistant_response):
user = {"role" : "user", "content": user_prompt}
assistant = {"role": "assistant", "content": assistant_response}
self.memory.append(user)
self.memory.append(assistant)
def reply (self, topic, subtopics_string, participant, conversation_log, context):
system_prompt = self.identity
system_prompt += (
f"Today's talk is about a paper called \"{self.paper}\", "
f"You're currently talking about {topic}, and might go in depth about {subtopics_string}"
f"The section of the paper you're currently discussing attached here:\n{context}\n "
f"just give a few sentences briefing on the topic at hand, don't go into specifics unless if asked by {participant}. "
"Don't give a conclusion or overview of what you discuss. Wait for Ody to prompt you before doing so. "
"Do NOT give a conclusion. Ody will stop talking to you and will be very sad. "
)
#user_prompt = user_prompt
messages = [
{"role": "system", "content" : system_prompt}
]
messages.extend(conversation_log)
response = openai.ChatCompletion.create(
model= self.model,
messages= messages,
temperature = 1.2
)
self.record.append(response)
return response['choices'][0]['message']['content']
def summarize_topic(self, topic, subtopics, chunk):
system_prompt = self.identity
system_prompt += (
f"You are preparing notes for your own use for an interview discussing the paper \"{self.paper}\", "
f"The topic that you are preparing about is {topic}, which contains the subtopics: {subtopics}. "
"Make sure that your notes have all the information that you will need, "
"and put extreme care into accuracy of the information. "
f"The section of the paper you're summarizing is attached here:\n{chunk}"
)
user_prompt = ""
response = openai.ChatCompletion.create(
model= self.model,
messages=[
{"role": "system", "content": system_prompt},
],
temperature = 0.8,
max_tokens = 1000
)
self.record.append(response)
return response['choices'][0]['message']['content']
def tokens_used(self):
sum_prompt_tokens = 0
sum_completion_tokens = 0
sum_total_tokens = 0
for record in self.record:
prompt_tokens = record['usage']['prompt_tokens']
completion_tokens = record['usage']['completion_tokens']
total_tokens = record['usage']['total_tokens']
#print(prompt_token, completion_tokens, total_tokens)
sum_prompt_tokens += prompt_tokens
sum_completion_tokens += completion_tokens
sum_total_tokens += total_tokens
print(
"\n\n\n-----------------------------------\n"
f" * {self.name} token usage report:\n"
f"Prompt tokens: {sum_prompt_tokens}\n"
f"Completion tokens: {sum_completion_tokens}\n"
f"Total tokens: {sum_total_tokens}\n"
)
#return(sum_prompt_tokens, sum_completion_tokens, sum_total_tokens) | [
"0",
"Introduce the paper to your audience, who already know you're a representative, ",
"and put extreme care into accuracy of the information. ",
"and are here for an interview, not to hear about who you are. ",
"You're being interviewed by Ody, but don't point this out, just have a normal conversation with him ",
"The section of the paper you're currently discussing attached here:\nPLACEHOLDER\n ",
"The topic that you are preparing about is PLACEHOLDER, which contains the subtopics: PLACEHOLDER. ",
"just give a few sentences briefing on the topic at hand, don't go into specifics unless if asked by PLACEHOLDER. ",
"Don't give a conclusion or overview of what you discuss. Wait for Ody to prompt you before doing so. ",
"Do NOT give a conclusion. Ody will stop talking to you and will be very sad. ",
"The section of the paper you're summarizing is attached here:\nPLACEHOLDER",
"You're currently talking about PLACEHOLDER, and might go in depth about PLACEHOLDER",
"Make sure that your notes have all the information that you will need, ",
"prompt_tokens"
] |
2024-01-10 | spikedoanz/paper-to-podcast | src~utilities.py | import os
import re
import json
import openai
from pathlib import Path
from itertools import chain
from datetime import datetime
from dotenv import load_dotenv
from pdfminer.high_level import extract_text
from representative import Representative
from interviewer import Interviewer
from extract import *
from utilities import *
def formatted_time() -> str:
now = datetime.now()
formatted_time = now.strftime('%H-%M-%d-%m-%Y')
return formatted_time
def format_subtopics_with_quotes(subtopics: list[str]) -> str:
if (len(subtopics) == 0):
return "<|no subtopics|>"
if (len(subtopics) == 1):
return subtopics[0]
formatted_string = ""
for subtopic in subtopics[:-1]:
formatted_string += f"\"{subtopic}\", "
formatted_string += f"and \"{subtopics[-1]}\""
return formatted_string
def format_subtopics(subtopics: list[str]) -> str:
if (len(subtopics) == 0):
return "<|no subtopics|>"
if (len(subtopics) == 1):
return subtopics[0]
formatted_string = ""
for subtopic in subtopics[:-1]:
formatted_string += f"{subtopic}, "
formatted_string += f"and {subtopics[-1]}"
return formatted_string
if __name__ == "__main__":
subtopics = ["one", 'two', 'three']
print(format_subtopics(subtopics)) | [] |
2024-01-10 | jdavenport10/codelive-august-10-2023 | app~helpers.py | import openai
import numpy as np
import json
from .prompt import *
openai.api_key = json.load(open("openai.json"))["key"]
embeddings_file = open("embeddings-backup.json", "r")
database = json.loads(embeddings_file.read())
vector_matrix = np.array([v["vector"] for _, v in database.items()])
def embed_query(query_string):
print(f"EMBEDDING QUERY STRING: {query_string}")
query_embedding = openai.Embedding.create(
input=query_string,
model="text-embedding-ada-002"
)['data'][0]['embedding']
return query_embedding
def search(query_string):
query_embed = embed_query(query_string)
print("DOING MATH")
# IMPORTANT: We can use Dot Product here because the vectors are normalized
# IMPORTANT: For unnormalized vectors, you must use a different metric (cos_similarity)
# Calculate distances for all vectors in our database
similarities = np.dot(query_embed, vector_matrix.T)
# Retrieve the top 10 "most similar"
top_10 = (np.argpartition(similarities, -10)[-10:])
# Get the original text from our database for each vector
context = [database[str(i+1)]["text"] for i in top_10]
# return a single string as the context for our query
print(context)
return " | ".join(context)
def chat_gpt_query(query_string, existing_context=[]):
print("GETTING CONTEXT")
# Find our context to send to ChatGPT
context = search(query_string)
# Assemble our Prompt
query_prompt = prompt.format(
context=context,
user_query=query_string
)
chatdata = existing_context + [
{"role": "user", "content": query_prompt}
]
print("ASKING CHATGPT")
# Query ChatGPT for our response
resp = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=chatdata,
temperature=0.0
)["choices"][0]["message"]["content"]
print(resp)
return resp | [
"existing_context + [\n {\"role\": \"user\", \"content\": query_prompt}\n ]"
] |
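The search() helper above ranks with a plain dot product and notes in its comments that this is only valid because the stored vectors are normalized. A tiny numeric sketch of why that shortcut works:

```python
# For unit-norm vectors, dot product equals cosine similarity, so np.dot ranking is safe.
import numpy as np

a, b = np.array([3.0, 4.0]), np.array([1.0, 1.0])
cosine = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

a_unit, b_unit = a / np.linalg.norm(a), b / np.linalg.norm(b)
print(np.isclose(np.dot(a_unit, b_unit), cosine))  # True
```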
2024-01-10 | BastinFlorian/RAG-on-GCP-with-VertexAI | src~chatbot~lib~llms.py | from langchain.chat_models import ChatVertexAI
from config import REGION
def get_llm(callbacks=None, streaming: bool = False, max_output_tokens: int = 512, temperature: float = 0.1):
llm = ChatVertexAI(
location=REGION,
temperature=temperature,
streaming=streaming,
callbacks=callbacks,
max_output_tokens=max_output_tokens
)
return llm
| [] |
2024-01-10 | BastinFlorian/RAG-on-GCP-with-VertexAI | src~upload_data~lib~process_new_pages.py | import os
import ast
import uuid
from tqdm import tqdm
from typing import List, Tuple
from google.cloud import firestore
from google.cloud import aiplatform
from langchain.schema import Document
from langchain.document_loaders import ConfluenceLoader
from vertexai.language_models import TextEmbeddingModel
from .gcs import write_list_of_str_in_gcs
from .transformer import split_docs
from .typehint import HintDataFrame
from .loader import load_page, init_loader
from .embedding import encode_text_to_embedding_batched, get_json_formatted
from .firestore import init_firestore_db, send_json_to_firestore, create_json_from_langchain_documents
from config import (
PROJECT_ID,
REGION,
BUCKET_NAME,
GCS_EMBEDDING_DIRECTORY
)
def remove_empty_chunks(chunks: List[str], ids: List[str]) -> Tuple[List[str], List[str]]:
"""Remove empty chunks and their ids"""
non_empty_chunks = []
non_empty_ids = []
for idx in range(len(chunks)):
if chunks[idx] != "":
non_empty_chunks.append(chunks[idx])
non_empty_ids.append(ids[idx])
return non_empty_chunks, non_empty_ids
def process_new_page(
page_id: str,
metadata: str,
confluence_loader: ConfluenceLoader,
firestore_db: firestore.Client,
embedding_model: TextEmbeddingModel
):
"""
Process a new page by loading its content, splitting it into chunks,
encoding the chunks into embeddings, and writing the embeddings in JSON format to Google Cloud Storage.
Send the succesfully embedded chunks to Firestore.
More informations on the data format:
https://cloud.google.com/vertex-ai/docs/vector-search/setup/format-structure?hl=fr#prerequisite
Parameters:
page_id (str): The ID of the page to be processed.
confluence_loader: The loader object for loading the page content.
embedding_model: The model used for encoding text into embeddings.
metadata: str of a dict with the metadata of the page
Returns:
None
Example:
metadata = {"restricts": [{"namespace": "space_key", "allow": ["test"]}]}
json file for two chunks:
{"id": "1", "embedding": [1,1,1], "restricts": [{"namespace": "space_key", "allow": ["test"]}]}
{"id": "2", "embedding": [2,2,2], "restricts": [{"namespace": "space_key", "allow": ["test"]}]}
filename: 123456789.json (page_id.json)
"""
doc = load_page(page_id=page_id, loader=confluence_loader)
splitted_docs: List[Document] = split_docs([doc])
# Create chunks, embeddings and write to GCS
chunks: List[str] = [splitted_doc.page_content for splitted_doc in splitted_docs]
ids = [str(uuid.uuid4()) for _ in range(len(chunks))]
chunks, ids = remove_empty_chunks(chunks, ids)
if not are_all_chunks_empty(chunks):
# Get embeddings and informations about the success of the embedding
is_successful, embedding_list = encode_text_to_embedding_batched(
embedding_model=embedding_model,
chunks=chunks
)
matadata_dict = ast.literal_eval(metadata)
json_formatted = get_json_formatted(
is_successful=is_successful,
embedding_list=embedding_list,
ids=ids,
matadata_dict=matadata_dict
)
write_list_of_str_in_gcs(
bucket_name=BUCKET_NAME,
blob_name=os.path.join(GCS_EMBEDDING_DIRECTORY, page_id + ".json"),
list_of_str=json_formatted,
newline=False
)
# Write successfully embeded chunks to Firestore
successful_splited_docs = [splitted_docs[i] for i in range(len(splitted_docs)) if is_successful[i]]
successful_ids = [ids[i] for i in range(len(ids)) if is_successful[i]]
list_of_json_docs = create_json_from_langchain_documents(
langchain_documents=successful_splited_docs,
ids=successful_ids,
)
send_json_to_firestore(
list_of_json_docs=list_of_json_docs,
firestore_db=firestore_db
)
def are_all_chunks_empty(chunks: List[str]) -> bool:
"""Check if all chunks are empty"""
return all([chunk == "" for chunk in chunks])
def process_new_pages(df_active_pages: HintDataFrame[['page_id', 'space_key', 'created_at', 'modified_at', 'metadata']]): # noqa
aiplatform.init(
project=PROJECT_ID,
location=REGION,
)
confluence_loader = init_loader()
embedding_model = TextEmbeddingModel.from_pretrained("textembedding-gecko@001")
firestore_db = init_firestore_db()
for _, row in tqdm(df_active_pages.iterrows(), total=df_active_pages.shape[0]):
page_id = row['page_id']
metadata = row['metadata']
process_new_page(
page_id=page_id,
metadata=metadata,
confluence_loader=confluence_loader,
firestore_db=firestore_db,
embedding_model=embedding_model
)
| [] |
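A hypothetical input frame for process_new_pages above, matching the columns named in its HintDataFrame annotation; the page id, dates and metadata string are placeholders in the same shape as the docstring example.

```python
import pandas as pd
# from lib.process_new_pages import process_new_pages  # import path assumed

df_active_pages = pd.DataFrame([{
    "page_id": "123456789",                    # placeholder Confluence page id
    "space_key": "test",
    "created_at": "2024-01-01T00:00:00Z",
    "modified_at": "2024-01-05T00:00:00Z",
    # parsed with ast.literal_eval, so it must be a Python dict literal
    "metadata": "{'restricts': [{'namespace': 'space_key', 'allow': ['test']}]}",
}])
# process_new_pages(df_active_pages)  # would load, chunk, embed and index the page
```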
2024-01-10 | BastinFlorian/RAG-on-GCP-with-VertexAI | src~chatbot~lib~source_retriever.py | import collections
from typing import List
from langchain.schema import Document
from .errors_handler import traceback_no_document_found_in_firestore
def list_top_k_sources(source_documents: List[Document], k=3) -> str:
"""
Get the top k sources from a list of documents.
Counts the number of chunks per documents and sorts them in descending order.
Displays a markdown format string with the top k sources.
Args:
source_documents : List[Document]
A list of Document objects.
k : int, optional
The number of sources to display. Default is 3.
Returns:
str
A markdown formatted string to be displayed in the frontend.
"""
if not source_documents:
traceback_no_document_found_in_firestore()
return ""
sources = [
f'[{source_document.metadata["title"]}]({source_document.metadata["source"]})'
for source_document in source_documents
]
if sources:
k = min(k, len(sources))
distinct_sources = list(zip(*collections.Counter(sources).most_common()))[0][:k]
distinct_sources_str = " \n- ".join(distinct_sources)
return f"Source(s): \n- {distinct_sources_str}"
def get_top_k_urls(source_documents: List[Document], k=3) -> List[str]:
"""
Retrieve the top k distinct source URLs from a list of source documents.
Args:
source_documents (List[Document]): A list of source documents.
k (int, optional): The number of distinct source URLs to retrieve. Defaults to 3.
Returns:
List[str]: A list of the top k distinct source URLs.
Raises:
None
"""
if not source_documents:
traceback_no_document_found_in_firestore()
return list()
urls = [source_document.metadata["source"] for source_document in source_documents]
k = min(k, len(urls))
distinct_urls = list(zip(*collections.Counter(urls).most_common()))[0][:k]
return distinct_urls
| [] |
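A quick illustrative call to list_top_k_sources above, with made-up Confluence metadata; the wiki URLs are placeholders and the import path is assumed.

```python
from langchain.schema import Document
from lib.source_retriever import list_top_k_sources  # import path assumed

docs = [
    Document(page_content="...", metadata={"title": "Onboarding", "source": "https://wiki.example.com/onboarding"}),
    Document(page_content="...", metadata={"title": "Onboarding", "source": "https://wiki.example.com/onboarding"}),
    Document(page_content="...", metadata={"title": "Benefits",   "source": "https://wiki.example.com/benefits"}),
]
print(list_top_k_sources(docs, k=2))
# Source(s):
# - [Onboarding](https://wiki.example.com/onboarding)
# - [Benefits](https://wiki.example.com/benefits)
```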
2024-01-10 | BastinFlorian/RAG-on-GCP-with-VertexAI | src~chatbot~lib~firestore.py | import logging
import numpy as np
import firebase_admin
import datetime as dt
from google.cloud import aiplatform
from firebase_admin import firestore
from typing import List, Dict, Any, Union
from langchain.schema import BaseRetriever, Document
from langchain.embeddings.vertexai import VertexAIEmbeddings
from google.cloud.aiplatform.matching_engine import matching_engine_index_endpoint
from langchain.callbacks.manager import CallbackManagerForRetrieverRun, AsyncCallbackManagerForRetrieverRun
from .filters import convert_filters_datetime_to_timestamp, get_namespace_from_filters
from .errors_handler import traceback_not_exist_firestore_document
from config import (
PROJECT_ID,
FIRESTORE_DATABASE_NAME,
REGION,
INDEX_ENDPOINT_ID,
DEPLOYED_INDEX_ID,
FIRESTORE_COLLECTION_NAME,
)
if not firebase_admin._apps:
app = firebase_admin.initialize_app()
firestore_db = firestore.Client(
project=PROJECT_ID,
database=FIRESTORE_DATABASE_NAME
)
class FirestoreRetriever(BaseRetriever):
index_endpoint_name: str
deployed_index_id: str
embeddings: VertexAIEmbeddings
collection: str
top_k: int = 5
filter: List[matching_engine_index_endpoint.Namespace]
numeric_filter: List[matching_engine_index_endpoint.NumericNamespace]
dict_filters: Dict[str, Any]
def _similarity_search(self, query_emb: np.ndarray):
"""
Perform a similarity search.
Args:
query_emb: Query represented as an embedding
Returns:
A list of documents most similar to the query
"""
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=self.index_endpoint_name,
location=REGION
)
similar_docs = my_index_endpoint.find_neighbors(
deployed_index_id=self.deployed_index_id,
queries=query_emb,
num_neighbors=self.top_k,
filter=self.filter,
numeric_filter=self.numeric_filter
)
return similar_docs
def _get_relevant_documents(
self, query: str, *, run_manager: CallbackManagerForRetrieverRun
) -> List[Document]:
query_embedding = self.embeddings.embed_documents([query])
similar_docs = self._similarity_search(query_embedding)
logging.info(f"Filters before searching relevant documents {self.dict_filters}")
relevant_docs = []
for doc in similar_docs[0]:
doc_id = doc.id
doc_ref = firestore_db.collection(self.collection).document(doc_id)
doc = doc_ref.get()
traceback_not_exist_firestore_document(doc)
if doc.exists:
relevant_docs.append(self._firestore_doc_to_langchain_doc(doc))
return relevant_docs
async def _aget_relevant_documents(
self, query: str, *, run_manager: AsyncCallbackManagerForRetrieverRun
) -> List[Document]:
raise NotImplementedError()
def _firestore_doc_to_langchain_doc(self, fs_doc) -> Document:
lc_doc = Document(
page_content=fs_doc.get("content"),
metadata={
"source": fs_doc.get("source"),
"title": fs_doc.get("title"),
}
)
return lc_doc
def get_retriever(embeddings: VertexAIEmbeddings, filters: Dict[str, Union[bool, dt.date]]):
filters = convert_filters_datetime_to_timestamp(filters)
filter, numeric_filter = get_namespace_from_filters(filters)
retriever = FirestoreRetriever(
index_endpoint_name=INDEX_ENDPOINT_ID,
deployed_index_id=DEPLOYED_INDEX_ID,
collection=FIRESTORE_COLLECTION_NAME,
embeddings=embeddings,
top_k=5,
filter=filter,
numeric_filter=numeric_filter,
dict_filters=filters
)
return retriever
| [] |
2024-01-10 | BastinFlorian/RAG-on-GCP-with-VertexAI | src~upload_data~lib~loader.py | from langchain.schema import Document
from langchain.document_loaders import ConfluenceLoader
from config import (
CONFLUENCE_URL,
CONFLUENCE_API_KEY,
CONFLUENCE_USERNAME,
)
def init_loader(
confluence_url=CONFLUENCE_URL,
username=CONFLUENCE_USERNAME,
api_key=CONFLUENCE_API_KEY
) -> ConfluenceLoader:
loader = ConfluenceLoader(
url=confluence_url,
username=username,
api_key=api_key
)
return loader
def load_page(
page_id: str,
loader: ConfluenceLoader,
keep_markdown_format: bool = True,
) -> Document:
"""Load HTML files from Confluence"""
docs = loader.load(
page_ids=[page_id],
keep_markdown_format=keep_markdown_format,
limit=1, # Important to avoid loading all the pages
max_pages=0 # https://github.com/langchain-ai/langchain/discussions/11634
)
# Issue Langchain: https://github.com/langchain-ai/langchain/issues/13579
return docs[-1]
| [] |
2024-01-10 | BastinFlorian/RAG-on-GCP-with-VertexAI | src~upload_data~lib~transformer.py | # import uuid TODO: replace int to uuid in code
from typing import List
from langchain.text_splitter import MarkdownHeaderTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import Document
def split_docs(docs: List[Document]) -> List[Document]:
# Markdown
headers_to_split_on = [
("#", "Titre 1"),
("##", "Sous-titre 1"),
("###", "Sous-titre 2"),
]
markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
# Split based on markdown and add original metadata
md_docs = []
for doc in docs:
md_doc = markdown_splitter.split_text(doc.page_content)
for i in range(len(md_doc)):
md_doc[i].metadata = md_doc[i].metadata | doc.metadata
md_docs.extend(md_doc)
# Chunk size big enough
splitter = RecursiveCharacterTextSplitter(
chunk_size=500,
chunk_overlap=20,
separators=["\n\n", "\n", "(?<=\. )", " ", ""] # noqa
)
splitted_docs = splitter.split_documents(md_docs)
return splitted_docs
| [] |
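A small sketch of split_docs above on a made-up Markdown page; it assumes the module and its LangChain splitters import cleanly, and the page metadata is a placeholder.

```python
from langchain.schema import Document
from lib.transformer import split_docs  # import path assumed

page = Document(
    page_content="# Guide\nShort intro.\n## Setup\nInstall the tool and configure the key.",
    metadata={"title": "Guide", "source": "https://wiki.example.com/guide"},  # placeholder metadata
)
for chunk in split_docs([page]):
    print(chunk.metadata)
# Each chunk carries the original page metadata merged with the Markdown header
# levels it was split under ("Titre 1", "Sous-titre 1"), ready for embedding.
```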
2024-01-10 | SunnyVhris/ChatPaper | chat_paper.py | import numpy as np
import os
import re
import datetime
import arxiv
import openai, tenacity
import base64, requests
import argparse
import configparser
import json
import tiktoken
from get_paper_from_pdf import Paper
# Define the Reader class
class Reader:
# Initializer: set up attributes
def __init__(self, key_word, query, filter_keys,
root_path='./',
gitee_key='',
sort=arxiv.SortCriterion.SubmittedDate, user_name='defualt', args=None):
self.user_name = user_name # reader's name
self.key_word = key_word # keywords the reader cares about
self.query = query # search query entered by the reader
self.sort = sort # sort criterion chosen by the reader
if args.language == 'en':
self.language = 'English'
elif args.language == 'zh':
self.language = 'Chinese'
else:
self.language = 'Chinese'
self.filter_keys = filter_keys # keywords used to filter abstracts
self.root_path = root_path
# Create a ConfigParser object
self.config = configparser.ConfigParser()
# Read the config file
self.config.read('apikey.ini')
# Get the value for the given key
self.chat_api_list = self.config.get('OpenAI', 'OPENAI_API_KEYS')[1:-1].replace('\'', '').split(',')
self.chat_api_list = [api.strip() for api in self.chat_api_list if len(api) > 5]
self.cur_api = 0
self.file_format = args.file_format
if args.save_image:
self.gitee_key = self.config.get('Gitee', 'api')
else:
self.gitee_key = ''
self.max_token_num = 4096
self.encoding = tiktoken.get_encoding("gpt2")
def get_arxiv(self, max_results=30):
search = arxiv.Search(query=self.query,
max_results=max_results,
sort_by=self.sort,
sort_order=arxiv.SortOrder.Descending,
)
return search
def filter_arxiv(self, max_results=30):
search = self.get_arxiv(max_results=max_results)
print("all search:")
for index, result in enumerate(search.results()):
print(index, result.title, result.updated)
filter_results = []
filter_keys = self.filter_keys
print("filter_keys:", self.filter_keys)
# A paper is kept only if every filter keyword appears in its abstract
for index, result in enumerate(search.results()):
abs_text = result.summary.replace('-\n', '-').replace('\n', ' ')
meet_num = 0
for f_key in filter_keys.split(" "):
if f_key.lower() in abs_text.lower():
meet_num += 1
if meet_num == len(filter_keys.split(" ")):
filter_results.append(result)
# break
print("筛选后剩下的论文数量:")
print("filter_results:", len(filter_results))
print("filter_papers:")
for index, result in enumerate(filter_results):
print(index, result.title, result.updated)
return filter_results
def validateTitle(self, title):
# Sanitize the paper title so it can be used as a file path
rstr = r"[\/\\\:\*\?\"\<\>\|]" # '/ \ : * ? " < > |'
new_title = re.sub(rstr, "_", title) # replace illegal characters with underscores
return new_title
def download_pdf(self, filter_results):
# Create the output folder first
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
key_word = str(self.key_word.replace(':', ' '))
path = self.root_path + 'pdf_files/' + self.query.replace('au: ', '').replace('title: ', '').replace('ti: ', '').replace(':', ' ')[:25] + '-' + date_str
try:
os.makedirs(path)
except:
pass
print("All_paper:", len(filter_results))
# Start downloading:
paper_list = []
for r_index, result in enumerate(filter_results):
try:
title_str = self.validateTitle(result.title)
pdf_name = title_str+'.pdf'
# result.download_pdf(path, filename=pdf_name)
self.try_download_pdf(result, path, pdf_name)
paper_path = os.path.join(path, pdf_name)
print("paper_path:", paper_path)
paper = Paper(path=paper_path,
url=result.entry_id,
title=result.title,
abs=result.summary.replace('-\n', '-').replace('\n', ' '),
authers=[str(aut) for aut in result.authors],
)
# Download finished, start parsing:
paper.parse_pdf()
paper_list.append(paper)
except Exception as e:
print("download_error:", e)
pass
return paper_list
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def try_download_pdf(self, result, path, pdf_name):
result.download_pdf(path, filename=pdf_name)
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def upload_gitee(self, image_path, image_name='', ext='png'):
"""
Upload the image to Gitee
:return:
"""
with open(image_path, 'rb') as f:
base64_data = base64.b64encode(f.read())
base64_content = base64_data.decode()
date_str = str(datetime.datetime.now())[:19].replace(':', '-').replace(' ', '-') + '.' + ext
path = image_name+ '-' +date_str
payload = {
"access_token": self.gitee_key,
"owner": self.config.get('Gitee', 'owner'),
"repo": self.config.get('Gitee', 'repo'),
"path": self.config.get('Gitee', 'path'),
"content": base64_content,
"message": "upload image"
}
# Change this to your own Gitee account, repo name, and folder path:
url = f'https://gitee.com/api/v5/repos/'+self.config.get('Gitee', 'owner')+'/'+self.config.get('Gitee', 'repo')+'/contents/'+self.config.get('Gitee', 'path')+'/'+path
rep = requests.post(url, json=payload).json()
print("rep:", rep)
if 'content' in rep.keys():
image_url = rep['content']['download_url']
else:
image_url = r"https://gitee.com/api/v5/repos/"+self.config.get('Gitee', 'owner')+'/'+self.config.get('Gitee', 'repo')+'/contents/'+self.config.get('Gitee', 'path')+'/' + path
return image_url
def summary_with_chat(self, paper_list):
htmls = []
for paper_index, paper in enumerate(paper_list):
# Step 1: summarize using the title, abstract, and introduction.
text = ''
text += 'Title:' + paper.title
text += 'Url:' + paper.url
text += 'Abstract:' + paper.abs
text += 'Paper_info:' + paper.section_text_dict['paper_info']
# intro
text += list(paper.section_text_dict.values())[0]
chat_summary_text = ""
try:
chat_summary_text = self.chat_summary(text=text)
except Exception as e:
if "maximum context" in str(e):
current_tokens_index = str(e).find("your messages resulted in") + len("your messages resulted in")+1
offset = int(str(e)[current_tokens_index:current_tokens_index+4])
summary_prompt_token = offset+1000+150
chat_summary_text = self.chat_summary(text=text, summary_prompt_token=summary_prompt_token)
htmls.append('## Paper:' + str(paper_index+1))
htmls.append('\n\n\n')
htmls.append(chat_summary_text)
# Step 2: summarize the methods:
# TODO: some papers name the method section after the algorithm itself, so this simple keyword filter often misses it; a better approach is needed later.
method_key = ''
for parse_key in paper.section_text_dict.keys():
if 'method' in parse_key.lower() or 'approach' in parse_key.lower():
method_key = parse_key
break
if method_key != '':
text = ''
method_text = ''
summary_text = ''
summary_text += "<summary>" + chat_summary_text
# methods
method_text += paper.section_text_dict[method_key]
text = summary_text + "\n\n<Methods>:\n\n" + method_text
chat_method_text = ""
try:
chat_method_text = self.chat_method(text=text)
except Exception as e:
if "maximum context" in str(e):
current_tokens_index = str(e).find("your messages resulted in") + len("your messages resulted in")+1
offset = int(str(e)[current_tokens_index:current_tokens_index+4])
method_prompt_token = offset+800+150
chat_method_text = self.chat_method(text=text, method_prompt_token=method_prompt_token)
htmls.append(chat_method_text)
else:
chat_method_text = ''
htmls.append("\n"*4)
# Step 3: summarize the whole paper and give it a score:
conclusion_key = ''
for parse_key in paper.section_text_dict.keys():
if 'conclu' in parse_key.lower():
conclusion_key = parse_key
break
text = ''
conclusion_text = ''
summary_text = ''
summary_text += "<summary>" + chat_summary_text + "\n <Method summary>:\n" + chat_method_text
if conclusion_key != '':
# conclusion
conclusion_text += paper.section_text_dict[conclusion_key]
text = summary_text + "\n\n<Conclusion>:\n\n" + conclusion_text
else:
text = summary_text
chat_conclusion_text = ""
try:
chat_conclusion_text = self.chat_conclusion(text=text)
except Exception as e:
if "maximum context" in str(e):
current_tokens_index = str(e).find("your messages resulted in") + len("your messages resulted in")+1
offset = int(str(e)[current_tokens_index:current_tokens_index+4])
conclusion_prompt_token = offset+800+150
chat_conclusion_text = self.chat_conclusion(text=text, conclusion_prompt_token=conclusion_prompt_token)
htmls.append(chat_conclusion_text)
htmls.append("\n"*4)
# Merge everything into a single file and save it.
date_str = str(datetime.datetime.now())[:13].replace(' ', '-')
try:
export_path = os.path.join(self.root_path, 'export')
os.makedirs(export_path)
except:
pass
mode = 'w' if paper_index == 0 else 'a'
file_name = os.path.join(export_path, date_str+'-'+self.validateTitle(paper.title[:80])+"."+self.file_format)
self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
# file_name = os.path.join(export_path, date_str+'-'+self.validateTitle(paper.title)+".md")
# self.export_to_markdown("\n".join(htmls), file_name=file_name, mode=mode)
htmls = []
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_conclusion(self, text, conclusion_prompt_token = 800):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list)-1 else self.cur_api
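# Estimate the token count of the input and clip the text proportionally so the prompt plus the reserved completion budget stays within max_token_num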
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text)*(self.max_token_num-conclusion_prompt_token)/text_token)
clip_text = text[:clip_text_index]
messages=[
{"role": "system", "content": "You are a reviewer in the field of ["+self.key_word+"] and you need to critically review this article"}, # chatgpt 角色
{"role": "assistant", "content": "This is the <summary> and <conclusion> part of an English literature, where <summary> you have already summarized, but <conclusion> part, I need your help to summarize the following questions:"+clip_text}, # 背景知识,可以参考OpenReview的审稿流程
{"role": "user", "content": """
8. Make the following summary.Be sure to use {} answers (proper nouns need to be marked in English).
- (1):What is the significance of this piece of work?
- (2):Summarize the strengths and weaknesses of this article in three dimensions: innovation point, performance, and workload.
.......
Follow the format of the output later:
8. Conclusion: \n\n
- (1):xxx;\n
- (2):Innovation point: xxx; Performance: xxx; Workload: xxx;\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.
""".format(self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
# Keep the prompt in English to save tokens.
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("conclusion_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms/1000.0, 's')
return result
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_method(self, text, method_prompt_token = 800):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list)-1 else self.cur_api
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text)*(self.max_token_num-method_prompt_token)/text_token)
clip_text = text[:clip_text_index]
messages=[
{"role": "system", "content": "You are a researcher in the field of ["+self.key_word+"] who is good at summarizing papers using concise statements"}, # chatgpt 角色
{"role": "assistant", "content": "This is the <summary> and <Method> part of an English document, where <summary> you have summarized, but the <Methods> part, I need your help to read and summarize the following questions."+clip_text}, # 背景知识
{"role": "user", "content": """
7. Describe in detail the methodological idea of this article. Be sure to use {} answers (proper nouns need to be marked in English). For example, its steps are.
- (1):...
- (2):...
- (3):...
- .......
Follow the format of the output that follows:
7. Methods: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
....... \n\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write.
""".format(self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("method_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms/1000.0, 's')
return result
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=4, max=10),
stop=tenacity.stop_after_attempt(5),
reraise=True)
def chat_summary(self, text, summary_prompt_token = 1100):
openai.api_key = self.chat_api_list[self.cur_api]
self.cur_api += 1
self.cur_api = 0 if self.cur_api >= len(self.chat_api_list)-1 else self.cur_api
text_token = len(self.encoding.encode(text))
clip_text_index = int(len(text)*(self.max_token_num-summary_prompt_token)/text_token)
clip_text = text[:clip_text_index]
messages=[
{"role": "system", "content": "You are a researcher in the field of ["+self.key_word+"] who is good at summarizing papers using concise statements"},
{"role": "assistant", "content": "This is the title, author, link, abstract and introduction of an English document. I need your help to read and summarize the following questions: "+clip_text},
{"role": "user", "content": """
1. Mark the title of the paper (with Chinese translation)
2. list all the authors' names (use English)
3. mark the first author's affiliation (output {} translation only)
4. mark the keywords of this article (use English)
5. link to the paper, Github code link (if available, fill in Github:None if not)
6. summarize according to the following four points.Be sure to use {} answers (proper nouns need to be marked in English)
- (1):What is the research background of this article?
- (2):What are the past methods? What are the problems with them? Is the approach well motivated?
- (3):What is the research methodology proposed in this paper?
- (4):On what task and what performance is achieved by the methods in this paper? Can the performance support their goals?
Follow the format of the output that follows:
1. Title: xxx\n\n
2. Authors: xxx\n\n
3. Affiliation: xxx\n\n
4. Keywords: xxx\n\n
5. Urls: xxx or xxx , xxx \n\n
6. Summary: \n\n
- (1):xxx;\n
- (2):xxx;\n
- (3):xxx;\n
- (4):xxx.\n\n
Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed.
""".format(self.language, self.language, self.language)},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
)
result = ''
for choice in response.choices:
result += choice.message.content
print("summary_result:\n", result)
print("prompt_token_used:", response.usage.prompt_tokens,
"completion_token_used:", response.usage.completion_tokens,
"total_token_used:", response.usage.total_tokens)
print("response_time:", response.response_ms/1000.0, 's')
return result
def export_to_markdown(self, text, file_name, mode='w'):
# One could convert the text to HTML with the markdown module:
# html = markdown.markdown(text)
# Open the output file in the requested mode
with open(file_name, mode, encoding="utf-8") as f:
# Write the content to the file
f.write(text)
# Print basic information about this Reader
def show_info(self):
print(f"Key word: {self.key_word}")
print(f"Query: {self.query}")
print(f"Sort: {self.sort}")
def main(args):
# Create a Reader object and call its show_info method
if args.sort == 'Relevance':
sort = arxiv.SortCriterion.Relevance
elif args.sort == 'LastUpdatedDate':
sort = arxiv.SortCriterion.LastUpdatedDate
else:
sort = arxiv.SortCriterion.Relevance
if args.pdf_path:
reader1 = Reader(key_word=args.key_word,
query=args.query,
filter_keys=args.filter_keys,
sort=sort,
args=args
)
reader1.show_info()
# Determine whether the given path is a directory or a single PDF file:
paper_list = []
if args.pdf_path.endswith(".pdf"):
paper_list.append(Paper(path=args.pdf_path))
else:
for root, dirs, files in os.walk(args.pdf_path):
print("root:", root, "dirs:", dirs, 'files:', files) #当前目录路径
for filename in files:
# If a PDF file is found, add it to the paper list
if filename.endswith(".pdf"):
paper_list.append(Paper(path=os.path.join(root, filename)))
print("------------------paper_num: {}------------------".format(len(paper_list)))
[print(paper_index, paper_name.path.split('\\')[-1]) for paper_index, paper_name in enumerate(paper_list)]
reader1.summary_with_chat(paper_list=paper_list)
else:
reader1 = Reader(key_word=args.key_word,
query=args.query,
filter_keys=args.filter_keys,
sort=sort,
args=args
)
reader1.show_info()
filter_results = reader1.filter_arxiv(max_results=args.max_results)
paper_list = reader1.download_pdf(filter_results)
reader1.summary_with_chat(paper_list=paper_list)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# parser.add_argument("--pdf_path", type=str, default=r'demo.pdf', help="if none, the bot will download from arxiv with query")
# parser.add_argument("--pdf_path", type=str, default=r'C:\Users\Administrator\Desktop\DHER\RHER_Reset\ChatPaper', help="if none, the bot will download from arxiv with query")
parser.add_argument("--pdf_path", type=str, default='', help="if none, the bot will download from arxiv with query")
parser.add_argument("--query", type=str, default='all: ChatGPT robot', help="the query string, ti: xx, au: xx, all: xx,")
parser.add_argument("--key_word", type=str, default='reinforcement learning', help="the key word of user research fields")
parser.add_argument("--filter_keys", type=str, default='ChatGPT robot', help="the filter key words, 摘要中每个单词都得有,才会被筛选为目标论文")
parser.add_argument("--max_results", type=int, default=1, help="the maximum number of results")
# arxiv.SortCriterion.Relevance
parser.add_argument("--sort", type=str, default="Relevance", help="another is LastUpdatedDate")
parser.add_argument("--save_image", default=False, help="save image? It takes a minute or two to save a picture! But pretty")
parser.add_argument("--file_format", type=str, default='md', help="导出的文件格式,如果存图片的话,最好是md,如果不是的话,txt的不会乱")
parser.add_argument("--language", type=str, default='zh', help="The other output lauguage is English, is en")
args = parser.parse_args()
import time
start_time = time.time()
main(args=args)
print("summary time:", time.time() - start_time)
| [
" \n 7. Describe in detail the methodological idea of this article. Be sure to use {} answers (proper nouns need to be marked in English). For example, its steps are.\n - (1):...\n - (2):...\n - (3):...\n - .......\n Follow the format of the output that follows: \n 7. Methods: \n\n\n - (1):xxx;\n \n - (2):xxx;\n \n - (3):xxx;\n \n ....... \n\n \n \n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write. \n ",
"] and you need to critically review this article",
"This is the <summary> and <Method> part of an English document, where <summary> you have summarized, but the <Methods> part, I need your help to read and summarize the following questions.PLACEHOLDER",
" \n 1. Mark the title of the paper (with Chinese translation)\n 2. list all the authors' names (use English)\n 3. mark the first author's affiliation (output {} translation only) \n 4. mark the keywords of this article (use English)\n 5. link to the paper, Github code link (if available, fill in Github:None if not)\n 6. summarize according to the following four points.Be sure to use {} answers (proper nouns need to be marked in English)\n - (1):What is the research background of this article?\n - (2):What are the past methods? What are the problems with them? Is the approach well motivated?\n - (3):What is the research methodology proposed in this paper?\n - (4):On what task and what performance is achieved by the methods in this paper? Can the performance support their goals?\n Follow the format of the output that follows: \n 1. Title: xxx\n\n\n 2. Authors: xxx\n\n\n 3. Affiliation: xxx\n\n \n 4. Keywords: xxx\n\n \n 5. Urls: xxx or xxx , xxx \n\n \n 6. Summary: \n\n\n - (1):xxx;\n \n - (2):xxx;\n \n - (3):xxx;\n \n - (4):xxx.\n\n \n \n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not have too much repetitive information, numerical values using the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed. \n ",
" \n 8. Make the following summary.Be sure to use {} answers (proper nouns need to be marked in English).\n - (1):What is the significance of this piece of work?\n - (2):Summarize the strengths and weaknesses of this article in three dimensions: innovation point, performance, and workload. \n .......\n Follow the format of the output later: \n 8. Conclusion: \n\n\n - (1):xxx;\n \n - (2):Innovation point: xxx; Performance: xxx; Workload: xxx;\n \n \n Be sure to use {} answers (proper nouns need to be marked in English), statements as concise and academic as possible, do not repeat the content of the previous <summary>, the value of the use of the original numbers, be sure to strictly follow the format, the corresponding content output to xxx, in accordance with \n line feed, ....... means fill in according to the actual requirements, if not, you can not write. \n ",
"] who is good at summarizing papers using concise statements",
"This is the <summary> and <conclusion> part of an English literature, where <summary> you have already summarized, but <conclusion> part, I need your help to summarize the following questions:PLACEHOLDER",
"This is the title, author, link, abstract and introduction of an English document. I need your help to read and summarize the following questions: PLACEHOLDER",
"You are a researcher in the field of [",
"You are a reviewer in the field of ["
] |
2024-01-10 | jeremysilva1098/FP-RAG-Chat | vector_db~buildVectorDBSnapshot.py | from langchain.document_loaders import RecursiveUrlLoader
from qdrant_client import QdrantClient, models
import requests
import openai
from langchain.text_splitter import RecursiveCharacterTextSplitter
from bs4 import BeautifulSoup
import os
import dotenv
import random
# load dot env from parent directory
dotenv_path = os.path.join(os.path.dirname(__file__), '..', '.env')
dotenv.load_dotenv(dotenv_path)
### load url content and chunk it ###
urls = [
"https://docs.freeplay.ai/docs", # all the docs content
"https://freeplay.ai/blog" # all the blog content
]
all_docs = []
for url in urls:
loader = RecursiveUrlLoader(url=url, max_depth=10,
extractor=lambda x: BeautifulSoup(x, "html.parser").text)
docs = loader.load()
all_docs.extend(docs)
# view 1 random doc
print(random.choice(docs))
print("\n\n")
# split all docs into chunks
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunks = splitter.split_documents(all_docs)
# view 1 random chunk
print(random.choice(chunks))
print("\n\n")
print("Number of chunks: ", len(chunks))
### add the chunks to the vector db ###
# create the vector db client
dbClient = QdrantClient(host='localhost', port=6333)
collection = 'freeplay_content'
localUrl = "http://localhost:6333"
# create the collection
dbClient.create_collection(collection_name=collection,
vectors_config=models.VectorParams(
size=1536, distance=models.Distance.COSINE)
)
# configure openai
openai.api_key = os.getenv("OPENAI_API_KEY")
def get_embedding(text):
try:
response = openai.Embedding.create(input=text,
model="text-embedding-ada-002")
return response["data"][0]["embedding"]
except:
try:
response = openai.Embedding.create(input=text,
model="text-embedding-ada-002")
return response["data"][0]["embedding"]
except:
print("Back to back Error with text: ", text)
return None
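# Embed every chunk with OpenAI and upload it to the Qdrant collection, storing the source metadata and raw text in the payload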
dbClient.upload_records(
collection_name=collection,
records=[
models.Record(
id=idx,
vector=get_embedding(doc.page_content),
payload={
"source": doc.metadata['source'],
"title": doc.metadata['title'],
"description": doc.metadata['description'],
"text": doc.page_content}
) for idx, doc in enumerate(chunks)
]
)
### snapshot the db and write to disk ###
res = requests.post(localUrl + "/snapshots")
print(res.json())
print(res.text)
snapshots = dbClient.list_full_snapshots()
print(snapshots)
# download the latest snapshot
res = requests.get(localUrl + "/snapshots/" + snapshots[0].name)
# write snapshot to disk
with open("latest.snapshot", "wb") as f:
f.write(res.content) | [] |
2024-01-10 | AmiFromIn/OnlineSafetyChatBot | botcode.py | import openai
# Replace 'YOUR_API_KEY' with your actual OpenAI API key
api_key = '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'
# Initialize the OpenAI API client
openai.api_key = api_key
def chat_with_gpt3(prompt):
try:
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", # Specify the GPT-3.5 Turbo model
messages=[
{"role": "system", "content": "You are a online safety expert, helping users stay safe online."},
{"role": "user", "content": prompt}
]
)
return response.choices[0].message["content"].strip()
except Exception as e:
return str(e)
print("Online Safety Chatbot: Hello! How can I assist you today?")
while True:
user_input = input("You: ")
if user_input.lower() == 'bye':
print("Online Safety Chatbot: Goodbye! Stay safe online!")
break
# Get the AI's response
ai_response = chat_with_gpt3(user_input)
print("Online Safety Chatbot:", ai_response)
| [
"You are a online safety expert, helping users stay safe online."
] |
2024-01-10 | arsh2037/CustomAI | CustomAI.py | from flask import Flask, request, jsonify
import os
import sys
from flask_cors import CORS
import openai
from langchain.chains import ConversationalRetrievalChain, RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.indexes.vectorstore import VectorStoreIndexWrapper
from langchain.llms import OpenAI
from langchain.vectorstores import Chroma
import APIKey as constants
app = Flask(__name__)
CORS(app)
os.environ["OPENAI_API_KEY"] = constants.API_Key
PERSIST = False
query = None
if len(sys.argv) > 1:
query = sys.argv[1]
if PERSIST and os.path.exists("persist"):
print("Reusing index...\n")
vectorstore = Chroma(persist_directory="persist", embedding_function=OpenAIEmbeddings())
index = VectorStoreIndexWrapper(vectorstore=vectorstore)
else:
# loader = TextLoader("data/data.txt") # Use this line if you only need data.txt
loader = DirectoryLoader("data/")
if PERSIST:
index = VectorstoreIndexCreator(vectorstore_kwargs={"persist_directory":"persist"}).from_loaders([loader])
else:
index = VectorstoreIndexCreator().from_loaders([loader])
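# Build a conversational retrieval chain over the indexed documents; the retriever returns only the single most similar chunk (k=1) per question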
chain = ConversationalRetrievalChain.from_llm(
llm=ChatOpenAI(model="gpt-3.5-turbo"),
retriever=index.vectorstore.as_retriever(search_kwargs={"k": 1}),
)
chat_history = []
while True:
if not query:
query = input("Prompt: ")
if query in ['quit', 'q', 'exit']:
sys.exit()
result = chain({"question": query, "chat_history": chat_history})
print(result['answer'])
chat_history.append((query, result['answer']))
query = None
@app.route('/ask', methods=['POST'])
def ask():
query = request.json.get('query')
if not query:
return jsonify({"error": "No query provided"}), 400
result = chain({"question": query, "chat_history": chat_history})
chat_history.append((query, result['answer']))
return jsonify({"answer": result['answer']})
if __name__ == '__main__':
app.run(debug=True, port=5000)
| [] |
2024-01-10 | btrcm00/question_generation | pipeline~dataset_constructor~dataset_crawler.py | import os
import openai
from tqdm import tqdm
from threading import Thread, Event
from queue import Queue
from common.config import PipelineConfig
from common.utils import *
class QGCrawler:
def __init__(self, config: PipelineConfig = None) -> None:
self.config = config if config is not None else PipelineConfig()
openai.api_key = self.config.constructor_crawler_key
self.qg = ModelUtils(input_max_length=self.config.pipeline_input_max_length)
input_data_folder = f"{self.config.pipeline_dataset_folder}/source"
assert os.path.isdir(input_data_folder)
self.source_dataset = self.load_source_dataset(data_path=input_data_folder)
output_folder = f"{self.config.pipeline_dataset_folder}/raw"
check_exist_folder(output_folder)
self.output_folder = output_folder
self.output_queue = Queue(maxsize=10000)
def prompt_sentence(self, passage: str):
return f"Generate 10 extractive questions in Vietnamese from the following passage (questions must be about information included in the passage, ask questions as specific as possible, and question is Wh-question). Passage: {passage}"
@property
def prefix_data_id(self):
return "chatgpt_data_"
def make_request(self, content: str):
completion = openai.ChatCompletion.create(
model=self.config.constructor_openai_model,
messages=[
{
"role": "user",
"content": content
}
]
)
question_text = completion.choices[0].message["content"]
question_text = re.sub(r"\n+", "\n", question_text)
question_text = re.sub(r"\d\.\s*", "", question_text)
question_lst = question_text.split("\n")
return question_lst
def load_source_dataset(self, data_path: str = None):
return json.load(open(data_path, "r", encoding="utf8"))
def generate_qa_example(self, bar, q: Queue, e: Event):
while not e.is_set() or not q.empty():
data = q.get()
base_id = self.prefix_data_id + str(data[0])
question_lst = self.make_request(content=self.prompt_sentence(passage=data[1]))
for question in question_lst:
answer = self.qa_api(context=data[1], question=question)["data"][ANSWER].replace("_", " ")
self.output_queue.put({"id": base_id, "context": data[1], "question": question, "answer": answer})  # NOTE: the exact payload structure is assumed here
bar.update(1)
def write_output(self):
pass
def run(self):
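# Producer/consumer setup: worker threads pull passages from the input queue and push generated QA examples to the output queue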
input_queue = Queue(maxsize=10000)
bar = tqdm(total=len(self.source_dataset), initial=0, leave=True)
event = Event()
event.clear()
threads = [Thread(target=self.generate_qa_example, args=(bar, input_queue, event), daemon=True) for _ in
range(self.num_of_threads)]
[thread.start() for thread in threads]
for idx, ele in enumerate(self.source_dataset):
input_queue.put((idx, ele))
event.set()
[thread.join() for thread in threads]
output = []
for f in os.listdir(self.output_folder):
if "all" in f:
output += load_file(f"{self.output_folder}/{f}")
self.save_dataset(output)
if __name__ == "__main__":
pass
| [] |
2024-01-10 | vijaydharmaji29/piggy-piglets-copilot | real_time_ret.py | #retrieve links and add to db
import requests
from bs4 import BeautifulSoup
import markdownify
import os
import sys
import time
# Set the OpenAI API key
os.environ["OPENAI_API_KEY"] = "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"
db_name_global = ""
# Import necessary modules
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter, MarkdownHeaderTextSplitter
from langchain.llms import OpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders import TextLoader, UnstructuredMarkdownLoader
from langchain.memory import ConversationBufferMemory
flag = 0
links_visited = set()
def add_to_vdb(db_name, content_md):
# Open and read the Markdown file
# with open("./docs/" + file_path, "r", encoding="utf-8") as md_file:
# markdown_content = md_file.read()
markdown_content = content_md
markdown_document = markdown_content
# Split the document by markdown headers first, then into overlapping character chunks
headers_to_split_on = [
("#", "Header 1"),
("##", "Header 2"),
("###", "Header 3")
]
# MD splits
markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
md_header_splits = markdown_splitter.split_text(markdown_document)
# Char-level splits
from langchain.text_splitter import RecursiveCharacterTextSplitter
chunk_size = 1000
chunk_overlap = 200
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
# Split
splits = text_splitter.split_documents(md_header_splits)
documents = splits
if len(documents) != 0:
# Initialize embeddings and vector store
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(documents, embeddings)
# Define the directory where you want to save the persisted database
persist_directory = db_name
# Initialize OpenAIEmbeddings for embedding
embedding = OpenAIEmbeddings()
# Create and persist the Chroma vector database
vectordb = Chroma.from_documents(documents=documents, embedding=embedding, persist_directory=persist_directory)
# Persist the database to disk
vectordb.persist()
else:
print("UNECESSARY PAGE")
def remove_css_from_html(input_html):
# Parse the HTML using BeautifulSoup
soup = BeautifulSoup(input_html, 'html.parser')
# Remove all <style> tags (CSS)
for style_tag in soup.find_all('style'):
style_tag.extract()
# Return the modified HTML
return str(soup)
def remove_javascript_from_html(input_html):
# Parse the HTML using BeautifulSoup
soup = BeautifulSoup(input_html, 'html.parser')
# Remove all <script> tags
for script_tag in soup.find_all('script'):
script_tag.extract()
# Remove inline event handlers (e.g., onclick="...")
for tag in soup.find_all():
for attr in list(tag.attrs.keys()):
if attr.startswith("on"):
del tag.attrs[attr]
# Return the modified HTML
return str(soup)
def get_inside(website):
global flag
result = requests.get(website)
content = result.text
soup = BeautifulSoup(content, 'lxml')
box = soup.find_all('html')
if len(box) < 1:
return "", []
links = set()
for b in box:
for link in b.find_all('a', href=True):
links.add(link['href'])
best_html_str = remove_javascript_from_html(str(soup.html))
best_html_str = remove_css_from_html(best_html_str)
md = markdownify.markdownify(best_html_str, heading_style="ATX")
flag += 1
return (md, links)
# return (remove_javascript_from_html(soup.html), links)
def get_page_content(website, round, db_name):
global flag
global db_name_global
db_name_global = db_name
content_md, links = get_inside(website)
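# Derive a filesystem-safe markdown filename from the URL, save the page content, and add it to the vector store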
folder = "moveworks/"
file_name = website
file_name = file_name.replace("https://www.", "")
file_name = file_name.replace("https://", "")
file_name = file_name.replace("http://", "")
file_name = file_name.replace(".", "(dot)")
file_name = file_name.replace("/", "_")
file_name = file_name + ".md"
file_name = folder + file_name
f = open(file_name, "w")
f.write(content_md)
add_to_vdb(db_name_global, content_md)
f.close()
if flag < 100:
print(round)
for l in links:
# print(l)
if "www" not in l:
l = "www." + l
try:
get_page_content(l, round + 1, db_name)
except:
# get_page_content(str(website) + l, round + 1)
# print("ERROR")
pass
def real_time_additon(website):
website = website
name = website.replace("https", "")
name = name.replace("http", "")
name = name.replace("://", "")
name = name.replace("www.", "")
name = name.split(".")[0]
name = "db_" + name
db_name_global = name
get_page_content(website, 0, db_name_global)
if __name__ == "__main__":
# website = "https://www.moveworks.com/"
# website = "https://subslikescript.com/movies"
website = "https://www.moveworks.com/"
if len(sys.argv) > 0:
website = sys.argv[1]
print("running for website")
name = website.replace("https", "")
name = name.replace("http", "")
name = name.replace("://", "")
name = name.replace("www.", "")
name = name.split(".")[0]
name = "db_" + name
print(name)
db_name_global = name
get_page_content(website, 0, db_name_global) | [] |
2024-01-10 | vijaydharmaji29/piggy-piglets-copilot | gen_question.py | from langchain.chat_models import ChatOpenAI
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import AIMessage, HumanMessage, SystemMessage
open_ai_key = "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"
def ask_gen_question(question):
chat = ChatOpenAI(temperature=0, openai_api_key=open_ai_key)
question = question
messages = [
SystemMessage(
content="You are a helpful assistant"
),
HumanMessage(
content=question
)
]
return chat(messages).content
| [
"You are a helpful assistant"
] |
2024-01-10 | vijaydharmaji29/piggy-piglets-copilot | main_3.py |
import os
import sys
import time
import gen_question
# Set the OpenAI API key
os.environ["OPENAI_API_KEY"] = "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"
# Import necessary modules
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders import TextLoader
from langchain.memory import ConversationBufferMemory
import subprocess
db_name_to_search = "db3"
def start_ret_script(new_web):
# Specify the Python script you want to run
python_script = "real_time_ret.py"
# Launch a new terminal and execute the script
terminal_command = f"python {python_script} {new_web};"
subprocess.Popen(terminal_command, shell=True)
def change_web(new_webiste):
global db_name_to_search
print("NEW", new_webiste)
name = new_webiste.replace("https", "")
name = name.replace("http", "")
name = name.replace("://", "")
name = name.replace("www.", "")
name = name.split(".")[0]
name = "db_" + name
print(name)
if "moveworks" in name:
name = "db3"
print("changed")
with open("db_to_file.txt", "w") as file:
# Write data to the file
file.write(name)
return name
def initialise_qa(db_to_use):
# Define the directory where you want to save the persisted database
global db_name_to_search
print("Use", db_to_use)
db_name_to_search = db_to_use
persist_directory = db_name_to_search
print(persist_directory)
# Initialize OpenAIEmbeddings for embedding
embedding = OpenAIEmbeddings()
# Load the persisted database from disk
vectordb = Chroma(persist_directory=persist_directory, embedding_function=embedding)
# Create a conversation buffer memory
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
# Create a conversational retrieval chain
qa = ConversationalRetrievalChain.from_llm(llm=OpenAI(temperature=0), chain_type="stuff", retriever=vectordb.as_retriever(), memory=memory)
return qa
input_history = []
chat_history = []
def main_output(user_input, qa):
global chat_history
global input_history
chat_history.append({"role": "user", "content": user_input})
# Start a new thread by default
thread_status = '1'
input_history.append({"question": user_input})
# Generate questions and answers
question = user_input
result = qa({"question": question, "chat_history": chat_history})
if "I don't know" in result["answer"] or "not provided in the context" in result["answer"]:
ans_open = gen_question.ask_gen_question(question)
chat_history.append({"role": "assistant", "content": result["answer"]})
ans_open += "\nGenerated from the internet!"
return ans_open
else:
# Add AI's response to chat history
chat_history.append({"role": "assistant", "content": result["answer"]})
return result["answer"]
def clear_history():
global chat_history
chat_history = []
# if __name__ == "__main__":
# print(init("Summarise the text"))
| [
"answer"
] |
2024-01-10 | ok-7/LogNow-API | konzept~Diktierfunktion~bhp~main2.py | import sounddevice as sd
import soundfile as sf
import threading
import queue
import PySimpleGUI as sg
import os
import openai
import sys
openai.api_key = ""
site = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Document</title>
</head>
<body>
<div class="cont">
<img src="img.png" style="transform: translate(25%, 0%);width:60%; height:60%;">
<div class="center">
STUFF_HERE
</div>
</div>
</body>
<style>
body {
font-family: Verdana, sans-serif;
background-color: black;
}
.cont {
margin: auto;
width: 50%;
padding: 10px;
position: relative;
}
.center {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-65%, -10%);
font-size: 18px;
width: 335px;
height: 250px;
background-color: white;
line-height: 0.5cm;
}
</style>
</html>
"""
# Set the audio settings
sample_rate = 44100 # Sample rate in Hz
output_file = "output.mp3" # Output file name
# Create a thread-safe queue to store the audio data
audio_queue = queue.Queue()
# Define a flag to indicate if recording is active
recording_active = threading.Event()
def audio_callback(indata, frames, time, status):
"""Audio callback function that is called for each audio block."""
if status:
print(f"Recording error: {status}")
audio_queue.put(indata.copy())
def record_audio():
"""Start recording audio from the microphone."""
try:
os.remove("./"+output_file)
except Exception as e:
print(e)
pass
file1 = sf.SoundFile(output_file, mode='w', samplerate=sample_rate, channels=1)
with sd.InputStream(callback=audio_callback, channels=1, samplerate=sample_rate):
print("Recording started. Press Stop button to stop recording.")
recording_active.wait() # Wait for recording to be active
while recording_active.is_set():
file1.write(audio_queue.get())
print("input ended")
print("rec ended")
file1.close()
print("thread ended")
sys.exit()
# Create the GUI layout
layout = [
[sg.Button(key="Start", button_color="white", image_filename="./microphone.png")]
]
# Create the window
window = sg.Window("Audio Recorder", layout)
window.BackgroundColor = "white"
# Start the recording thread
recording_thread = threading.Thread(target=record_audio)
# Event loop to process events
recording = False
while True:
event, values = window.read()
if event == sg.WINDOW_CLOSED:
break
if event == "Buchhaltung":
window["txt"].update("Buchhaltungsshit")
if event == "Verwaltung":
window["txt"].update("Verwaltungsshit")
if event == "Überwachung":
window["txt"].update("Überwachungsshit")
elif event == "Start":
if not recording:
recording = True
recording_active.set()
recording_thread.start()
else:
recording = False
recording_active.clear()
recording_thread.join()
recording_thread = threading.Thread(target=record_audio)
audio_file= open(output_file, "rb")
transcript = openai.Audio.transcribe("whisper-1", audio_file)
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "assistant", "content": "Das Folgende ist ein Text über die Tätigkeiten von einem Azubi eines Tages. Extrahiere daraus die Aufgaben welche der Azubi an diesem Tag hatte und gebe diese als kurze Stichpunkteaus:\n"+str(transcript)}
]
)
reply = chat.choices[0].message.content
#reply = "assadaäöäüüöäöäsdsada\nsadasd\nadadasd\nadsdasd\nasdßasiodjiaosdsds"
reply = reply.replace("\n", "<hr color='#8696bb'>")
reply = reply.replace("- ", "")
s = site.replace("STUFF_HERE", reply)
#print(s)
f = open("../site/demosite.html", "w", encoding="utf8")
f.write(s)
f.close()
# Close the window
window.close()
print(f"Recording saved to {output_file}.") | [
"Das Folgende ist ein Text über die Tätigkeiten von einem Azubi eines Tages. Extrahiere daraus die Aufgaben welche der Azubi an diesem Tag hatte und gebe diese als kurze Stichpunkteaus:\nPLACEHOLDER"
] |
2024-01-10 | TSTB-dev/ChatBot | voice_gpt.py | import openai
import pyttsx3
import speech_recognition as sr
# Set the OpenAI API key
openai.api_key = "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"
# Initialize the text-to-speech engine
engine = pyttsx3.init()
def transcribe_audio_to_text(filename):
# Initialize the speech recognizer
recognizer = sr.Recognizer()
# Load the audio from the file passed in as an argument
with sr.AudioFile(filename) as source:
audio = recognizer.record(source)
try:
# Recognize the speech with Google's API and return the text
return recognizer.recognize_google(audio)
except:
print("Skipping unknown error")
def generate_response(prompt):
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=4000,
n=1,
stop=None,
temperature=0.5
)
return response["choices"][0]["text"]
def speak_text(text):
engine.say(text)
engine.runAndWait()
def main():
while True:
# Userが"genius"と呼ぶまで待つ
print("Say ''Genius to start recording your question...")
with sr.Microphone() as source:
recognizer = sr.Recognizer()
audio = recognizer.listen(source)
try:
transcription = recognizer.recognize_google(audio)
if transcription.lower() == "genius":
# Record audio
filename = "input.wav"
print("Say your question...")
with sr.Microphone() as source:
recognizer = sr.Recognizer()
source.pause_threshold = 1
audio = recognizer.listen(source, phrase_time_limit=None, timeout=None)
with open(filename, "wb") as f:
f.write(audio.get_wav_data())
# transcribe audio to text
text = transcribe_audio_to_text(filename)
if text:
print(f"You Said: {text}")
# generate response
response = generate_response(text)
print(f"GPT-3 says: {response}")
# Read response using text to speech
speak_text(response)
except Exception as e:
print("An error occurred: {}".format(e))
if __name__ == "__main__":
main() | [] |
2024-01-10 | kyojuro6engoku/chatgtp | usage.py | import os
import telegram
import openai
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
# ... (Other code, including token setup and import of libraries)
# Initialize the Telegram Bot
bot = telegram.Bot(token=TELEGRAM_BOT_TOKEN)
# Initialize the OpenAI API
openai.api_key = OPENAI_API_KEY
# ...
# Define a function to handle the /start command
def start(update, context):
user_id = update.message.chat_id
response = "Hello! I am your AI chatbot. How can I assist you today?"
context.bot.send_message(chat_id=user_id, text=response)
# Define a function to handle the /help command
def help(update, context):
user_id = update.message.chat_id
response = "Here are some commands you can use:\n"
response += "/start - Start a conversation with me\n"
response += "/help - Get help and information\n"
response += "Ask me anything, and I'll do my best to assist you!"
context.bot.send_message(chat_id=user_id, text=response)
# ... (Other code, including message handling and ChatGPT interaction)
updater = Updater(token=TELEGRAM_BOT_TOKEN, use_context=True)
dispatcher = updater.dispatcher
# Add command handlers to the dispatcher
dispatcher.add_handler(CommandHandler("start", start))
dispatcher.add_handler(CommandHandler("help", help))
dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, handle_message))
# ...
# Start polling for updates
updater.start_polling()
updater.idle()
| [] |
2024-01-10 | SyedHuzaifa007/RequirementsSage-Chatbot | Extracted%20Folder~RequirementSage-Chatbot~ChatGppt~xhatapp~Config.py | import re
import asyncio
import os
import openai
class BravoSis:
def __init__(self):
self.openai_api_key = "" # set your own api value
self.openai_api_key = "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" #get this value from https://beta.openai.com/.
self.model = "text-davinci-003" # use any of these [text-davinci-002,text-davinci-001]
self.mxtoken = 1080 # max_tokens for the completion; can be decreased/increased depending on how long the responses should be
def ai(self,query):
openai.api_key = self.openai_api_key
completion = openai.Completion.create(engine=self.model, prompt=query, max_tokens=self.mxtoken, n=1, stop=None,temperature=0.7)
result = completion.choices[0].text
return result
| [] |
2024-01-10 | CakeCrusher/openplugin | pypi-core~openplugincore~openplugin.py | # C:\Projects\OpenPlugin\openplugin\pypi-core\openplugincore\openplugin.py
import json
from urllib.error import HTTPError
import requests
from typing import Any, List, Dict, Union, Tuple, Callable
import os
from .types import ChatgptAssistantMessage, ChatgptFunctionMessage, PluginConfigs
from .utils.constants import openai_models_info
from .utils.prompting import estimate_tokens, tokens_to_chars, truncate_json_root
from oplangchain.chains.openai_functions.openapi import openapi_spec_to_openai_fn
from oplangchain.utilities.openapi import OpenAPISpec
from oplangchain.output_parsers.openai_functions import JsonOutputFunctionsParser, FunctionCallNotExecuted
from oplangchain.prompts import ChatPromptTemplate
from oplangchain.chat_models import ChatOpenAI
from oplangchain.schema import HumanMessage, AIMessage, SystemMessage, FunctionMessage
from oplangchain import LLMChain
import openai
from dotenv import load_dotenv
load_dotenv()
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
plugin_configs: Dict[str, PluginConfigs] = {}
class OpenPlugin:
def __init__(self, plugin_name: str = None, openai_api_key: str = None, root_url: str = None, manifest = None, verbose: bool = False):
self.name: str = plugin_name
self.root_url: str = root_url
self.description: str = None
self.manifest: Any = manifest
self.functions: List[Dict[str, Any]] = None
self.call_api_fn: Callable = None
self.verbose: bool = verbose
if self.name is None and self.root_url is None:
raise ValueError("Either plugin_name or root_url must be passed in as a parameter")
if openai_api_key is None:
openai_api_key = os.getenv('OPENAI_API_KEY')
if openai_api_key is None:
raise ValueError("OPENAI_API_KEY not found. You can pass in the parameter openai_api_key. You can also set the environment variable OPENAI_API_KEY=<API-KEY>.")
os.environ["OPENAI_API_KEY"] = openai_api_key
openai.api_key = openai_api_key
self.init(plugin_name)
self.description: str = self.manifest["description_for_model"]
def init(self, plugin_name: str = None) -> None:
base_dir = os.path.dirname(os.path.realpath(__file__))
plugins_file_path = os.path.join(base_dir, "plugins.json")
# fetch plugins from github
try:
plugins_url = "https://raw.githubusercontent.com/CakeCrusher/openplugin/main/migrations/plugin_store/openplugins.json"
response = requests.get(plugins_url)
response.raise_for_status()
plugins = response.json()
except Exception as e:
raise HTTPError(f"Unable to fetch plugins from github url '{plugins_url}'")
# if self.root_url has a value
if self.root_url is None:
try:
self.root_url = plugins[plugin_name]
except KeyError:
# throw error
raise KeyError("Plugin not found")
if not self.manifest:
self.manifest = self.fetch_manifest(self.root_url)
self.functions, self.call_api_fn = self.openapi_to_functions_and_call_api_fn(self.manifest)
def fetch_manifest(self, root_url: str) -> Any:
if plugin_configs.get(self.name, {}).get("manifest", None) is not None:
return plugin_configs[self.name]["manifest"]
response = requests.get(root_url + "/.well-known/ai-plugin.json")
response.raise_for_status() # Raise exception if the request failed
manifest = response.json()
if not self.name:
self.name: str = manifest["name_for_model"]
if self.verbose:
print(f"\"{self.name}\" manifest: ", json.dumps(manifest, indent=2))
# add manifest to plugin_configs
plugin_configs[self.name] = {
**plugin_configs.get(self.name, {}),
"manifest": manifest
}
return manifest
def openapi_to_functions_and_call_api_fn(self, manifest: Any) -> Tuple[List[Dict[str, Any]], Callable]:
openapi_url = manifest.get("api", {}).get("url")
if self.verbose:
print(f"\"{self.name}\" openapi_url: ", openapi_url)
if openapi_url == None:
raise ValueError("OpenAPI URL not found in manifest")
if isinstance(openapi_url, (OpenAPISpec, str)):
for conversion in (
# each of the below specs can get stuck in a while loop
OpenAPISpec.from_url,
OpenAPISpec.from_file,
OpenAPISpec.from_text,
):
try:
openapi_url = conversion(openapi_url) # type: ignore[arg-type]
break
except Exception: # noqa: E722
pass
if isinstance(openapi_url, str):
raise ValueError(f"Unable to parse spec from source {openapi_url}")
openai_fns, call_api_fn = openapi_spec_to_openai_fn(openapi_url)
if self.verbose:
print(f"\"{self.name}\" functions: ", json.dumps(openai_fns, indent=2))
return openai_fns, call_api_fn
def _convert_openai_messages_to_langchain_messages(self, openai_messages: List[Any]) -> List[ChatgptAssistantMessage]:
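# Map OpenAI-style message dicts (system/user/function/assistant) onto the corresponding LangChain message classes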
langchain_messages = []
for openai_message in openai_messages:
if openai_message["role"] == "system":
langchain_messages.append(SystemMessage(content=openai_message["content"]))
elif openai_message["role"] == "user":
langchain_messages.append(HumanMessage(content=openai_message["content"]))
elif openai_message["role"] == "function":
langchain_messages.append(FunctionMessage(name=openai_message["name"], content=openai_message["content"]))
elif openai_message["role"] == "assistant":
# set veriable content to "" if it is None
content = openai_message["content"] if openai_message["content"] is not None else ""
langchain_messages.append(AIMessage(content=content, additional_kwargs={"function_call": openai_message["function_call"]}))
return langchain_messages
def fetch_plugin(self, messages: list[dict], plugin_headers: dict = None, truncate: Union[bool, int] = False, truncate_offset: int = 0, return_assistant_message: bool = False, **chatgpt_args) -> ChatgptFunctionMessage:
model = chatgpt_args.get("model", None)
if model not in ["gpt-3.5-turbo-0613", "gpt-4-0613", "gpt-3.5-turbo-1106", "gpt-4-1106-preview"]:
raise ValueError("Model must be either gpt-3.5-turbo-0613, gpt-4-0613, gpt-3.5-turbo-1106, or gpt-4-1106-preview")
llm = ChatOpenAI(
**chatgpt_args,
)
llm_chain = LLMChain(
llm=llm,
prompt=ChatPromptTemplate.from_template("{query}"),
llm_kwargs={"functions": self.functions},
output_parser=JsonOutputFunctionsParser(args_only=False),
output_key="function",
verbose=self.verbose,
# **(llm_kwargs or {}),
)
# Estimate how many tokens the plugin's function definitions consume so the chat history can be truncated to fit the context window
functions_tokens = estimate_tokens(json.dumps(self.functions))
try:
# MESSAGES TO PROMPT
# if there is a message with role system then pop it, iterate through all messages to find it
system_message = ''
for message in messages:
if message["role"] == 'system':
system_message = 'system' + ": " + message['content'] + "\n"
messages.remove(message)
break
# print("system_message: ", system_message)
# Combine messages into one string
messages_aggregate = '\n'.join([f"{message['role']}: {message['content']}" for message in messages])
complete_messages_aggregate_tokens = estimate_tokens(system_message + messages_aggregate)
# print("complete_messages_aggregate_tokens: ", complete_messages_aggregate_tokens)
# print("functions_tokens: ", functions_tokens)
messages_truncation_offset = tokens_to_chars(max(complete_messages_aggregate_tokens + functions_tokens - openai_models_info[model]["max_tokens"], 0))
# print("messages_truncation_offset: ", messages_truncation_offset)
messages_aggregate = messages_aggregate[messages_truncation_offset:]
# TODO: temp fix to prevent collation of messages
if (messages_truncation_offset > 0):
messages_aggregate = "user/assistant: " + messages_aggregate
complete_messages_aggregate = system_message + messages_aggregate
# print("complete_messages_aggregate: ", complete_messages_aggregate)
# print("final length: ", estimate_tokens(complete_messages_aggregate))
# Replace prompt with messageAggregate
llm_chain_out = llm_chain.run(complete_messages_aggregate)
if self.verbose:
print("Using plugin: " + self.name)
except KeyError as e:
# if error includes "function_call" then it is not a plugin function
if "function_call" in str(e):
raise ValueError("Not a plugin function")
else:
raise e
except FunctionCallNotExecuted as e:
raise FunctionCallNotExecuted(e)
if llm_chain_out["name"] not in [function["name"] for function in self.functions]:
raise ValueError("Not a plugin function")
# EDGE CASE
def remove_empty_from_dict(input_dict):
cleaned_dict = {}
for k, v in input_dict.items():
if isinstance(v, dict):
v = remove_empty_from_dict(v)
if v and v != "none": # only add to cleaned_dict if v is not empty
cleaned_dict[k] = v
return cleaned_dict
llm_chain_out["arguments"] = remove_empty_from_dict(llm_chain_out["arguments"])
if self.verbose:
print(f"\"{self.name}\" llm_chain_out: ", json.dumps(llm_chain_out, indent=2))
# make the api call
def request_chain(name,arguments,headers):
res = self.call_api_fn(
name, arguments, headers, params=None
)
return res
# make the api call
function_lookup = {fn["name"]: fn for fn in self.functions}
def request_chain(name, arguments, headers):
print(
"request_chain name: {}, arguments: {}".format(name, json.dumps(arguments))
)
# Get the expected parameters for the function
expected_params = function_lookup[name]["parameters"]["properties"]
# Find the appropriate wrapping key
wrapping_key = next(
(key for key in ["data", "json", "params"] if key in expected_params), None
)
# If a wrapping key is found and it's not already in the arguments, wrap the arguments
if wrapping_key and wrapping_key not in arguments:
arguments = {wrapping_key: arguments}
res = self.call_api_fn(name, arguments, headers, params=None)
return res
request_out = request_chain(**llm_chain_out, headers=plugin_headers)
# if request_out.status_code is not within 200s then raise http error
if request_out.status_code < 200 or request_out.status_code >= 300:
raise HTTPError(
url=self.manifest["api"]["url"],
code=request_out.status_code,
msg=f"Call to \"{self.name}\" API failed",
hdrs={},
fp=None,
)
json_response = request_out.json()
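# Optionally truncate the JSON response so it fits within the model's remaining context window for the follow-up completion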
if truncate:
truncate_to = truncate if not isinstance(truncate, bool) else None
if truncate_to is None:
token_slack = 56 + 300
truncate_to = openai_models_info[model]['max_tokens'] - estimate_tokens(json.dumps(messages[-1])) - token_slack - truncate_offset
json_response = truncate_json_root(json_response, truncate_to)
if self.verbose:
print(f"\"{self.name}\" json_response: ", json.dumps(json_response, indent=2))
try:
# arguments should be stringified json
assistant_message = ChatgptAssistantMessage(
role="assistant",
content=None,
function_call= {
"name": llm_chain_out["name"],
"arguments": json.dumps(llm_chain_out["arguments"])
}
)
function_message = ChatgptFunctionMessage(
role="function",
name=llm_chain_out["name"],
content=json.dumps(json_response)
)
if return_assistant_message:
return {
"assistant_message": assistant_message,
"function_message": function_message
}
else:
return function_message
except json.decoder.JSONDecodeError:
raise json.decoder.JSONDecodeError(f"API call failed, API returned the following non-JSON response:\n{request_out.content}", str(request_out.content), 0)
"{query}",
"content",
"None"
] |
2024-01-10 | CakeCrusher/openplugin | pypi-core~openplugincore~openplugin_completion.py | from ast import List
import os
import openai
from dotenv import load_dotenv
from .openplugin import OpenPlugin
load_dotenv()
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
def openplugin_completion(openai_api_key: str, messages: list[dict], truncate: bool = True, plugin_name: str = None, root_url: str = None, **chatgpt_args):
# Set the OpenAI API key both as an environment variable and on the openai client
os.environ["OPENAI_API_KEY"] = openai_api_key
openai.api_key = openai_api_key
if not plugin_name and not root_url:
return openai.ChatCompletion.create(
**chatgpt_args,
messages=messages
)
plugin = OpenPlugin(plugin_name=plugin_name, root_url=root_url, openai_api_key=openai_api_key)
try:
function_response = plugin.fetch_plugin(
messages=messages,
truncate=truncate,
**chatgpt_args
)
except ValueError as e:
if "Not a plugin function" in str(e):
return openai.ChatCompletion.create(
**chatgpt_args,
messages=messages
)
else:
raise e
all_chatgpt_args = {
**chatgpt_args,
"messages": messages + [function_response]
}
summarize = openai.ChatCompletion.create(**all_chatgpt_args)
return summarize | [] |
2024-01-10 | CakeCrusher/openplugin | pypi-core~tests~test_e2e.py | import json
import pytest
import os
import openai
from .mock_data import todo_plugin
from openplugincore import OpenPlugin
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
def test_initiate_and_fetch_todo():
plugin = OpenPlugin("__testing__")
assert plugin.manifest is not None
assert plugin.manifest.get("name_for_model") == "todo"
for function in plugin.functions:
if function["name"] == "addTodo":
addTodo = function
if function["name"] == "getTodos":
getTodos = function
assert addTodo is not None
assert addTodo == todo_plugin["functions"][0]
assert getTodos is not None
assert getTodos == todo_plugin["functions"][1]
# fetch after chatgpt response
response = plugin.fetch_plugin(
messages=todo_plugin["messages"],
return_assistant_message=True,
model="gpt-3.5-turbo-0613",
temperature=0,
)
response = response["function_message"]
assert response is not None
assert response["role"] == "function"
assert response["name"] == "addTodo"
json_content = json.loads(response["content"])
assert json_content["todo"] == "buy milk"
def test_initiate_and_fetch_LGTM_gpt_4():
plugin = OpenPlugin("LGTM")
assert plugin.manifest is not None
# # create chatgpt request that will call the addTodo function
chatgpt_prompt = 'Show me markdown for a 2 by 2 table with LGTM'
response = plugin.fetch_plugin(
messages=[
{
"role": "user",
"content": chatgpt_prompt
}
],
return_assistant_message=True,
model="gpt-4-1106-preview",
temperature=0,
)
response = response["function_message"]
assert response is not None
assert response["role"] == "function"
json_content = json.loads(response["content"])
# # ensure that the json_content has a key of image_url and that is starts with https://lgtm.lol
assert "image_url" in json_content
assert json_content["image_url"].startswith("https://lgtm.lol")
def test_initiate_and_fetch_yt_caption_retriever_gpt_4():
plugin = OpenPlugin("yt_caption_retriever")
assert plugin.manifest is not None
# create chatgpt request that will call the addTodo function
chatgpt_prompt = 'give me a 2 sentence summary of the following yt video https://www.youtube.com/watch?v=P310I19L3Ko'
response = plugin.fetch_plugin(
messages=[
{
"role": "user",
"content": chatgpt_prompt
}
],
return_assistant_message=True,
model="gpt-4-1106-preview",
temperature=0,
)
response = response["function_message"]
assert response is not None
assert response["role"] == "function"
json_content = json.loads(response["content"])
# Replace the line below with a test for the final output in json_content
assert "en" in json_content["captions"]
def test_initiate_and_fetch_twtData():
plugin = OpenPlugin("twtData")
assert plugin.manifest is not None
# create chatgpt request that will call the addTodo function
chatgpt_prompt = 'show me the amount of people @Sebasti54919704 is following'
response = plugin.fetch_plugin(
messages=[
{
"role": "user",
"content": chatgpt_prompt
}
],
return_assistant_message=True,
model="gpt-3.5-turbo-1106",
temperature=0,
)
response = response["function_message"]
assert response is not None
assert response["role"] == "function"
json_content = json.loads(response["content"])
# Replace the line below with a test for the final output in json_content
assert json_content["stats"]["account_found"] == True
def test_initiate_and_fetch_surge_ai_trends():
plugin = OpenPlugin("surge_ai_trends")
assert plugin.manifest is not None
# create chatgpt request that will call the addTodo function
chatgpt_prompt = 'What are the trending searches for "gpu" in amazon'
response = plugin.fetch_plugin(
messages=[
{
"role": "user",
"content": chatgpt_prompt
}
],
return_assistant_message=True,
model="gpt-3.5-turbo-1106",
temperature=0,
)
response = response["function_message"]
assert response is not None
assert response["role"] == "function"
json_content = json.loads(response["content"])
# assert that json_content.items is a list
assert isinstance(json_content["items"], list)
def test_initiate_and_fetch_speedy_marketing():
plugin = OpenPlugin("speedy_marketing")
assert plugin.manifest is not None
# create chatgpt request that will call the addTodo function
chatgpt_prompt = 'write me an SEO blog about react for marketing'
response = plugin.fetch_plugin(
messages=[
{
"role": "user",
"content": chatgpt_prompt
}
],
return_assistant_message=True,
model="gpt-3.5-turbo-0613",
temperature=0,
)
response = response["function_message"]
assert response is not None
assert response["role"] == "function"
json_content = json.loads(response["content"])
# Replace the line below with a test for the final output in json_content
assert isinstance(json_content["blog"], str)
@pytest.mark.skip(reason="Not whitelisted")
def test_initiate_and_fetch_scholarai():
plugin = OpenPlugin("scholarai")
assert plugin.manifest is not None
# create chatgpt request that will call the addTodo function
chatgpt_prompt = 'What scientific research exists for semantic representation of language through brain waves. show me one.'
response = plugin.fetch_plugin(
messages=[
{
"role": "user",
"content": chatgpt_prompt
}
],
return_assistant_message=True,
model="gpt-3.5-turbo-0613",
temperature=0,
)
response = response["function_message"]
assert response is not None
assert response["role"] == "function"
json_content = json.loads(response["content"])
# Replace the line below with a test for the final output in json_content
assert isinstance(json_content["total_num_results"], int)
@pytest.mark.skip(reason="Not whitelisted")
def test_initiate_and_fetch_rephrase():
plugin = OpenPlugin("rephrase")
assert plugin.manifest is not None
    # create a chatgpt request that should trigger this plugin's function call
chatgpt_prompt = 'I want to code a react ui with hello world please rephrase that'
response = plugin.fetch_plugin(
messages=[
{
"role": "user",
"content": chatgpt_prompt
}
],
return_assistant_message=True,
model="gpt-3.5-turbo-1106",
temperature=0,
)
response = response["function_message"]
assert response is not None
assert response["role"] == "function"
json_content = json.loads(response["content"])
    # Verify the final output in json_content
assert isinstance(json_content["rephrased"]["text"], str)
def test_initiate_and_fetch_DreamInterpreter():
plugin = OpenPlugin("DreamInterpreter", verbose=True)
assert plugin.manifest is not None
    # create a chatgpt request that should trigger this plugin's function call
chatgpt_prompt = 'I dreamt of being in a room without any windows getting smaller overtime'
response = plugin.fetch_plugin(
messages=[
{
"role": "user",
"content": chatgpt_prompt
}
],
model="gpt-3.5-turbo-1106",
temperature=0,
)
assert response is not None
assert response["role"] == "function"
json_content = json.loads(response["content"])
    # Verify the final output in json_content
assert isinstance(json_content["dreamResult"], str)
def test_initiate_and_fetch_portfoliopilot():
plugin = OpenPlugin("portfoliopilot", verbose=True)
assert plugin.manifest is not None
    # create a chatgpt request that should trigger this plugin's function call
chatgpt_prompt = 'What stocks should I add for my long term tech portfolio'
response = plugin.fetch_plugin(
messages=[
{
"role": "user",
"content": chatgpt_prompt
}
],
model="gpt-3.5-turbo-1106",
temperature=0,
)
assert response is not None
assert response["role"] == "function"
json_content = json.loads(response["content"])
    # Verify the final output in json_content
assert isinstance(json_content["top_stocks"], list)
@pytest.mark.skip(reason="Could not parse: requests.exceptions.JSONDecodeError: Expecting value: line 1 column 1")
def test_initiate_and_fetch_Ai_PDF():
plugin = OpenPlugin("Ai_PDF", verbose=True)
assert plugin.manifest is not None
    # create a chatgpt request that should trigger this plugin's function call
chatgpt_prompt = 'Can I have my data be private according to this pdf https://www.unodc.org/pdf/criminal_justice/UN_Basic_Principles_on_the_Role_of_Lawyers.pdf'
response = plugin.fetch_plugin(
messages=[
{
"role": "user",
"content": chatgpt_prompt
}
],
model="gpt-3.5-turbo-1106",
temperature=0,
)
assert response is not None
assert response["role"] == "function"
json_content = json.loads(response["content"])
    # Verify the final output in json_content
assert isinstance(json_content[0], str)
@pytest.mark.skip(reason="requests.exceptions.JSONDecodeError: Expecting value: line 1 column 1")
def test_initiate_and_fetch_askyourpdf():
plugin = OpenPlugin("askyourpdf", verbose=True)
assert plugin.manifest is not None
chatgpt_prompt = 'summarize this pdf https://eforms.com/download/2018/01/Non-Disclosure-Agreement-Template.pdf'
response = plugin.fetch_plugin(
messages=[
{
"role": "user",
"content": chatgpt_prompt
}
],
model="gpt-3.5-turbo-1106",
temperature=0,
)
assert response is not None
assert response["role"] == "function"
json_content = json.loads(response["content"])
assert len(json_content["summary"]) > 0
def test_initiate_and_fetch_show_me_diagrams():
plugin = OpenPlugin("show_me_diagrams", verbose=True)
assert plugin.manifest is not None
    # create a chatgpt request that should trigger this plugin's function call
chatgpt_prompt = 'Take this diagram to explain how a car works using a mermaid diagram of type graph. Return the link.'
response = plugin.fetch_plugin(
messages=[
{
"role": "user",
"content": chatgpt_prompt
}
],
model="gpt-3.5-turbo-1106",
temperature=0,
)
assert response is not None
assert response["role"] == "function"
json_content = json.loads(response["content"])
print(json.dumps(json_content, indent=2))
assert json_content["diagramLanguage"] == "mermaid"
"""
TEMPLATE for testing a new plugin
0. test the plugin with a prompt in ChatGPT
1. make sure to replace the PLUGIN with the name of your plugin
2. make sure to replace the PLUGIN_PROMPT with the prompt you used on ChatGPT
3. replace the INTENTIONAL_FAILURE error with a test for the final output in json_content
4. remove the segments under the DELETE comment
"""
# def test_initiate_and_fetch_PLUGIN():
# plugin = OpenPlugin("PLUGIN", verbose=True)
# assert plugin.manifest is not None
# # DELETE
# if not os.path.exists("logs"):
# os.makedirs("logs")
# with open("logs/manifest.json", "w") as f:
# f.write(json.dumps(plugin.manifest, indent=2))
# with open("logs/functions.json", "w") as f:
# f.write(json.dumps(plugin.functions, indent=2))
# # create chatgpt request that will call the addTodo function
# chatgpt_prompt = 'PLUGIN_PROMPT'
# response = plugin.fetch_plugin(
# messages=[
# {
# "role": "user",
# "content": chatgpt_prompt
# }
# ],
# model="gpt-3.5-turbo-0613",
# temperature=0,
# )
# # DELETE
# with open("logs/plugin_response.json", "w") as f:
# f.write(json.dumps(response, indent=2))
# assert response is not None
# assert response["role"] == "function"
# json_content = json.loads(response["content"])
# # Replace the line below with a test for the final output in json_content
# raise Exception("INTENTIONAL_FAILURE")
| [
"Show me markdown for a 2 by 2 table with LGTM",
"Can I have my data be private according to this pdf https://www.unodc.org/pdf/criminal_justice/UN_Basic_Principles_on_the_Role_of_Lawyers.pdf",
"Take this diagram to explain how a car works using a mermaid diagram of type graph. Return the link.",
"give me a 2 sentence summary of the following yt video https://www.youtube.com/watch?v=P310I19L3Ko",
"show me the amount of people @Sebasti54919704 is following",
"I dreamt of being in a room without any windows getting smaller overtime",
"summarize this pdf https://eforms.com/download/2018/01/Non-Disclosure-Agreement-Template.pdf",
"write me an SEO blog about react for marketing",
"What scientific research exists for semantic representation of language through brain waves. show me one.",
"What are the trnding searches for \"gpu\" in amazon",
"What stocks should I add for my long term tech portfolio",
"I want to code a react ui with hello world please rephrase that"
] |
2024-01-10 | ymyke/cardio | cardio~blueprints~blueprint_creator.py | """Rough script to generate new cards."""
#%%
import logging
from collections import defaultdict
import random
import re
from typing import List, Tuple
from cardio.blueprints.card_creator import create_noname_cards
from cardio.blueprints.query_openai import query_openai
from cardio.blueprints import Blueprint, thecatalog
from openai.error import RateLimitError
logging.basicConfig(level=logging.DEBUG)
TITLE = "\n---------- {} ----------\n"
def parse_line(line: str) -> Tuple[int, str, str]:
i, rest = line.split(":")
i = int(re.sub(r"[^\d]", "", i))
if "[" in rest:
name, rest = rest.split("[")
name = name.strip()
name = re.sub(r"[^A-Za-z ]", "", name)
desc = rest.split("]")[0]
desc = re.sub(r'[^A-Za-z ,"\']', "", desc)
elif "-" in rest:
name, desc = rest.split("-", 1)
name = name.strip()
name = re.sub(r"[^A-Za-z ]", "", name)
desc = desc.strip()
desc = re.sub(r'[^A-Za-z ,"\']', "", desc)
else:
raise ValueError(f"Could not parse line.")
return i, name, desc
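# Illustrative examples of the two answer-line formats parse_line accepts (card names made up):
#   "3: Grizzly Bear [very strong, quite expensive]"  -> (3, "Grizzly Bear", "very strong, quite expensive")
#   "7: Armadillo - low power, high health"           -> (7, "Armadillo", "low power, high health")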
def create_blueprints_and_add_to_catalog(listofwantedpotencies: List[int]):
# Create a couple of random cards:
cards = create_noname_cards(listofwantedpotencies)
for i, c in enumerate(cards): # Set an index as the name
c.name = str(i)
# Prepare openai query:
print(TITLE.format("Query"))
query = "\n".join(repr(c) for c in cards)
print(query)
print(TITLE.format("Raw response"))
res = query_openai(query, existing_names=[b.name for b in thecatalog._blueprints])
print(res)
print(TITLE.format("Parsed response"))
not_added = defaultdict(list)
lines = res.split("\n")
for line in lines:
if not line.strip():
continue
i, name, desc = parse_line(line)
print(i, name, desc)
cards[i].name = name
b = Blueprint(cards[i], desc)
try:
thecatalog.add_blueprint(b)
except Exception as e:
not_added[e.__class__.__name__].append(b)
print(TITLE.format("Not added"))
for reason, blueprints in not_added.items():
print(f"{len(blueprints)} blueprints not added because {reason}: ")
print(", ".join(b.name for b in blueprints))
print()
# ----- main -----
wanted_potencies = (
list(range(1, 31)) * 5 + list(range(31, 46)) * 2 + list(range(46, 81))
)
random.shuffle(wanted_potencies)
while wanted_potencies:
potencies = wanted_potencies[:5]
while True:
print()
print(
f"********** Potencies {potencies} ({len(wanted_potencies)} left) **********"
)
try:
create_blueprints_and_add_to_catalog(potencies)
except Exception as e:
print(f"\n😱😱😱 {type(e).__name__} ERROR: {e} 😱😱😱\n")
else:
wanted_potencies = wanted_potencies[5:]
break
thecatalog.save()
| [] |
2024-01-10 | ymyke/cardio | cardio~blueprints~query_openai.py | import os
from typing import List, Optional
import random
import openai
openai.api_key = os.environ.get("OPENAI_API_KEY")
assert openai.api_key, "Please set the OPENAI_API_KEY environment variable."
query_blueprint = """\
I'm developing a card game where cards have different attributes and looking to name some new cards.
EXAMPLES:
Card(name='1', power=0, health=1, costs_fire=0, costs_spirits=0, has_spirits=1, has_fire=1, skills=[]) -> ANSWER: 1: Hamster [small, weak, cheap, non-skilled]
Card(name='2', power=1, health=2, costs_fire=1, costs_spirits=0, has_spirits=1, has_fire=1, skills=[skills.Spines]) -> ANSWER: 2: Porcupine [small, low power/health, spines]
Card(name='3', power=6, health=4, costs_fire=3, costs_spirits=0, has_spirits=1, has_fire=1, skills=[]) -> ANSWER: 3: Grizzly Bear [very strong, quite expensive]
Card(name='4', power=1, health=4, costs_fire=2, costs_spirits=0, has_spirits=1, has_fire=1, skills=[]) -> ANSWER: 4: Armadillo [low power, high health, no skills]
Card(name='5', power=0, health=1, costs_fire=1, costs_spirits=0, has_spirits=10, has_fire=1, skills=[]) -> ANSWER: 5: Unicorn [highly spirited]
TASK: Suggest a name for the following cards:
{}
RULES:
- The name must have no more than 12 characters.
- Use only normal animal names. Names of other cards in the game are: Hamster, Koala, Porcupine, Lynx, Weasel, Church Mouse.
- Take the attribute values (and their min/max) and the skill names into account and suggest a name that fits with the overall character of a card.
- Keep in mind the min max values per attribute and relate to these as well: power: [0-10], health: [1-10], costs_fire: [0-6], costs_spirits: [0-8], has_spirits: [0-8], has_fire: [0-6], and a card can have up to 6 skills.
- Answer only with the number and the name for each card and a short explanation, nothing else.
- The explanation should argue how the name is related to the card's attributes and skills.
- Do NOT use any of the skill names in the name of the card.
- Make sure the name is not any of these: {}
"""
def query_openai(cards_str: str, existing_names: Optional[List[str]] = None) -> str:
existing_names = existing_names or []
# Take a random sample if there are too many existing names:
namestr = ", ".join(random.sample(existing_names, min(250, len(existing_names))))
chat_completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "user", "content": query_blueprint.format(cards_str, namestr)}
],
)
return chat_completion.choices[0].message.content
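# Hedged usage sketch (the card string and names below are illustrative, not real catalog data):
#
#   cards_str = "Card(name='0', power=2, health=3, costs_fire=1, costs_spirits=0, has_spirits=1, has_fire=1, skills=[])"
#   answer = query_openai(cards_str, existing_names=["Hamster", "Koala"])
#   # The model is expected to answer with lines like "0: Badger [sturdy, cheap]",
#   # which blueprint_creator.parse_line() then turns into (index, name, description).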
| [
"I'm developing a card game where cards have different attributes and looking to name some new cards.\n\n\nEXAMPLES:\nCard(name='1', power=0, health=1, costs_fire=0, costs_spirits=0, has_spirits=1, has_fire=1, skills=[]) -> ANSWER: 1: Hamster [small, weak, cheap, non-skilled]\nCard(name='2', power=1, health=2, costs_fire=1, costs_spirits=0, has_spirits=1, has_fire=1, skills=[skills.Spines]) -> ANSWER: 2: Porcupine [small, low power/health, spines]\nCard(name='3', power=6, health=4, costs_fire=3, costs_spirits=0, has_spirits=1, has_fire=1, skills=[]) -> ANSWER: 3: Grizzly Bear [very strong, quite expensive]\nCard(name='4', power=1, health=4, costs_fire=2, costs_spirits=0, has_spirits=1, has_fire=1, skills=[]) -> ANSWER: 4: Armadillo [low power, high health, no skills]\nCard(name='5', power=0, health=1, costs_fire=1, costs_spirits=0, has_spirits=10, has_fire=1, skills=[]) -> ANSWER: 5: Unicorn [highly spirited]\n\n\nTASK: Suggest a name for the following cards:\nPLACEHOLDER\n\nRULES: \n- The name must have no more than 12 characters. \n- Use only normal animal names. Names of other cards in the game are: Hamster, Koala, Porcupine, Lynx, Weasel, Church Mouse. \n- Take the attribute values (and their min/max) and the skill names into account and suggest a name that fits with the overall character of a card. \n- Keep in mind the min max values per attribute and relate to these as well: power: [0-10], health: [1-10], costs_fire: [0-6], costs_spirits: [0-8], has_spirits: [0-8], has_fire: [0-6], and a card can have up to 6 skills.\n- Answer only with the number and the name for each card and a short explanation, nothing else.\n- The explanation should argue how the name is related to the card's attributes and skills.\n- Do NOT use any of the skill names in the name of the card.\n- Make sure the name is not any of these: PLACEHOLDER\n"
] |
2024-01-10 | faceyacc/ray | ray_bot~ray_bot~chains.py | import time
from typing import Any, Dict, List, Optional
import qdrant_client
from langchain import chains
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.llms import HuggingFacePipeline
from unstructured.cleaners.core import (
clean,
clean_extra_whitespace,
clean_non_ascii_chars,
group_broken_paragraphs,
replace_unicode_quotes,
)
from ray_bot.embeddings import EmbeddingModelSingleton
from ray_bot.template import PromptTemplate
class StatelessMemorySequentialChain(chains.SequentialChain):
"""
A sequential chain that uses a stateless memory to store context between calls.
This chain overrides the _call and prep_outputs methods to load and clear the memory
before and after each call, respectively.
"""
history_input_key: str = "to_load_history"
def _call(self, inputs: Dict[str, str], **kwargs) -> Dict[str, str]:
"""
Override _call to load history before calling the chain.
This method loads the history from the input dictionary and saves it to the
stateless memory. It then updates the inputs dictionary with the memory values
and removes the history input key. Finally, it calls the parent _call method
with the updated inputs and returns the results.
"""
to_load_history = inputs[self.history_input_key]
for (
human,
ai,
) in to_load_history:
self.memory.save_context(
inputs={self.memory.input_key: human},
outputs={self.memory.output_key: ai},
)
memory_values = self.memory.load_memory_variables({})
inputs.update(memory_values)
del inputs[self.history_input_key]
return super()._call(inputs, **kwargs)
def prep_outputs(
self,
inputs: Dict[str, str],
outputs: Dict[str, str],
return_only_outputs: bool = False,
) -> Dict[str, str]:
"""
Override prep_outputs to clear the internal memory after each call.
This method calls the parent prep_outputs method to get the results, then
clears the stateless memory and removes the memory key from the results
dictionary. It then returns the updated results.
"""
results = super().prep_outputs(inputs, outputs, return_only_outputs)
# Clear the internal memory.
self.memory.clear()
if self.memory.memory_key in results:
results[self.memory.memory_key] = ""
return results
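# Hedged usage sketch for StatelessMemorySequentialChain; the chain names, variables and
# memory settings below are assumptions for illustration, not values defined in this module:
#
#   from langchain.memory import ConversationBufferMemory
#
#   chain = StatelessMemorySequentialChain(
#       chains=[context_chain, qa_chain],
#       input_variables=["about_me", "question", "to_load_history"],
#       output_variables=["answer"],
#       memory=ConversationBufferMemory(
#           memory_key="chat_history", input_key="question", output_key="answer"
#       ),
#   )
#   # Each call passes the prior conversation as (human, ai) pairs; it is loaded into the
#   # memory before the run and cleared again afterwards.
#   chain({"about_me": "...", "question": "...", "to_load_history": [("Hi", "Hello!")]})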
class ContextExtractorChain(Chain):
"""
    Encode the question, search the vector store for the top-k articles, and return
    the context from the Alpaca news documents collection.
Attributes:
-----------
top_k : int
The number of top matches to retrieve from the vector store.
embedding_model : EmbeddingModelSingleton
The embedding model to use for encoding the question.
vector_store : qdrant_client.QdrantClient
The vector store to search for matches.
vector_collection : str
The name of the collection to search in the vector store.
"""
top_k: int = 1
embedding_model: EmbeddingModelSingleton
vector_store: qdrant_client.QdrantClient
vector_collection: str
@property
def input_keys(self) -> List[str]:
return ["about_me", "question"]
@property
def output_keys(self) -> List[str]:
return ["context"]
def _call(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
_, quest_key = self.input_keys
question_str = inputs[quest_key]
cleaned_question = self.clean(question_str)
# TODO: Instead of cutting the question at 'max_input_length', chunk the question in 'max_input_length' chunks,
# pass them through the model and average the embeddings.
cleaned_question = cleaned_question[: self.embedding_model.max_input_length]
embeddings = self.embedding_model(cleaned_question)
# TODO: Using the metadata, use the filter to take into consideration only the news from the last 24 hours
# (or other time frame).
matches = self.vector_store.search(
query_vector=embeddings,
k=self.top_k,
collection_name=self.vector_collection,
)
context = ""
for match in matches:
context += match.payload["summary"] + "\n"
return {
"context": context,
}
def clean(self, question: str) -> str:
"""
Clean the input question by removing unwanted characters.
Parameters:
-----------
question : str
The input question to clean.
Returns:
--------
str
The cleaned question.
"""
question = clean(question)
question = replace_unicode_quotes(question)
question = clean_non_ascii_chars(question)
return question
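# A minimal construction sketch for ContextExtractorChain; the Qdrant URL and collection
# name are placeholders for illustration only, not values used elsewhere in this project:
#
#   chain = ContextExtractorChain(
#       embedding_model=EmbeddingModelSingleton(),
#       vector_store=qdrant_client.QdrantClient(url="http://localhost:6333"),
#       vector_collection="alpaca_news",
#       top_k=1,
#   )
#   context = chain({"about_me": "...", "question": "What moved tech stocks today?"})["context"]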
class RayBotQAChain(Chain):
"""This custom chain handles LLM generation upon given prompt"""
hf_pipeline: HuggingFacePipeline
template: PromptTemplate
@property
def input_keys(self) -> List[str]:
"""Returns a list of input keys for the chain"""
return ["context"]
@property
def output_keys(self) -> List[str]:
"""Returns a list of output keys for the chain"""
return ["answer"]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Calls the chain with the given inputs and returns the output"""
inputs = self.clean(inputs)
prompt = self.template.format_infer(
{
"user_context": inputs["about_me"],
"news_context": inputs["context"],
"chat_history": inputs["chat_history"],
"question": inputs["question"],
}
)
start_time = time.time()
response = self.hf_pipeline(prompt["prompt"])
end_time = time.time()
duration_milliseconds = (end_time - start_time) * 1000
if run_manager:
run_manager.on_chain_end(
outputs={
"answer": response,
},
# TODO: Count tokens instead of using len().
metadata={
"prompt": prompt["prompt"],
"prompt_template_variables": prompt["payload"],
"prompt_template": self.template.infer_raw_template,
"usage.prompt_tokens": len(prompt["prompt"]),
"usage.total_tokens": len(prompt["prompt"]) + len(response),
"usage.actual_new_tokens": len(response),
"duration_milliseconds": duration_milliseconds,
},
)
return {"answer": response}
def clean(self, inputs: Dict[str, str]) -> Dict[str, str]:
"""Cleans the inputs by removing extra whitespace and grouping broken paragraphs"""
for key, input in inputs.items():
cleaned_input = clean_extra_whitespace(input)
cleaned_input = group_broken_paragraphs(cleaned_input)
inputs[key] = cleaned_input
return inputs | [
"user_context",
"question",
"chat_history",
"context",
"news_context"
] |
2024-01-10 | faceyacc/ray | ray_bot~ray_bot~handlers.py | from typing import Any, Dict
import comet_llm
from langchain.callbacks.base import BaseCallbackHandler
from ray_bot import constants
class CometLLMMonitoringHandler(BaseCallbackHandler):
"""
A callback handler for monitoring LLM models using Comet.ml.
Args:
project_name (str): The name of the Comet.ml project to log to.
llm_model_id (str): The ID of the LLM model to use for inference.
llm_qlora_model_id (str): The ID of the PEFT model to use for inference.
llm_inference_max_new_tokens (int): The maximum number of new tokens to generate during inference.
llm_inference_temperature (float): The temperature to use during inference.
"""
def __init__(
self,
project_name: str = None,
llm_model_id: str = constants.LLM_MODEL_ID,
llm_qlora_model_id: str = constants.LLM_QLORA_CHECKPOINT,
llm_inference_max_new_tokens: int = constants.LLM_INFERNECE_MAX_NEW_TOKENS,
llm_inference_temperature: float = constants.LLM_INFERENCE_TEMPERATURE,
):
self._project_name = project_name
self._llm_model_id = llm_model_id
self._llm_qlora_model_id = llm_qlora_model_id
self._llm_inference_max_new_tokens = llm_inference_max_new_tokens
self._llm_inference_temperature = llm_inference_temperature
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""
A callback function that logs the prompt and output to Comet.ml.
Args:
outputs (Dict[str, Any]): The output of the LLM model.
**kwargs (Any): Additional arguments passed to the function.
"""
should_log_prompt = "metadata" in kwargs
if should_log_prompt:
metadata = kwargs["metadata"]
comet_llm.log_prompt(
project=self._project_name,
prompt=metadata["prompt"],
output=outputs["answer"],
prompt_template=metadata["prompt_template"],
prompt_template_variables=metadata["prompt_template_variables"],
metadata={
"usage.prompt_tokens": metadata["usage.prompt_tokens"],
"usage.total_tokens": metadata["usage.total_tokens"],
"usage.max_new_tokens": self._llm_inference_max_new_tokens,
"usage.temperature": self._llm_inference_temperature,
"usage.actual_new_tokens": metadata["usage.actual_new_tokens"],
"model": self._llm_model_id,
"peft_model": self._llm_qlora_model_id,
},
duration=metadata["duration_milliseconds"],
) | [
"False"
] |
2024-01-10 | Amirrezahmi/Zozo-Assistant | project~ui.py | #This is the same as 'main.py' but this time not console-based but UI.
import tkinter as tk
from tkinter import messagebox
from tkinter import messagebox, scrolledtext, ttk, filedialog
import requests
import string
import random
import pyttsx3
import datetime
import speech_recognition as sr
from word2number import w2n
import threading
import pygame
from country_codes import country_codes
import joblib
import os
import openai
from tkinter import scrolledtext
import time
import shutil
from queue import Queue
# 'message_queue' will be our means of communication between recognition thread and main program thread
message_queue = Queue()
pygame.init()
def play_siri1():
pygame.mixer.music.load("siri1.mp3")
pygame.mixer.music.play()
def play_siri2():
pygame.mixer.music.load("siri2.mp3")
pygame.mixer.music.play()
btn_status = False
listening_flag = threading.Event()
listen_thread = None # Track the active listen_for_speech thread
def listen_for_speech(queue: Queue):
recognizer = sr.Recognizer()
microphone = sr.Microphone()
global listen_thread
while True:
with microphone as mic:
# Wait for the call to start listening
queue.get()
# Check if the loop should run
if not listening_flag.is_set():
break
while btn_status:
print("Listening...")
recognizer.adjust_for_ambient_noise(mic, duration=1)
audio = recognizer.listen(mic)
if not btn_status: # Stop the loop if the button is clicked
break
print("Recognizing...")
try:
text = recognizer.recognize_google(audio)
# Get the existing text from the input box
existing_text = input_box.get()
# Update the input box with the recognized speech joined with existing text
if existing_text.strip(): # Check if the input box is not empty
input_box.delete(0, tk.END)
input_box.insert(0, existing_text + " " + text)
else:
input_box.insert(tk.END, text) # Append recognized speech to the end
except sr.UnknownValueError:
print("Could not understand")
except sr.RequestError:
print("Failed to get results.")
# Reset the listen_thread when the loop exits
listen_thread = None
# Stop listening if the button is clicked while waiting for input
if not btn_status:
break
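# Summary of the handshake used above: start_stop_listening() flips `btn_status`, sets or
# clears `listening_flag`, and puts a token on `message_queue`; listen_for_speech() blocks
# on queue.get() until that token arrives, then keeps transcribing speech into `input_box`
# until the button toggles `btn_status` back off.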
def start_stop_listening():
submit_button.pack_forget()
global btn_status, listen_thread
btn_status = not btn_status
if btn_status:
listening_button.configure(text='⏹')
play_siri1()
if listen_thread is None or not listen_thread.is_alive():
listening_flag.set() # Set the flag to True to start the loop
recognize_thread = threading.Thread(target=listen_for_speech, args=(message_queue,))
recognize_thread.daemon = True
recognize_thread.start()
listen_thread = recognize_thread
message_queue.put('start')
print("Button clicked, start listening")
#play_siri1()
else:
listening_button.pack_forget()
submit_button.pack()
listening_button.pack()
listening_button.configure(text='🎙')
listening_flag.clear() # Set the flag to False to stop the loop
print("Button clicked, stop listening")
play_siri2()
engine = pyttsx3.init() # In this line of code you may face an error if you are using a non-Windows OS. In this case please follow my notes in the comment below: 👇
'''
1. First, make sure to install the pyttsx3 library correctly. Incorrect installation could potentially cause such an error. You can do this by running the following command in your terminal:
pip uninstall pyttsx3
pip install pyttsx3
2. Upgrade the library if it's outdated because some old versions might cause the error. Run the following command to update pyttsx3:
pip install --upgrade pyttsx3
Note: Step 3 is crucial, especially if you are using a non-Windows OS such as Linux. It is essential to note that this program has been developed on a Windows-powered device, which may result in encountering errors with certain libraries on other operating systems, such as pyttsx3. In this case read step 3 carefully.
3. pyttsx3 uses speech synthesis engines that depend on your operating system. Make sure that the corresponding speech engine is correctly installed and configured. For example, on Linux, pyttsx3 uses espeak. You might need to install it in case you are using Linux:
sudo apt-get update && sudo apt-get install espeak
'''
engine.setProperty('rate', 150)
# OpenAI API
OPENAI_API_KEY = '' # Paste your API Key here
messages = [
{"role": "system", "content": "You are a helpful assistant."},
]
# Check for API
def check_API():
if len(OPENAI_API_KEY) != 0:
openai.api_key = OPENAI_API_KEY
else:
return "No"
def api_not_worked(a):
pipeline = joblib.load('model2.joblib') # Paste your path
print("API didn't work! Now we are using our pipeline model...")
result = pipeline.predict([a])[0]
return result
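# Fallback behaviour: check_API() returns "No" when OPENAI_API_KEY is left empty, and aa()
# below then routes the prompt through the local joblib pipeline (model2.joblib) instead of
# the OpenAI API; the same fallback is used if the API call raises an exception.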
def cleaner(x):
return [a for a in (''.join([a for a in x if a not in string.punctuation])).lower().split()]
#weather
def weather(location, country):
# Returns full JSON object
apiKey = '' # Enter your API Key here for weather
base = 'http://api.openweathermap.org/data/2.5/weather?q='
url = base + location + ',' + country + '&units=metric&appid=' + apiKey
response = requests.get(url).json()
return response
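# For reference, the parts of the OpenWeatherMap current-weather response that the helpers
# below rely on look roughly like this (trimmed, illustrative):
#   {"weather": [{"main": "Clouds", ...}], "main": {"temp": 21.3, ...}, ...}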
# dictionary to translate descriptions
status = {
"clouds": "cloudy",
"drizzle": "drizzly",
"rain": "rainy",
"thunderstorm": "stormy",
"snow": "snowy",
"mist": "misty"
}
def description(location, country):
# returns weather description
main = weather(location, country)['weather']
d = main[0]['main'].lower()
return status.get(d, d)
def temperature(location, country):
# returns current temp
main = weather(location, country)['main']
return main['temp']
def get_weather_advice(description):
cloudy_advice = ["No sunglasses needed", "Light jacket recommended", "Stay indoors if possible"]
rainy_advice = ["Get an umbrella", "Wear waterproof shoes", "Carry a raincoat"]
snowy_advice = ["Mittens and earmuffs", "Wear warm boots", "Drive carefully on slippery roads"]
default_advice = ["No particular advice", "Enjoy the weather!", "Stay hydrated"]
if description == "cloudy":
return random.choice(cloudy_advice)
elif description == "rainy":
return random.choice(rainy_advice)
elif description == "snowy":
return random.choice(snowy_advice)
else:
return random.choice(default_advice)
# Global variable for the name
name = "Amirreza"
def speak(text):
engine.say(text)
engine.runAndWait()
def alarm():
pygame.mixer.init()
pygame.mixer.music.load('Ring.wav')
pygame.mixer.music.play()
zero = 0
def one_time():
global zero
if zero < 1:
p = "Ok. Now write whatever you want."
print(p)
speak(p)
zero += 1
def listen():
mic = "no"
if mic.lower() == "1" or mic.lower() == "y" or mic.lower() == "yes":
r = sr.Recognizer()
with sr.Microphone() as source:
li = ["Say something!", "OK?", "Now what?", "Speak up, I'm listening"]
p = random.choice(li)
speak(p)
print(p)
audio = r.listen(source)
try:
text = r.recognize_google(audio)
# Update chatroom display with user input
update_chatroom("User: " + text, "")
return text
except sr.UnknownValueError:
p1 = "Sorry, I couldn't understand you. Please try again."
speak(p1)
print(p1)
return listen() # Retry listening
except sr.RequestError as e:
p2 = "Sorry, I am currently experiencing some technical issues. Please try again later."
speak(p2)
print(p2)
return listen() # Retry listening
else:
text = input("Enter your input: ")
# Update chatroom display with user input
update_chatroom("User: " + text, "")
return text
def assistant_text(text):
return input_box.get()
def aa(text):
text=text.lower()
if "what time is it" in text or text=="time":
time = datetime.datetime.now().strftime("%H:%M")
p = "The current time is " + time
return p
elif "who am i" in text or "what is my name" in text:
p = f"You are {name}."
return p
elif text == "zozo":
p=["yes?","I'm listening", f"{name}?"]
p= random.choice(p)
return p
elif "date" in text:
date = datetime.datetime.now().strftime("%Y-%m-%d")
p = "The current date is " + date
return p
elif "bye" in text:
p="Good Bye!"
update_chatroom(text, p)
exit()
else:
input_text = text
a = input_text
message = {"role": "user", "content": a}
messages.append(message)
response = check_API()
try:
if response == "No":
return api_not_worked(a)
else:
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
result = response["choices"][0]["message"]["content"]
return result
except:
return api_not_worked(a)
def show_chatroom():
chat_window.pack()
mic_label.pack_forget()
mic_entry.pack_forget()
submit_button.pack_forget()
exit_button.pack_forget()
text_box.pack_forget()
label.pack_forget()
def update_chatroom(prompt, response):
chat_display.configure(state='normal')
chat_display.insert(tk.END, f"User: {prompt}\n", 'user')
chat_display.insert(tk.END, f"Bot: {response}\n", 'bot')
chat_display.configure(state='disabled')
chat_display.see(tk.END)
engine.say(response)
engine.runAndWait()
def on_window_configure(event):
if window.state() == 'zoomed':
chat_display.configure(width=window.winfo_width() // 10, height=window.winfo_height() // 30)
else:
chat_display.configure(width=50, height=13)
# Create a Tkinter window
window = tk.Tk()
window.title("Zozo")
window.geometry("600x400")
window.configure(background='#F0F0F0')
# Bind the window's configure event to the update function
window.bind('<Configure>', on_window_configure)
# Create a chatroom window
chat_window = tk.Frame(window, bg='#F0F0F0')
chat_window.pack(pady=10)
# Create a scrolledtext widget for prompts and responses
chat_display = scrolledtext.ScrolledText(chat_window, width=50, height=13, bg='#FFFFFF', wrap=tk.WORD)
chat_display.pack()
# Configure tags for user and bot messages
chat_display.tag_configure('user', foreground='#000000')
chat_display.tag_configure('bot', foreground='#0000FF')
# Initialize text-to-speech engine
engine = pyttsx3.init()
engine.setProperty("rate", 150)
# Create an input box for the user to enter their messages
input_box = tk.Entry(window, width=60)
input_box.pack(pady=10)
input_box.focus_set()
def submit_cha():
input_value = input_box.get()
if input_value:
alarm_time = datetime.datetime.strptime(input_value, "%H:%M").strftime("%H:%M")
response = ala(alarm_time) # Replace with your own code to generate bot response
submit_button = tk.Button(window, text="Send", command=submit_chaa)
return response
g_num=0
def jnum():
global g_num
g_num+=1
def get_weather():
input_box.pack_forget()
def stop_weather():
remove_weather_elements()
input_box.pack()
submit_button.pack()
listening_button.pack()
def get_weather_info():
location = location_entry.get()
country = country_entry.get()
if country in country_codes:
country = country_codes[country]
else:
d = "Country not found in our dataset."
update_chatroom(country, d)
country = country
try:
desc = description(location, country)
d = f"In {location} the weather is {desc}"
# update_chatroom(prompt, response)
advice = get_weather_advice(desc)
result = f"{d}\n{advice}"
update_chatroom(f"City: {location} & Country: {country}", result)
except:
d = "Couldn't find any results!"
update_chatroom(f"City: {location} & Country: {country}", d)
location_entry.delete(0, tk.END)
country_entry.delete(0, tk.END)
def remove_weather_elements():
location_label.destroy()
location_entry.destroy()
country_label.destroy()
country_entry.destroy()
submit_butto.destroy()
stop_button.destroy()
# Create labels and entry fields
location_label = tk.Label(window, text="City:")
location_entry = tk.Entry(window)
country_label = tk.Label(window, text="Country:")
country_entry = tk.Entry(window)
# Create submit button
submit_butto = tk.Button(window, text="Get Weather", command=get_weather_info)
stop_button = tk.Button(window, text="Back", command=stop_weather)
# Pack labels, entry fields, and submit button
location_label.pack()
location_entry.pack()
country_label.pack()
country_entry.pack()
submit_butto.pack()
stop_button.pack()
submit_button.pack_forget()
listening_button.pack_forget()
# Global variable to store music files
music_files = []
def music_management():
global music_files # Access the global music_files variable
def remove_music_management_elements():
music_management_window.destroy()
def delete_music():
index = music_list.curselection()[0]
music_file = music_files[index]
result = messagebox.askyesno("Confirm Deletion", f"Do you really want to delete '{os.path.basename(music_file)}'?")
if result:
os.remove(music_file)
music_list.delete(index)
del music_files[index]
def add_music():
filename = filedialog.askopenfilename(initialdir="/", title="Select Music File", filetypes=[("MP3 files", "*.mp3")])
if filename:
if filename.endswith(".mp3"):
shutil.copy(filename, "music/")
music_files.append(filename)
show_music_list()
else:
messagebox.showerror("Error", "Please select an MP3 file.")
def show_music_list():
music_list.delete(0, tk.END)
for i, music_file in enumerate(music_files):
music_list.insert(tk.END, os.path.basename(music_file))
music_files = [os.path.join("music", f) for f in os.listdir("music") if f.endswith(".mp3")]
# Create a new window for music management
music_management_window = tk.Toplevel(window)
music_management_window.title("Music Management")
music_management_window.geometry("400x500")
# music_management_window.grid_rowconfigure(0, weight=1)
# music_management_window.grid_columnconfigure(0, weight=1)
# Create a label for the title
title_label = tk.Label(music_management_window, text="Music Management", font=("Arial", 16, "bold"))
title_label.grid(row=0, column=0, pady=10, sticky="n")
# Create a custom Listbox widget with canvas feature to display the music list
class CustomListbox(tk.Listbox):
        def __init__(self, master, **kwargs):
            super().__init__(master, **kwargs)
self.bind("<MouseWheel>", self.hide_delete_icon)
def show_delete_icon(self, event):
index = self.nearest(event.y)
x, y, _, _ = self.bbox(index)
width = self.winfo_width()
self.delete_icon.place(x=width - 30, y=y) # Move the '❌' icon to the right corner of the row
def hide_delete_icon(self, event):
self.delete_icon.place_forget()
music_list = CustomListbox(music_management_window, font=("Arial", 12), height=10, selectbackground="#b2d8b2", selectforeground="black", activestyle="none")
music_list.grid(row=1, column=0, pady=10, sticky="nsew")
for i, music_file in enumerate(music_files):
music_list.insert(tk.END, os.path.basename(music_file))
def show_delete_icon(event):
index = music_list.nearest(event.y)
x, y, _, _ = music_list.bbox(index)
width = music_list.winfo_width()
music_list.delete_icon.place(x=width - 30, y=y) # Move the '❌' icon to the right corner of the row
# Create a delete icon label
music_list.delete_icon = tk.Label(music_list, text="❌", cursor="hand2", font=("Arial", 12))
music_list.delete_icon.bind("<Button-1>", lambda event: delete_music())
# Bind the Listbox to show the delete icon on selection
music_list.bind("<Button-1>", show_delete_icon)
# Create a Scrollbar for the Listbox
scrollbar = ttk.Scrollbar(music_management_window, orient=tk.VERTICAL, command=music_list.yview)
scrollbar.grid(row=1, column=1, sticky="ns")
music_list.config(yscrollcommand=scrollbar.set)
# Create a button to add music
add_music_button = tk.Button(music_management_window, text="Add Music", command=add_music)
add_music_button.grid(row=2, column=0, pady=10, sticky="n")
# Create a back button to go back to the music player
back_button = tk.Button(music_management_window, text="Back", command=remove_music_management_elements)
back_button.grid(row=3, column=0, pady=10, sticky="n")
# Configure resizing behavior for widgets
music_management_window.grid_rowconfigure(1, weight=1)
music_list.grid_rowconfigure(0, weight=1)
music_list.grid_columnconfigure(0, weight=1)
music_management_window.grid_columnconfigure(0, weight=1)
def play_music():
global music_files # Access the global music_files variable
input_box.pack_forget()
folder_path = 'music' # Change based on your path
music_files = [os.path.join(folder_path, f) for f in os.listdir(folder_path) if f.endswith('.mp3')]
pygame.mixer.init()
current_music_index = 0
pygame.mixer.music.load(music_files[current_music_index])
pygame.mixer.music.play()
# print('Playing:', music_files[current_music_index])
update_chatroom("play music", "Playing music")
def play_next():
nonlocal current_music_index
current_music_index = (current_music_index + 1) % len(music_files)
pygame.mixer.music.load(music_files[current_music_index])
pygame.mixer.music.play()
p='Playing next music'
update_chatroom("next music", p)
def play_previous():
nonlocal current_music_index
current_music_index = (current_music_index - 1) % len(music_files)
pygame.mixer.music.load(music_files[current_music_index])
pygame.mixer.music.play()
p='Playing previous music' #, music_files[current_music_index]
update_chatroom("previous music", p)
def pause_music():
pygame.mixer.music.pause()
update_chatroom("pause", "music paused")
def unpause_music():
pygame.mixer.music.unpause()
update_chatroom("unpause", "music unpaused")
def stop_music():
pygame.mixer.music.stop()
remove_music_buttons()
update_chatroom("stop", "Stopped")
input_box.pack()
submit_button.pack()
listening_button.pack()
def remove_music_buttons():
next_button.destroy()
previous_button.destroy()
pause_button.destroy()
unpause_button.destroy()
stop_button.destroy()
music_management_button.destroy() # New button to manage music
# Create buttons
next_button = tk.Button(window, text="Next", command=play_next)
previous_button = tk.Button(window, text="Previous", command=play_previous)
pause_button = tk.Button(window, text="Pause", command=pause_music)
unpause_button = tk.Button(window, text="Unpause", command=unpause_music)
stop_button = tk.Button(window, text="Stop", command=stop_music)
music_management_button = tk.Button(window, text="Music Management", command=music_management) # New button
# Pack buttons
next_button.pack()
previous_button.pack()
pause_button.pack()
unpause_button.pack()
stop_button.pack()
music_management_button.pack() # New button
submit_button.pack_forget()
listening_button.pack_forget()
def submit_chat():
global btn_status
btn_status = False # Stop the speech recognition loop
prompt = input_box.get()
input_box.delete(0, tk.END)
if "set alarm" in prompt or "alarm" in prompt:
submit_button.pack_forget()
listening_button.pack_forget()
# Ask the user for the time of the alarm
p = "At what time do you want to set the alarm? Enter the time in seconds."
update_chatroom(prompt, p)
def capture_alarm_time():
seconds = input_box.get()
try:
seconds = int(seconds)
except ValueError:
if g_num < 1:
jnum()
else:
p = "Invalid input. Please enter a valid number of seconds."
update_chatroom(seconds, p)
input_box.delete(0, tk.END)
return
# Set the alarm using a timer
t = threading.Timer(seconds, alarm)
t.start()
p = f"Alarm set for {seconds} seconds."
# Clear the input box
input_box.delete(0, tk.END)
# Update the chatroom display
update_chatroom(f"{prompt} for {seconds} seconds", p)
# Remove the Capture Time button
capture_button.pack_forget()
submit_button.pack()
listening_button.pack()
# Create a new button to capture the alarm time
capture_button = tk.Button(window, text="Capture Time", command=capture_alarm_time)
capture_button.pack()
return
elif "play music" in prompt:
#submit_button.pack_forget()
play_music()
#submit_button.pack()
return
elif "weather" in prompt:
update_chatroom(prompt, "Enter your city and country.")
submit_button.pack_forget()
get_weather()
#submit_button.pack()
return
response = generate_response(prompt) # Replace with your own code to generate bot response
update_chatroom(prompt, response)
submit_button.pack()
#listening_button.pack()
# Generate the assistant's response for a given prompt
def generate_response(prompt):
return aa(prompt)
# Create a submit button
submit_button = tk.Button(window, text="Send", command=submit_chat)
submit_button.pack()
listening_button = tk.Button(window, text='🎙', command=start_stop_listening)
listening_button.pack()
# Run the Tkinter event loop
window.mainloop()
| [
"You are a helpful assistant."
] |
2024-01-10 | ChobPT/oobaboogas-webui-langchain_agent | script.py | import asyncio, datetime,base64,re,time,requests,json, os, re
from typing import Optional, List, Mapping, Any, Union
from modules import shared as shared
from modules import chat as chat
from modules.extensions import apply_extensions
from modules.text_generation import encode, get_max_prompt_length
from modules.text_generation import (encode, generate_reply,
stop_everything_event)
from dataclasses import dataclass
from functools import partial
from io import BytesIO
from langchain.llms.base import LLM
from langchain.agents import AgentType, Tool, initialize_agent,load_tools
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.callbacks.base import BaseCallbackManager, AsyncCallbackHandler, BaseCallbackHandler
from langchain.prompts import StringPromptTemplate
from langchain import OpenAI, LLMChain
from langchain.schema import AgentAction, AgentFinish
from langchain.tools.base import BaseTool
#import the langchain wikipedia wrapper
from langchain.utilities import WikipediaAPIWrapper
from pathlib import Path
os.environ["OPENAI_API_TYPE"] = "open_ai"
os.environ["OPENAI_API_KEY"] = "123"
os.environ["OPENAI_API_BASE"] = "http://127.0.0.1:5001/v1"
#change the environ openedai_debug to true
os.environ["OPENAI_DEBUG"] = str("true")
def output_modifier(string):
return string
def sendprompt(texttosend):
print("[DEBUG]Sending Prompt Chat...")
return chat.send_dummy_message(texttosend)
def sendchat(texttosend):
print("[DEBUG]Sending Chat...")
return chat.send_dummy_reply(texttosend)
template = """USER:Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:
{tools}
Use the following format:
Question: the input question you must answer
Thought: you should always think about what to do
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
... (this Thought/Action/Action Input/Observation can repeat N times)
Thought: I now know the final answer
Final Answer: the final answer to the original input or the final conclusion to your thoughts
Begin! Remember to speak as a pirate when giving your final answer. Use lots of "Arg"s
Question: {input}
ASSISTANT: {agent_scratchpad}"""
searchWrapper=WikipediaAPIWrapper()
tools = [
Tool(
name = "Search",
func=searchWrapper.run,
description="Wikipedia serves as a versatile tool, offering uses such as gathering background information, exploring unfamiliar topics, finding reliable sources, understanding current events, discovering new interests, and obtaining a comprehensive overview on diverse subjects like historical events, scientific concepts, biographies of notable individuals, geographical details, cultural phenomena, artistic works, technological advancements, social issues, academic subjects, making it a valuable resource for learning and knowledge acquisition."
)
]
# Set up a prompt template
class CustomPromptTemplate(StringPromptTemplate):
# The template to use
template: str
# The list of tools available
tools: List[Tool]
def format(self, **kwargs) -> str:
# Get the intermediate steps (AgentAction, Observation tuples)
# Format them in a particular way
intermediate_steps = kwargs.pop("intermediate_steps")
thoughts = ""
for action, observation in intermediate_steps:
            thoughts += "\nThought: " + action.log
thoughts += f"\nObservation: {observation}\nThought: "
# Set the agent_scratchpad variable to that value
kwargs["agent_scratchpad"] = thoughts
# Create a tools variable from the list of tools provided
kwargs["tools"] = "\n Thought: ".join([f"{tool.name}: {tool.description}" for tool in self.tools])
# Create a list of tool names for the tools provided
kwargs["tool_names"] = ", ".join([tool.name for tool in self.tools])
return self.template.format(**kwargs)
prompt = CustomPromptTemplate(
template=template,
tools=tools,
# This omits the `agent_scratchpad`, `tools`, and `tool_names` variables because those are generated dynamically
# This includes the `intermediate_steps` variable because that is needed
input_variables=["input", "intermediate_steps"]
)
#Try to ask a question to openai through langchain
def split_text(text):
    # The Wikipedia wrapper may return several "Page:" blocks; keep only the preamble
    # plus the first page so the observation stays short.
    blocks = text.split("Page:")
    if len(blocks) > 1:
        first_block = blocks[0].strip() + '\nPage: ' + blocks[1].strip()
    else:
        first_block = text
    print('first_block', first_block)
    return first_block
class CustomOutputParser(AgentOutputParser):
def parse(self, llm_output: str) -> Union[AgentAction, AgentFinish]:
llm_output = split_text(str(llm_output))
# Check if agent should finish
if "Final Answer:" in llm_output:
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output": llm_output.split("Final Answer:")[-1].strip()},
log=llm_output,
)
# Parse out the action and action input
regex = r"Action\s*\d*\s*:(.*?)\nAction\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
match = re.search(regex, llm_output, re.DOTALL)
if not match:
#raise ValueError(f"Could not parse LLM output: `{llm_output}`")
return AgentFinish(
# Return values is generally always a dictionary with a single `output` key
# It is not recommended to try anything else at the moment :)
return_values={"output": llm_output.split("Final Answer:")[0].strip()},
log=llm_output,
)
else:
action = match.group(1).strip()
action_input = match.group(2)
# Return the action and action input
return AgentAction(tool=action, tool_input=action_input.strip(" ").strip('"'), log=llm_output)
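# Illustrative inputs/outputs for CustomOutputParser.parse (the LLM text is made up):
#   "Thought: I should look that up\nAction: Search\nAction Input: Blackbeard"
#       -> AgentAction(tool="Search", tool_input="Blackbeard", log=...)
#   "Thought: I now know the final answer\nFinal Answer: Arg, it be Blackbeard!"
#       -> AgentFinish(return_values={"output": "Arg, it be Blackbeard!"}, log=...)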
output_parser = CustomOutputParser()
llm = OpenAI(temperature=0)
llm_chain = LLMChain(llm=llm, prompt=prompt)
tool_names = [tool.name for tool in tools]
agent = LLMSingleActionAgent(
llm_chain=llm_chain,
output_parser=output_parser,
stop=["\nObservation"],
allowed_tools=tool_names
)
agent_executor = AgentExecutor.from_agent_and_tools(agent=agent, tools=tools, verbose=True)
def input_modifier(string):
if string[:3] == "/do":
agent_executor.run(string)
else:
output_modifier(string.split("###")[0].split("Human:")[0])
return string.replace('/do ', '')
| [
"USER:Answer the following questions as best you can, but speaking as a pirate might speak. You have access to the following tools:\n\n{tools}\n\nUse the following format:\n\nQuestion: the input question you must answer\nThought: you should always think about what to do\nAction: the action to take, should be one of [{tool_names}]\nAction Input: the input to the action\nObservation: the result of the action\n... (this Thought/Action/Action Input/Observation can repeat N times)\nThought: I now know the final answer\nFinal Answer: the final answer to the original input or the final conclusion to your thoughts\n\n\nBegin! Remember to speak as a pirate when giving your final answer. Use lots of \"Arg\"s\n\nQuestion: {input}\nASSISTANT: {agent_scratchpad}",
"input",
"intermediate_steps"
] |
2024-01-10 | annh3/policy_gradients | code~ddpg.py | """
Baseline class for DDPG, can probably implement
Dueling DDPG on top of it later.
"""
import numpy as np
import torch
import gym
import os
from general import get_logger, Progbar, export_plot
from baseline_network import BaselineNetwork
from network_utils import build_mlp, device, np2torch
from utils import ReplayBuffer
from policy import CategoricalPolicy, GaussianPolicy
from deterministic_policy import ContinuousPolicy
import pdb
import time
class DDPG(object):
"""
Class for implementing hybrid Q-learning
and policy gradient methods
"""
def __init__(self, env, config, seed, logger=None):
"""
Initialize Tabular Policy Gradient Class
Args:
env: an OpenAI Gym environment
config: class with hyperparameters
logger: logger instance from the logging module
"""
# directory for training outputs
if not os.path.exists(config.output_path):
print("OUTPUT PATH: ", config.output_path)
os.makedirs(config.output_path)
# store hyperparameters
self.config = config
self.seed = seed
self.logger = logger
if logger is None:
self.logger = get_logger(config.log_path)
self.env = env
self.env.seed(self.seed)
self.discrete = isinstance(env.action_space, gym.spaces.Discrete)
print("is discrete: " , self.discrete)
# only continuous action space
self.observation_dim = self.env.observation_space.shape[0]
self.action_dim = self.env.action_space.n if self.discrete else self.env.action_space.shape[0]
self.lr = self.config.learning_rate
self.init_policy_networks()
self.init_q_networks()
def init_averages(self):
self.avg_reward = 0.
self.max_reward = 0.
self.std_reward = 0.
self.eval_reward = 0.
def update_averages(self, rewards, scores_eval):
self.avg_reward = np.mean(rewards)
self.max_reward = np.max(rewards)
self.std_reward = np.sqrt(np.var(rewards) / len(rewards))
if len(scores_eval) > 0:
self.eval_reward = scores_eval[-1]
def record(self):
"""
Recreate an env and record a video for one episode
"""
env = gym.make(self.config.env_name)
env.seed(self.seed)
env = gym.wrappers.Monitor(env, self.config.record_path, video_callable=lambda x: True, resume=True)
self.evaluate(env, 1)
"""
Trying to figure out best way to update target networks
policy_net = DQN()
target_net = DQN()
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
self.update_target_net_op = list(
map(lambda v: v[0].assign(self.polyak * v[0] + (1. - self.polyak) * v[1]), zip(self.target_vars, self.main_vars)))
"""
def init_policy_networks(self):
"""
Initialize DETERMINISTIC policy
Initialize target policy
"""
self.policy_network = build_mlp(self.observation_dim,self.action_dim,self.config.n_layers, self.config.layer_size)
self.policy = ContinuousPolicy(self.policy_network)
# we never train this one
self.target_policy_network = build_mlp(self.observation_dim,self.action_dim,self.config.n_layers, self.config.layer_size)
self.target_policy = ContinuousPolicy(self.target_policy_network)
self.policy_optimizer = torch.optim.Adam(self.policy.network.parameters(), lr=self.lr)
def update_target_policy(self):
self.target_policy.update_network(self.policy_network.state_dict())
def init_q_networks(self):
"""
Initialize q network
* Q(s, u(s))
* the Q network takes in an observation and also an action
Initialize target q network
"""
#pdb.set_trace()
self.q_network = build_mlp(self.observation_dim+self.action_dim, 1, self.config.n_layers, self.config.layer_size)
self.target_q_network = build_mlp(self.observation_dim+self.action_dim, 1, self.config.n_layers, self.config.layer_size)
self.q_optimizer = torch.optim.Adam(self.q_network.parameters(), lr=self.config.q_lr)
def update_target_q(self):
self.target_q_network.load_state_dict(self.q_network.state_dict())
def training_update(self):
"""
        For k in range(self.config.num_update_steps):
            * sample a batch of transitions (s, a, r, s', d) from the replay buffer
            * compute targets y(r, s', d) = r + gamma * (1 - d) * Q_targ(s', mu_targ(s'))
            * update the Q-function by one step of gradient descent on the MSE loss:
              1/|B| * sum (Q(s, a) - y(r, s', d))^2
            * update the policy by one step of gradient ascent on 1/|B| * sum_s Q(s, mu(s))
            * update the target networks with polyak averaging

        do you have to save and reload weights?
"""
#raise NotImplementedError
for k in range(self.config.num_update_steps):
obs_batch, act_batch, rew_batch, next_obs_batch, done_mask = self.replay_buffer.sample(self.config.buffer_batch_size)
# I think you need to convert everything to a tensor
obs_batch = np2torch(obs_batch)
act_batch = np2torch(act_batch)
rew_batch = np2torch(rew_batch)
next_obs_batch = np2torch(next_obs_batch)
done_mask = np2torch(done_mask)
#pdb.set_trace()
tuple2cat = (torch.transpose(next_obs_batch, 0, 1),torch.transpose(self.target_policy_network(next_obs_batch),0,1))
target_q_network_inputs = torch.transpose(torch.cat(tuple2cat),0,1)
targets = rew_batch + self.config.gamma * (1-done_mask) * self.target_q_network(target_q_network_inputs)
# pdb.set_trace()
# to-do: check Q network size
# to-do: check existing implementations of DDPG to see how they do this <-- I think this one is more promising... ok save for later
# do we have to freeze?
# To-Do: CHECK ALL OF THIS
            # Q-function update: Q(s, a) is evaluated on the actions stored in the buffer,
            # and the targets are treated as constants.
            tuple2cat = (torch.transpose(obs_batch, 0, 1), torch.transpose(act_batch, 0, 1))
            q_network_inputs = torch.transpose(torch.cat(tuple2cat), 0, 1)
            self.q_optimizer.zero_grad()
            loss = ((self.q_network(q_network_inputs) - targets.detach()) ** 2).mean()
            loss.backward()
            self.q_optimizer.step()

            # Policy update: one step of gradient ascent on 1/|B| * sum_s Q(s, mu(s)).
            tuple2cat = (torch.transpose(obs_batch, 0, 1), torch.transpose(self.policy_network(obs_batch), 0, 1))
            policy_loss_input = torch.transpose(torch.cat(tuple2cat), 0, 1)
            self.policy_optimizer.zero_grad()
            loss = -(self.q_network(policy_loss_input)).mean()
            loss.backward()
            self.policy_optimizer.step()
"""
From https://stackoverflow.com/questions/48560227/how-to-take-the-average-of-the-weights-of-two-networks
beta = 0.5 #The interpolation parameter
params1 = model1.named_parameters()
params2 = model2.named_parameters()
dict_params2 = dict(params2)
for name1, param1 in params1:
if name1 in dict_params2:
dict_params2[name1].data.copy_(beta*param1.data + (1-beta)*dict_params2[name1].data)
model.load_state_dict(dict_params2)
"""
"""
Trying stack-overflow technique here
"""
params1 = self.q_network.named_parameters()
params2 = self.target_q_network.named_parameters()
dict_params2 = dict(params2)
for name1, param1 in params1:
if name1 in dict_params2:
dict_params2[name1].data.copy_((1-self.config.polyak)*param1.data + self.config.polyak*dict_params2[name1].data)
self.target_q_network.load_state_dict(dict_params2)
# Let's try this out
# torch.save(self.config.polyak*self.target_q_network.state_dict() + (1-self.config.polyak)*self.q_network.state_dict(), 'new_weights')
# self.target_q_network.load_state_dict(torch.load('new_weights'), strict=False)
# torch.save(self.policy_network.state_dict(), 'policy_weights')
# torch.save(self.target_policy_network.state_dict(), 'target_policy_weights')
# self.target_policy_network.load_state_dict(self.config.polyak*torch.load('target_policy_weights') + (1-self.config.polyak)*torch.load('policy_weights'), strict=False)
params1 = self.policy_network.named_parameters()
params2 = self.target_policy_network.named_parameters()
dict_params2 = dict(params2)
for name1, param1 in params1:
if name1 in dict_params2:
dict_params2[name1].data.copy_((1-self.config.polyak)*param1.data + self.config.polyak*dict_params2[name1].data)
self.target_policy_network.load_state_dict(dict_params2)
# should do a check!
"""
Log statistics here
"""
def train(self):
"""
Performs training
[From OpenAI]: Our DDPG implementation uses a trick to improve exploration at the start
        of training. For a fixed number of steps at the beginning (set with the
start_steps keyword argument), the agent takes actions which are sampled
from a uniform random distribution over valid actions. After that, it
returns to normal DDPG exploration.
"""
self.replay_buffer = ReplayBuffer(self.config.update_every*3) # Can change this to see how it affects things
state = self.env.reset()
#pdb.set_trace()
states, actions, rewards, done_mask = [], [], [], []
self.init_averages()
all_total_rewards = []
averaged_total_rewards = []
averaged_action_norms = []
for t in range(self.config.total_env_interacts):
"""
# observe state
# (unless t < start_steps) select a by perturbing deterministic policy with Gaussian noise and clipping
# observe next state, reward, and potential done signal
# store (s,a,r,s',d) in replay buffer
"""
"""
I think that we should update the buffer
1. when an episode is done
2. right before we perform network updates
"""
states.append(state)
action = self.policy.act(states[-1][None])[0] ## do we need to use our Continuous Policy class?
"""
My hypothesis is that we're getting actions that are NaNs
"""
if np.any(np.isnan(action)):
pdb.set_trace()
time.sleep(.002)
state, reward, done, info = self.env.step(action)
actions.append(action)
rewards.append(reward)
done_mask.append(done)
if done:
"""
Update replay buffer
zero out lists
reset environment
logic for loop
"""
"""
Make sure you update the total rewards
"""
all_total_rewards.extend(rewards)
self.replay_buffer.update_buffer(states,actions,rewards,done_mask)
state = self.env.reset()
states, actions, rewards, done_mask = [], [], [], []
if t % self.config.update_every == 0 and t > 0:
#pdb.set_trace()
"""
Update replay buffer
zero out lists
reset environment
"""
"""
Make sure you update the total rewards
"""
all_total_rewards.extend(rewards)
"""
Let's just put the logging here
"""
avg_reward = np.mean(rewards)
sigma_reward = np.sqrt(np.var(rewards) / len(rewards))
msg = "Average reward: {:04.2f} +/- {:04.2f}".format(avg_reward, sigma_reward)
averaged_total_rewards.append(avg_reward)
self.logger.info(msg)
"""
To-Do: Log action norms to debug MuJoCo issues
"""
#pdb.set_trace()
avg_action_norm = 0
norms = []
for act in actions:
avg_action_norm += np.linalg.norm(act)
norms.append(np.linalg.norm(act))
avg_action_norm /= len(actions)
sigma_norm = np.sqrt(np.var(norms) / len(norms))
msg = "Average action norm: {:04.2f} +/- {:04.2f}".format(avg_reward, sigma_reward)
averaged_action_norms.append(avg_action_norm)
self.logger.info(msg)
self.replay_buffer.update_buffer(states,actions,rewards,done_mask)
states, actions, rewards, done_mask = [], [], [], []
print("ABOUT TO CALL TRAINING UPDATE")
"""
To-Do: Debug this modulo stuff with batch sizes and update every (you know)
"""
if self.replay_buffer.can_sample(self.config.buffer_batch_size):
self.training_update() # we can do logging here
# logging
# if (t % self.config.summary_freq == 0):
# self.update_averages(total_rewards, all_total_rewards)
# self.record_summary(t)
"""
When should we perform logging?
            To-Do: take care of this in a bit
"""
# if t % self.config.summary_freq == 0 and t > 0:
# self.update_averages(total_rewards, all_total_rewards)
# self.record_summary(t)
self.logger.info("- Training done.")
np.save(self.config.scores_output, averaged_total_rewards)
export_plot(averaged_total_rewards, "Score", self.config.env_name, self.config.plot_output)
def run(self):
"""
Apply procedures of training for a DPPG.
"""
# record one game at the beginning
if self.config.record:
self.record()
# model
self.train()
# record one game at the end
if self.config.record:
self.record()
| [] |
2024-01-10 | sheepy928/nas-tools | app~plugins~modules~_autosignin~chdbits.py | import json
import os
import random
import re
from lxml import etree
from app.helper.openai_helper import OpenAiHelper
from app.plugins.modules._autosignin._base import _ISiteSigninHandler
from app.utils import StringUtils, RequestUtils
from config import Config
class CHDBits(_ISiteSigninHandler):
"""
    CHDBits (彩虹岛) sign-in handler.
    If an OpenAI API key is configured, ChatGPT is asked to answer the sign-in question;
    otherwise a random answer is submitted.
"""
    # Site URL to match; every handler class must set this to its own site URL
site_url = "chdbits.co"
    # Already signed in today
_sign_regex = ['今天已经签过到了']
    # Sign-in succeeded (regex list still to be completed)
_success_regex = ['\\d+点魔力值']
    # Store known correct answers so they can be looked up directly later
_answer_path = os.path.join(Config().get_temp_path(), "signin")
_answer_file = _answer_path + "/chdbits.json"
@classmethod
def match(cls, url):
"""
        Decide from the site URL whether this sign-in handler matches the current site;
        the default implementation is sufficient in most cases.
        :param url: site URL
        :return: whether it matches; if it does, this class's signin method will be called
"""
return True if StringUtils.url_equal(url, cls.site_url) else False
def signin(self, site_info: dict):
"""
        Perform the sign-in.
        :param site_info: site information, including the site URL, cookie, UA, etc.
        :return: sign-in result message
"""
site = site_info.get("name")
site_cookie = site_info.get("cookie")
ua = site_info.get("ua")
proxy = Config().get_proxies() if site_info.get("proxy") else None
        # Create the directory used to store known correct answers
if not os.path.exists(os.path.dirname(self._answer_file)):
os.makedirs(os.path.dirname(self._answer_file))
        # Check whether we have already signed in today
index_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).get_res(url='https://chdbits.co/bakatest.php')
if not index_res or index_res.status_code != 200:
self.error(f"签到失败,请检查站点连通性")
return False, f'【{site}】签到失败,请检查站点连通性'
if "login.php" in index_res.text:
self.error(f"签到失败,cookie失效")
return False, f'【{site}】签到失败,cookie失效'
sign_status = self.sign_in_result(html_res=index_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
        # Not signed in yet, so parse the HTML
html = etree.HTML(index_res.text)
if not html:
return False, f'【{site}】签到失败'
        # Extract the question and the answer options from the page
questionid = html.xpath("//input[@name='questionid']/@value")[0]
option_ids = html.xpath("//input[@name='choice[]']/@value")
option_values = html.xpath("//input[@name='choice[]']/following-sibling::text()")
question_str = html.xpath("//td[@class='text' and contains(text(),'请问:')]/text()")[0]
answers = list(zip(option_ids, option_values))
        # Extract the question text with a regex
match = re.search(r'请问:(.+)', question_str)
if match:
question_str = match.group(1)
self.debug(f"获取到签到问题 {question_str}")
else:
self.error(f"未获取到签到问题")
return False, f"【{site}】签到失败,未获取到签到问题"
        # Look up previously stored answers
exits_answers = {}
try:
with open(self._answer_file, 'r') as f:
json_str = f.read()
exits_answers = json.loads(json_str)
            # Look up the locally stored answer for this question
question_answer = exits_answers[question_str]
            # question_answer is expected to be a list
if not isinstance(question_answer, list):
question_answer = [question_answer]
            # If a correct answer exists locally, iterate over the options to find it
choice = []
for q in question_answer:
for num, answer in answers:
if str(q) == str(num):
choice.append(int(q))
if len(choice) > 0:
                # Sign in
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("查询本地已知答案失败,继续请求豆瓣查询")
        # Default to a random answer; if ChatGPT returns one, submit that instead
choice = [option_ids[random.randint(0, len(option_ids) - 1)]]
        # Assemble the question for ChatGPT
gpt_options = "{\n" + ",\n".join([f"{num}:{value}" for num, value in answers]) + "\n}"
gpt_question = f"题目:{question_str}\n" \
f"选项:{gpt_options}"
self.debug(f"组装chatgpt问题 {gpt_question}")
        # Ask ChatGPT for the answer
answer = OpenAiHelper().get_question_answer(question=gpt_question)
self.debug(f"chatpgt返回结果 {answer}")
        # Process the answer returned by ChatGPT
if answer is None:
self.warn(f"ChatGPT未启用, 开始随机签到")
# return f"【{site}】签到失败,ChatGPT未启用"
elif answer:
            # Extract the numbers from the reply with a regex
answer_nums = list(map(int, re.findall("\d+", answer)))
if not answer_nums:
self.warn(f"无法从chatgpt回复 {answer} 中获取答案, 将采用随机签到")
else:
choice = []
for answer in answer_nums:
                    # If a returned number is a valid option id, use it as the answer
if str(answer) in option_ids:
choice.append(int(answer))
self.info(f"chatgpt返回答案id {answer} 在签到选项 {option_ids} 中")
        # Sign in
return self.__signin(questionid=questionid,
choice=choice,
site_cookie=site_cookie,
ua=ua,
proxy=proxy,
site=site,
exits_answers=exits_answers,
question=question_str)
def __signin(self, questionid, choice, site, site_cookie, ua, proxy, exits_answers=None, question=None):
"""
        Sign-in request. Example form data:
questionid: 450
choice[]: 8
choice[]: 4
usercomment: 此刻心情:无
submit: 提交
        Multiple-choice questions submit several choice[] values.
"""
data = {
'questionid': questionid,
'choice[]': choice[0] if len(choice) == 1 else choice,
'usercomment': '太难了!',
'wantskip': '不会'
}
self.debug(f"签到请求参数 {data}")
sign_res = RequestUtils(cookies=site_cookie,
headers=ua,
proxies=proxy
).post_res(url='https://chdbits.co/bakatest.php', data=data)
if not sign_res or sign_res.status_code != 200:
self.error(f"签到失败,签到接口请求失败")
return False, f'【{site}】签到失败,签到接口请求失败'
        # Check whether the sign-in succeeded
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._success_regex)
if sign_status:
self.info(f"签到成功")
if exits_answers and question:
                # On success, store the answer in the local answer file
self.__write_local_answer(exits_answers=exits_answers or {},
question=question,
answer=choice)
return True, f'【{site}】签到成功'
else:
sign_status = self.sign_in_result(html_res=sign_res.text,
regexs=self._sign_regex)
if sign_status:
self.info(f"今日已签到")
return True, f'【{site}】今日已签到'
self.error(f"签到失败,请到页面查看")
return False, f'【{site}】签到失败,请到页面查看'
def __write_local_answer(self, exits_answers, question, answer):
"""
        Persist a successful answer to the local answer file.
"""
try:
exits_answers[question] = answer
            # Serialize the data
formatted_data = json.dumps(exits_answers, indent=4)
with open(self._answer_file, 'w') as f:
f.write(formatted_data)
except (FileNotFoundError, IOError, OSError) as e:
self.debug("签到成功写入本地文件失败")
| [] |
2024-01-10 | neo4j-partners/intelligent-app-from-videos-google-generativeai-neo4j | ui~streamlit~english2results.py | from langchain.chains import GraphCypherQAChain
from langchain.graphs import Neo4jGraph
from langchain.prompts.prompt import PromptTemplate
from langchain.llms import VertexAI
from retry import retry
from timeit import default_timer as timer
import streamlit as st
host = st.secrets["NEO4J_HOST"]+":"+st.secrets["NEO4J_PORT"]
user = st.secrets["NEO4J_USER"]
password = st.secrets["NEO4J_PASSWORD"]
db = st.secrets["NEO4J_DB"]
codey_model_name = st.secrets["TUNED_CYPHER_MODEL"]
if codey_model_name == '':
codey_model_name = 'code-bison'
CYPHER_GENERATION_TEMPLATE = """You are an expert Neo4j Cypher translator who understands the question in english and convert to Cypher strictly based on the Neo4j Schema provided and following the instructions below:
1. Generate Cypher query compatible ONLY for Neo4j Version 5
2. Do not use EXISTS, SIZE keywords in the cypher. Use alias when using the WITH keyword
3. Use only Nodes and relationships mentioned in the schema
4. Always enclose the Cypher output inside 3 backticks
5. Always do a case-insensitive and fuzzy search for any properties related search. Eg: to search for a Team name use `toLower(t.name) contains 'neo4j'`
6. Always use aliases to refer the node in the query
7. Cypher is NOT SQL. So, do not mix and match the syntaxes
Schema:
{schema}
Samples:
Question: What are the predictions about the Swans?
Answer: MATCH (e:Episode)-[:HAS_PREDICTION]->(p:Prediction) WHERE toLower(p.name) CONTAINS 'swans' RETURN p.name
Question: Who are the players mentioned in episode 1?
Answer: MATCH (e:Episode)-[:DISCUSSES_PLAYER]->(p:Player) WHERE e.episode = '1' RETURN p.name
Question: What are the top 5 common themes across all episodes combined?
Answer: MATCH (e:Episode)-[:HAS_THEME]->(t:Theme) RETURN t.name as theme, count(*) as num_themes ORDER BY num_themes DESC LIMIT 5
Question: Who are the most commonly talked coaches?
Answer: MATCH (e:Episode)-[:DISCUSSES_COACH]->(p:Coach) RETURN DISTINCT p.name as coach, count(e) as num_mentions ORDER BY num_mentions DESC LIMIT 5
Question: What is the gist of episode 4?
Answer: MATCH (e:Episode) WHERE e.episode = '4' RETURN e.synopsis
Question: Which episodes do you recommend if I am a fan of the Bombers?
Answer: Match(e:Episode)-[:DISCUSSES_TEAM]->(t:Team) WHERE toLower(t.name) contains 'bombers' return e
Question: I follow Mason Cox. Which episodes do you recommend?
Answer: MATCH (e:Episode)-[:DISCUSSES_PLAYER]->(p:Player) WHERE toLower(p.name) CONTAINS 'mason cox' RETURN e
Question: {question}
Answer:"""
CYPHER_GENERATION_PROMPT = PromptTemplate(
input_variables=["schema", "question"], template=CYPHER_GENERATION_TEMPLATE
)
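# Note: GraphCypherQAChain is expected to fill the {schema} placeholder from the connected
# Neo4j graph's schema and {question} from the user input at query time.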
@retry(tries=5, delay=5)
def get_results(messages):
start = timer()
try:
graph = Neo4jGraph(
url=host,
username=user,
password=password
)
chain = GraphCypherQAChain.from_llm(
VertexAI(
model_name=codey_model_name,
max_output_tokens=2048,
temperature=0,
top_p=0.95,
top_k=0.40),
graph=graph, verbose=True,
return_intermediate_steps=True,
cypher_prompt=CYPHER_GENERATION_PROMPT
)
if messages:
question = messages.pop()
else:
question = 'How many cases are there?'
return chain(question)
# except Exception as ex:
# print(ex)
# return "LLM Quota Exceeded. Please try again"
finally:
print('Cypher Generation Time : {}'.format(timer() - start))
| [
"You are an expert Neo4j Cypher translator who understands the question in english and convert to Cypher strictly based on the Neo4j Schema provided and following the instructions below:\n1. Generate Cypher query compatible ONLY for Neo4j Version 5\n2. Do not use EXISTS, SIZE keywords in the cypher. Use alias when using the WITH keyword\n3. Use only Nodes and relationships mentioned in the schema\n4. Always enclose the Cypher output inside 3 backticks\n5. Always do a case-insensitive and fuzzy search for any properties related search. Eg: to search for a Team name use `toLower(t.name) contains 'neo4j'`\n6. Always use aliases to refer the node in the query\n7. Cypher is NOT SQL. So, do not mix and match the syntaxes\nSchema:\n{schema}\nSamples:\nQuestion: What are the predictions about the Swans?\nAnswer: MATCH (e:Episode)-[:HAS_PREDICTION]->(p:Prediction) WHERE toLower(p.name) CONTAINS 'swans' RETURN p.name\nQuestion: Who are the players mentioned in episode 1?\nAnswer: MATCH (e:Episode)-[:DISCUSSES_PLAYER]->(p:Player) WHERE e.episode = '1' RETURN p.name\nQuestion: What are the top 5 common themes across all episodes combined?\nAnswer: MATCH (e:Episode)-[:HAS_THEME]->(t:Theme) RETURN t.name as theme, count(*) as num_themes ORDER BY num_themes DESC LIMIT 5\nQuestion: Who are the most commonly talked coaches?\nAnswer: MATCH (e:Episode)-[:DISCUSSES_COACH]->(p:Coach) RETURN DISTINCT p.name as coach, count(e) as num_mentions ORDER BY num_mentions DESC LIMIT 5\nQuestion: What is the gist of episode 4?\nAnswer: MATCH (e:Episode) WHERE e.episode = '4' RETURN e.synopsis\nQuestion: Which episodes do you recommend if I am a fan of the Bombers?\nAnswer: Match(e:Episode)-[:DISCUSSES_TEAM]->(t:Team) WHERE toLower(t.name) contains 'bombers' return e\nQuestion: I follow Mason Cox. Which episodes do you recommend?\nAnswer: MATCH (e:Episode)-[:DISCUSSES_PLAYER]->(p:Player) WHERE toLower(p.name) CONTAINS 'mason cox' RETURN e\n\nQuestion: {question}\nAnswer:",
"question",
"mason cox",
"swans"
] |
2024-01-10 | glibsonoran/Plush-for-ComfyUI | style_prompt.py |
import openai
from openai import OpenAI
import os
import base64
from io import BytesIO
from PIL import Image, ImageOps
import numpy as np
import re
import torch
from enum import Enum
import requests
from .mng_json import json_manager
#pip install pillow
#pip install bytesio
#Enum for style_prompt user input modes
class InputMode(Enum):
IMAGE_PROMPT = 1
IMAGE_ONLY = 2
PROMPT_ONLY = 3
#Get information from the config.json file
class cFigSingleton:
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = super().__new__(cls)
cls._instance.get_file()
return cls._instance
def get_file(self):
#Get script working directory
j_mngr = json_manager()
# Error handling is in the load_json method
# Errors will be raised since is_critical is set to True
config_data = j_mngr.load_json(j_mngr.config_file, True)
#check if file is empty
if not config_data:
raise ValueError("Plush - Error: config.json contains no valid JSON data")
#set property variables
# Try getting API key from Plush environment variable
self._figKey = os.getenv('OAI_KEY') or os.getenv('OPENAI_API_KEY')
# Try the openAI recommended Env Variable.
if not self._figKey:
raise ValueError("Plush - Error: OpenAI API key not found. Please set it as an environment variable (See the Plush ReadMe).")
self.figInstruction = config_data['instruction']
self.figExample = config_data['example']
self.figStyle = config_data['style']
self.figImgInstruction = config_data['img_instruction']
self.figImgPromptInstruction = config_data['img_prompt_instruction']
try:
self.figOAIClient = OpenAI(api_key= self._figKey)
except Exception as e:
print (f"Invalid OpenAI API key: {e}")
raise
@property
def key(self)-> str:
return self._figKey
@property
def instruction(self):
return self.figInstruction
@property
def example(self):
return self.figExample
@property
def style(self):
#make sure the designated default value is present in the list
if "Photograph" not in self.figStyle:
self.figStyle.append("Photograph")
return self.figStyle
@property
def ImgInstruction(self):
return self.figImgInstruction
@property
def ImgPropmptInstruction(self):
return self.figImgPromptInstruction
@property
def openaiClient(self)-> openai.OpenAI:
return self.figOAIClient
class Enhancer:
#Build a creative prompt using a ChatGPT model
def __init__(self):
self.cFig = cFigSingleton()
def build_instruction(self, mode, style, elements, artist):
#build the instruction from user input
instruc = ""
if mode == InputMode.PROMPT_ONLY:
if self.cFig.instruction:
instruc = self.cFig.instruction
elif mode == InputMode.IMAGE_ONLY:
if self.cFig.ImgInstruction:
instruc = self.cFig.ImgInstruction
elif mode == InputMode.IMAGE_PROMPT:
if self.cFig.ImgPropmptInstruction:
instruc = self.cFig.ImgPropmptInstruction
if instruc.count("{}") >= 2:
instruc = instruc.format(style, elements)
elif instruc.count("{}") == 1:
instruc = instruc.format(style)
if artist >= 1:
            art_instruc = " Include {} artist(s) who work in the specified artistic style by placing the artists' name(s) at the end of the sentence prefaced by 'style of'."
instruc += art_instruc.format(str(artist))
return(instruc)
def clean_response_text(self, text: str)-> str:
# Replace multiple newlines or carriage returns with a single one
cleaned_text = re.sub(r'\n+', '\n', text).strip()
return cleaned_text
def icgptRequest(self, GPTmodel, creative_latitude, tokens, prompt="", instruction="", example="", image=None,) :
client = self.cFig.openaiClient
# There's an image
if image:
GPTmodel = "gpt-4-vision-preview" # Use vision model for image
image_url = f"data:image/jpeg;base64,{image}" # Assuming image is base64 encoded
# messages.append({"role": "system", "content": {"type": "image_url", "image_url": {"url": image_url}}})
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.cFig.key}"
}
# messages list
messages = []
# Append the user message
user_content = []
if prompt:
prompt = "PROMPT: " + prompt
user_content.append({"type": "text", "text": prompt})
user_content.append({"type": "image_url", "image_url": {"url": image_url}})
messages.append({"role": "user", "content": user_content})
# Append the system message if instruction is present
if instruction:
messages.append({"role": "system", "content": instruction})
# Append the example in the assistant role
if example:
messages.append({"role": "assistant", "content": example})
payload = {
"model": GPTmodel,
"max_tokens": tokens,
"temperature": creative_latitude,
"messages": messages
}
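            # Design note (assumption): the vision request goes through the raw REST endpoint
            # rather than the SDK client used for the text-only path below, presumably because
            # SDK support for gpt-4-vision-preview was still limited when this was written.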
response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
response_json = response.json()
CPTG_response = self.clean_response_text(response_json['choices'][0]['message']['content'] )
return CPTG_response
# No image
messages = []
if instruction:
messages.append({"role": "system", "content": instruction})
if prompt:
messages.append({"role": "user", "content": prompt})
else:
# User has provided no prompt or image
response = "empty box with 'NOTHING' printed on its side bold letters small flying moths dingy gloomy dim light rundown warehouse"
return response
if example:
messages.append({"role": "assistant", "content": example})
try:
response = client.chat.completions.create(
model=GPTmodel,
messages=messages,
temperature=creative_latitude,
max_tokens=tokens
)
except openai.APIConnectionError as e:
print("Server connection error: {e.__cause__}") # from httpx.
raise
except openai.RateLimitError as e:
print(f"OpenAI RATE LIMIT error {e.status_code}: (e.response)")
raise
except openai.APIStatusError as e:
print(f"OpenAI STATUS error {e.status_code}: (e.response)")
raise
except openai.BadRequestError as e:
print(f"OpenAI BAD REQUEST error {e.status_code}: (e.response)")
raise
except Exception as e:
print(f"An unexpected error occurred: {e}")
raise
CPTG_response = response.choices[0].message.content
return CPTG_response
@classmethod
def INPUT_TYPES(cls):
iFig=cFigSingleton()
#Floats have a problem, they go over the max value even when round and step are set, and the node fails. So I set max a little over the expected input value
return {
"required": {
"GPTmodel": (["gpt-3.5-turbo","gpt-4","gpt-4-1106-preview"],{"default": "gpt-4"} ),
"creative_latitude" : ("FLOAT", {"max": 1.201, "min": 0.1, "step": 0.1, "display": "number", "round": 0.1, "default": 0.7}),
"tokens" : ("INT", {"max": 8000, "min": 20, "step": 10, "default": 500, "display": "number"}),
"example" : ("STRING", {"forceInput": True, "multiline": True}),
"style": (iFig.style,{"default": "Photograph"}),
"artist" : ("INT", {"max": 3, "min": 0, "step": 1, "default": 1, "display": "number"}),
"max_elements" : ("INT", {"max": 25, "min": 3, "step": 1, "default": 10, "display": "number"}),
"style_info" : ("BOOLEAN", {"default": False}),
"prompt": ("STRING",{"multiline": True})
},
"optional": {
"image" : ("IMAGE", {"default": None})
}
}
RETURN_TYPES = ("STRING", "STRING", "STRING")
RETURN_NAMES = ("CGPTprompt", "CGPTinstruction","Style Info")
FUNCTION = "gogo"
OUTPUT_NODE = False
CATEGORY = "Plush/OpenAI"
def gogo(self, GPTmodel, creative_latitude, tokens, example, style, artist, max_elements, style_info, prompt, image=None):
#If no example text was provided by the user, use my default
if not example:
example = self.cFig.example
CGPT_styleInfo = None
#Convert PyTorch.tensor to B64encoded image
if isinstance(image, torch.Tensor):
img_convert = DalleImage()
image = img_convert.tensor_to_base64(image)
#build instruction based on user input
mode = 0
if image and prompt:
mode = InputMode.IMAGE_PROMPT
elif image:
mode = InputMode.IMAGE_ONLY
elif prompt:
mode = InputMode.PROMPT_ONLY
instruction = self.build_instruction(mode, style, max_elements, artist)
if style_info:
#User has request information about the art style. GPT will provide it
sty_prompt = "Give an 150 word backgrounder on the art style: {}. Starting with describing what it is, include information about its history and which artists represent the style."
sty_prompt = sty_prompt.format(style)
CGPT_styleInfo = self.icgptRequest(GPTmodel, creative_latitude, tokens, sty_prompt )
CGPT_prompt = self.icgptRequest(GPTmodel, creative_latitude, tokens, prompt, instruction, example, image)
return (CGPT_prompt, instruction, CGPT_styleInfo)
class DalleImage:
#Accept a user prompt and parameters to produce a Dall_e generated image
def __init__(self):
self.cFig = cFigSingleton()
def b64_to_tensor(self, b64_image: str) -> torch.Tensor:
"""
Converts a base64-encoded image to a torch.Tensor.
Note: ComfyUI expects the image tensor in the [N, H, W, C] format.
For example with the shape torch.Size([1, 1024, 1024, 3])
Args:
b64_image (str): The b64 image to convert.
Returns:
torch.Tensor: an image Tensor.
"""
# Decode the base64 string
image_data = base64.b64decode(b64_image)
# Open the image with PIL and handle EXIF orientation
image = Image.open(BytesIO(image_data))
image = ImageOps.exif_transpose(image)
# Convert to RGB and normalize
image = image.convert("RGB")
image_np = np.array(image).astype(np.float32) / 255.0
# Convert to PyTorch tensor
tensor_image = torch.from_numpy(image_np)
# Check shape and permute if necessary
#if tensor_image.shape[-1] in [3, 4]:
#tensor_image = tensor_image.permute(2, 0, 1) # Convert to [C, H, W]
# Create a mask if there's an alpha channel
if tensor_image.ndim == 3: # If the tensor is [C, H, W]
mask = torch.zeros_like(tensor_image[0, :, :], dtype=torch.float32)
elif tensor_image.ndim == 4: # If the tensor is [N, C, H, W]
mask = torch.zeros_like(tensor_image[0, 0, :, :], dtype=torch.float32)
if tensor_image.shape[1] == 4: # Assuming channels are in the first dimension after unsqueeze
mask = 1.0 - tensor_image[:, 3, :, :] / 255.0
tensor_image = tensor_image.float()
mask = mask.float()
return tensor_image.unsqueeze(0), mask
def tensor_to_base64(self, tensor: torch.Tensor) -> str:
"""
Converts a PyTorch tensor to a base64-encoded image.
Note: ComfyUI provides the image tensor in the [N, H, W, C] format.
For example with the shape torch.Size([1, 1024, 1024, 3])
Args:
tensor (torch.Tensor): The image tensor to convert.
Returns:
str: Base64-encoded image string.
"""
# Convert tensor to PIL Image
if tensor.ndim == 4:
tensor = tensor.squeeze(0) # Remove batch dimension if present
pil_image = Image.fromarray((tensor.numpy() * 255).astype('uint8'))
# Save PIL Image to a buffer
buffer = BytesIO()
pil_image.save(buffer, format="PNG") # Can change to JPEG if preferred
buffer.seek(0)
# Encode buffer to base64
base64_image = base64.b64encode(buffer.read()).decode('utf-8')
return base64_image
@classmethod
def INPUT_TYPES(cls):
        #dall-e-2 API requires different input parameters as compared to dall-e-3, at this point I'll just use dall-e-3
# "batch_size": ("INT", {"max": 8, "min": 1, "step": 1, "default": 1, "display": "number"})
        # Possible future implementation of batch_sizes greater than one.
# "image" : ("IMAGE", {"forceInput": True}),
return {
"required": {
"GPTmodel": (["dall-e-3",], ),
"prompt": ("STRING",{"multiline": True, "forceInput": True}),
"image_size": (["1792x1024", "1024x1792", "1024x1024"], {"default": "1024x1024"} ),
"image_quality": (["standard", "hd"], {"default": "hd"} ),
"style": (["vivid", "natural"], {"default": "natural"} )
},
}
RETURN_TYPES = ("IMAGE", "MASK", "STRING" )
RETURN_NAMES = ("image", "mask", "Dall_e_prompt")
FUNCTION = "gogo"
OUTPUT_NODE = False
CATEGORY = "Plush/OpenAI"
def gogo(self, GPTmodel, prompt, image_size, image_quality, style):
client = self.cFig.openaiClient
print(f"Talking to Dalle model: {GPTmodel}")
try:
response = client.images.generate(
model = GPTmodel,
prompt = prompt,
size = image_size,
quality = image_quality,
style = style,
n=1,
response_format = "b64_json",
)
except openai.APIConnectionError as e:
print("Server connection error: {e.__cause__}") # from httpx.
raise
except openai.RateLimitError as e:
print(f"OpenAI RATE LIMIT error {e.status_code}: (e.response)")
raise
except openai.APIStatusError as e:
print(f"OpenAI STATUS error {e.status_code}: (e.response)")
raise
except openai.BadRequestError as e:
print(f"OpenAI BAD REQUEST error {e.status_code}: (e.response)")
raise
except Exception as e:
print(f"An unexpected error occurred: {e}")
raise
# Get the revised_prompt
revised_prompt = response.data[0].revised_prompt
#Convert the b64 json to a pytorch tensor
b64Json = response.data[0].b64_json
png_image, mask = self.b64_to_tensor(b64Json)
return (png_image, mask.unsqueeze(0), revised_prompt)
# A dictionary that contains all nodes you want to export with their names
# NOTE: names should be globally unique
NODE_CLASS_MAPPINGS = {
"Enhancer": Enhancer,
"DalleImage": DalleImage
}
# A dictionary that contains the friendly/humanly readable titles for the nodes
NODE_DISPLAY_NAME_MAPPINGS = {
"Enhancer": "Style Prompt",
"DalleImage": "OAI Dall_e Image"
}
#***************TESTING****************************
#debug testing mTextSwitch
""" mTs = mulTextSwitch()
ddict = mTs.INPUT_TYPES()
print(ddict)
tst = ""
tst = mTs.gogo(2, "String 1 is a long string", "String 2 is a long string", "String 3 is a long string")
print(tst) """
#debug testing DalleImage
""" Di = DalleImage()
ddict = Di.INPUT_TYPES()
tst = []
tst = Di.gogo("dall-e-3", "A woman standing by a flowing river", "1024x1024", "hd", "natural")
myname = tst[0].names """
#debug testing Enhancer
#**********Load and convert test image file*************
""" img_convert = DalleImage()
j_mngr = json_manager()
image_path = os.path.join(j_mngr.script_dir, 'test_img.png')
with open(image_path, "rb") as image_file:
image_file = base64.b64encode(image_file.read()).decode('utf-8')
tensor_image, mask = img_convert.b64_to_tensor(image_file)
tensor_image = None
#*************End Image File****************************
#image_file = None
Enh = Enhancer()
Enh.INPUT_TYPES()
test_resp = Enh.gogo("gpt-4", 0.7, 2000, "", None, "Shallow Depth of Field Photograph", 2, 10,False, tensor_image)
print (test_resp[0])""" | [
"PROMPT: PLACEHOLDER",
"1",
"Give an 150 word backgrounder on the art style: {}. Starting with describing what it is, include information about its history and which artists represent the style.",
"3"
] |
2024-01-10 | hpi-bp1819-naumann/shift-detector | shift_detector~precalculations~lda_embedding.py | import pandas as pd
import numpy as np
from numbers import Number
from sklearn.decomposition import LatentDirichletAllocation as LDA_skl
from sklearn.feature_extraction.text import *
from gensim.sklearn_api import LdaTransformer
from gensim.models.ldamodel import LdaModel
from gensim.models.coherencemodel import CoherenceModel
from gensim.corpora import Dictionary
import warnings
from shift_detector.precalculations.precalculation import Precalculation
from shift_detector.precalculations.count_vectorizer import CountVectorizer
from shift_detector.precalculations.lda_gensim_tokenizer import LdaGensimTokenizer
from shift_detector.utils.column_management import ColumnType
class LdaEmbedding(Precalculation):
def __init__(self, columns, n_topics=20, n_iter=10, random_state=0, lib='sklearn', trained_model=None,
start=2, stop=21, step=1, stop_words='english', max_features=None):
self.model = None
self.trained_model = None
self.lib = None
self.columns = None
self.stop_words = stop_words
self.max_features = max_features
if columns:
if isinstance(columns, list) and all(isinstance(col, str) for col in columns):
self.columns = columns
else:
raise TypeError("Columns has to be list of strings . Column {} is of type {}"
.format(columns, type(columns)))
else:
raise ValueError("You have to specify which columns you want to vectorize")
if trained_model:
warnings.warn("Trained models are not trained again. Please make sure to only input the column(s) "
"that the model was trained on", UserWarning)
self.trained_model = trained_model
self.random_state = self.trained_model.random_state
if isinstance(self.trained_model, type(LDA_skl())):
self.n_topics = self.trained_model.n_components
self.n_iter = self.trained_model.max_iter
else:
self.n_topics = self.trained_model.num_topics
self.n_iter = self.trained_model.iterations
else:
if n_topics == 'auto':
self.n_topics = n_topics
                params = [start, stop, step]
                for number in params:
                    if not isinstance(number, int):
                        raise TypeError("start, stop and step have to be ints. Received: {}".format(type(number)))
                if start < 2:
                    raise ValueError("Start value has to be at least 2. Received: {}".format(start))
                if step < 1:
                    raise ValueError("Step value has to be at least 1. Received: {}".format(step))
                if stop < start:
                    raise ValueError("Stop value has to be higher than the start value. "
                                     "Received: start={}, stop={}".format(start, stop))
self.start = start
self.stop = stop
self.step = step
else:
if not isinstance(n_topics, int):
raise TypeError("Number of topic has to be an integer. Received: {}".format(type(n_topics)))
if n_topics < 2:
raise ValueError("Number of topics has to be at least 2. Received: {}".format(n_topics))
self.n_topics = n_topics
            if not isinstance(n_iter, int):
                raise TypeError("n_iter has to be an integer. Received: {}".format(type(n_iter)))
            if n_iter < 1:
                raise ValueError("n_iter has to be at least 1. Received: {}".format(n_iter))
self.n_iter = n_iter
if not isinstance(random_state, int):
raise TypeError("Random_state has to be a integer. Received: {}".format(type(random_state)))
if random_state < 0:
raise ValueError("Random_state has to be positive or zero. Received: {}".format(random_state))
self.random_state = random_state
if not isinstance(lib, str):
raise TypeError("Lib has to be a string. Received: {}".format(type(lib)))
if lib == 'sklearn':
self.model = \
LDA_skl(n_components=self.n_topics, max_iter=self.n_iter, random_state=self.random_state)
elif lib == 'gensim':
self.model = \
LdaTransformer(num_topics=self.n_topics, iterations=self.n_iter, random_state=self.random_state)
else:
raise ValueError("The supported libraries are sklearn and gensim. Received: {}".format(lib))
self.lib = lib
def __eq__(self, other):
if isinstance(other, self.__class__):
model_attributes = sorted([(k, v) for k, v in self.model.__dict__.items()
if isinstance(v, Number) or isinstance(v, str) or isinstance(v, list)])
other_model_attributes = sorted([(k, v) for k, v in other.model.__dict__.items()
if isinstance(v, Number) or isinstance(v, str) or isinstance(v, list)])
return isinstance(other.model, self.model.__class__) \
and model_attributes == other_model_attributes and self.columns == other.columns \
and self.stop_words == other.stop_words and self.max_features == other.max_features
return False
def __hash__(self):
if self.trained_model:
trained_hash_list = [self.__class__, self.trained_model.__class__]
for item in self.trained_model.__dict__.items():
if not item[0] == 'components_' and not item[0] == 'exp_dirichlet_component_':
# dirty fix I know, ndarrays are not hashable
trained_hash_list.extend(item)
return hash(tuple(trained_hash_list))
elif self.columns:
hash_list = [self.__class__, self.model.__class__, self.n_topics,
self.n_iter, self.random_state, self.max_features]
hash_list.extend(self.columns)
hash_list.extend(self.stop_words)
return hash(tuple(hash_list))
else:
return hash(tuple([self.__class__, self.model.__class__, self.n_topics,
self.n_iter, self.random_state]))
@staticmethod
def topic_probabilities_to_topics(lda_model, dtm):
# Always takes the topic with the highest probability as the dominant topic
return [arr.argmax()+1 for arr in lda_model.transform(dtm)]
@staticmethod
def get_topic_word_distribution_gensim(lda_model, n_topics, n_top_words):
topic_words = lda_model.gensim_model.show_topics(num_topics=n_topics,
num_words=n_top_words,
formatted=False)
return topic_words
@staticmethod
def get_topic_word_distribution_sklearn(lda_model, vocab, n_top_words):
# copied implementation from gensim show_topics
topic_words = []
for topic_n, comp in enumerate(lda_model.components_):
topic_ = comp
topic_ = topic_ / topic_.sum()
most_extreme = np.argpartition(-topic_, n_top_words)[:n_top_words]
word_idx = most_extreme.take(np.argsort(-topic_.take(most_extreme)))
topic_ = [(vocab[id], topic_[id]) for id in word_idx]
topic_words.append((topic_n, topic_))
return topic_words
def get_number_of_topics_with_best_coherence_score(self, col, tokenized_merged, all_corpora, all_dicts):
coherence_scores = {}
for n in range(self.start, self.stop, self.step):
model = LdaModel(all_corpora[col], n, all_dicts[col], random_state=0)
cm = CoherenceModel(model=model, texts=tokenized_merged[col], coherence='c_v')
coherence = cm.get_coherence()
coherence_scores[n] = coherence
return max(coherence_scores, key=lambda k: coherence_scores[k])
def process(self, store):
if isinstance(self.columns, str):
if self.columns in store.column_names(ColumnType.text):
col_names = self.columns
else:
for col in self.columns:
if col not in store.column_names(ColumnType.text):
raise ValueError("Given column is not contained in detected text columns of the datasets: {}"
.format(col))
col_names = self.columns
topic_labels = ['topics ' + col for col in col_names]
transformed1 = pd.DataFrame()
transformed2 = pd.DataFrame()
topic_words_all_columns = {}
all_models = {}
if self.lib == 'gensim':
tokenized1, tokenized2 = store[LdaGensimTokenizer(stop_words=self.stop_words, columns=self.columns)]
tokenized_merged = pd.concat([tokenized1, tokenized2], ignore_index=True)
all_corpora = {}
all_dicts = {}
for i, col in enumerate(col_names):
all_dicts[col] = Dictionary(tokenized_merged[col])
gensim_dict1 = Dictionary(tokenized1[col])
gensim_dict2 = Dictionary(tokenized2[col])
all_corpora[col] = [all_dicts[col].doc2bow(line) for line in tokenized_merged[col]]
corpus1 = [gensim_dict1.doc2bow(line) for line in tokenized1[col]]
corpus2 = [gensim_dict2.doc2bow(line) for line in tokenized2[col]]
if not self.trained_model:
if self.n_topics == 'auto':
n_topics = self.get_number_of_topics_with_best_coherence_score(col, tokenized_merged,
all_corpora, all_dicts)
self.model.num_topics = n_topics
else:
n_topics = self.n_topics
model = self.model
model.id2word = all_dicts[col]
model = model.fit(all_corpora[col])
all_models[col] = model.gensim_model
else:
model = self.trained_model
topic_words_all_columns[col] = self.get_topic_word_distribution_gensim(model, n_topics, 200)
transformed1[topic_labels[i]] = self.topic_probabilities_to_topics(model, corpus1)
transformed2[topic_labels[i]] = self.topic_probabilities_to_topics(model, corpus2)
return transformed1, transformed2, topic_words_all_columns, all_models, all_corpora, all_dicts
else:
vectorized1, vectorized2, feature_names, all_vecs = store[CountVectorizer(stop_words=self.stop_words,
max_features=self.max_features,
columns=self.columns)]
all_dtms = dict(vectorized1, **vectorized2)
if self.n_topics == 'auto':
tokenized1, tokenized2 = store[LdaGensimTokenizer(stop_words=self.stop_words, columns=self.columns)]
tokenized_merged = pd.concat([tokenized1, tokenized2], ignore_index=True)
all_corpora = {}
all_dicts = {}
for i, col in enumerate(col_names):
if not self.trained_model:
if self.n_topics == 'auto':
all_dicts[col] = Dictionary(tokenized_merged[col])
all_corpora[col] = [all_dicts[col].doc2bow(line) for line in tokenized_merged[col]]
n_topics = self.get_number_of_topics_with_best_coherence_score(col, tokenized_merged,
all_corpora, all_dicts)
self.model.n_components = n_topics
model = self.model
model = model.fit(all_dtms[col])
all_models[col] = model
else:
model = self.trained_model
topic_words_all_columns[col] = self.get_topic_word_distribution_sklearn(model, feature_names[col], 200)
transformed1[topic_labels[i]] = \
self.topic_probabilities_to_topics(model, vectorized1[col])
transformed2[topic_labels[i]] = \
self.topic_probabilities_to_topics(model, vectorized2[col])
return transformed1, transformed2, topic_words_all_columns, all_models, all_dtms, all_vecs
| [] |
2024-01-10 | taby-ai/vocode-python | apps~langchain_agent~tools~contacts.py | from typing import List
from langchain.agents import tool
CONTACTS = [{"name": "Ajay", "phone": "+15555555555"}]
@tool("get_all_contacts")
def get_all_contacts(placeholder: str) -> List[dict]:
"""Get contacts."""
return CONTACTS
| [] |
2024-01-10 | taby-ai/vocode-python | vocode~streaming~vector_db~base_vector_db.py | import os
from typing import Iterable, List, Optional, Tuple
import aiohttp
import openai
from langchain.docstore.document import Document
DEFAULT_OPENAI_EMBEDDING_MODEL = "text-embedding-ada-002"
class VectorDB:
def __init__(
self,
aiohttp_session: Optional[aiohttp.ClientSession] = None,
):
if aiohttp_session:
# the caller is responsible for closing the session
self.aiohttp_session = aiohttp_session
self.should_close_session_on_tear_down = False
else:
self.aiohttp_session = aiohttp.ClientSession()
self.should_close_session_on_tear_down = True
async def create_openai_embedding(
self, text, model=DEFAULT_OPENAI_EMBEDDING_MODEL
) -> List[float]:
params = {
"input": text,
}
engine = os.getenv("AZURE_OPENAI_TEXT_EMBEDDING_ENGINE")
if engine:
params["engine"] = engine
else:
params["model"] = model
return list((await openai.Embedding.acreate(**params))["data"][0]["embedding"])
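        # Minimal usage sketch (assuming an async context and a configured OpenAI API key):
        #   db = VectorDB()
        #   vector = await db.create_openai_embedding("hello world")
        #   await db.tear_down()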
async def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
namespace: Optional[str] = None,
) -> List[str]:
raise NotImplementedError
async def similarity_search_with_score(
self,
query: str,
filter: Optional[dict] = None,
namespace: Optional[str] = None,
) -> List[Tuple[Document, float]]:
raise NotImplementedError
async def tear_down(self):
if self.should_close_session_on_tear_down:
await self.aiohttp_session.close()
| [] |
2024-01-10 | mkt1412/GraspGPT_public | gcngrasp~demo_llm.py | import argparse
import os
import tqdm
import time
import random
import sys
import openai
import torch
import numpy as np
import torch.nn.functional as F
from models.graspgpt_plain import GraspGPT_plain
from transformers import BertTokenizer, BertModel, logging
from data.SGNLoader import pc_normalize
from config import get_cfg_defaults
from geometry_utils import farthest_grasps, regularize_pc_point_count
from visualize import draw_scene, get_gripper_control_points
logging.set_verbosity_error()
DEVICE = "cuda"
CODE_DIR = os.path.join(os.path.dirname(__file__), '../')
sys.path.append(CODE_DIR)
from data_specification import TASKS, OPENAI_API_KEY, OBJ_PROMPTS, TASK_PROMPTS
openai.api_key = OPENAI_API_KEY
def gpt(text):
"""
OpenAI GPT API
"""
response = openai.Completion.create(
engine="text-davinci-003",
prompt=text,
temperature=1.0,
max_tokens=256,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
return response['choices'][0]['text'].strip()
def encode_text(text, tokenizer, model, device, type=None):
"""
Language data encoding with a Google pre-trained BERT
"""
if type == 'od':
encoded_input = tokenizer(text, return_tensors='pt', padding="max_length", max_length=300).to(device)
elif type == 'td':
encoded_input = tokenizer(text, return_tensors='pt', padding="max_length", max_length=200).to(device)
elif type == 'li':
encoded_input = tokenizer(text, return_tensors='pt', padding="max_length", max_length=21).to(device)
else:
raise ValueError(f'No such language embedding type: {type}')
with torch.no_grad():
output = model(**encoded_input)
word_embedding = output[0]
sentence_embedding = torch.mean(output[0], dim=1)
return word_embedding, sentence_embedding, encoded_input['attention_mask']
def gen_gpt_desc(class_label, task_label):
"""
Generate object class and task descriptions
"""
class_keys = [random.choice(['shape', 'geometry']), random.choice(["use", "func"]),
random.choice(["sim_shape", "sim_geo"]), random.choice(["sim_use", "sim_func"])]
task_keys = [random.choice(['func', 'use']), "sim_effect", random.choice(['sem_verb', 'sim_verb'])]
print("\nGenerating object class description ......\n")
class_desc = []
for c_key in class_keys:
prompt = OBJ_PROMPTS[c_key]
prompt = prompt.replace('OBJ_CLASS', class_label)
temp_ans = gpt(prompt)
print(f"[{c_key}] "+temp_ans)
class_desc.append(temp_ans)
time.sleep(20)
class_desc = ' '.join(item for item in class_desc)
print("\nGenerating task description ......\n")
task_desc = []
for t_key in task_keys:
prompt = TASK_PROMPTS[t_key]
prompt = prompt.replace('TASK_CLASS', task_label)
temp_ans = gpt(prompt)
print(f"[{t_key}] "+temp_ans)
task_desc.append(temp_ans)
time.sleep(20)
task_desc = ' '.join(item for item in task_desc)
return class_desc, task_desc
def load_model(cfg):
"""
Load GraspGPT pre-trained weight from checkpoint
"""
model = GraspGPT_plain(cfg)
model_weights = torch.load(
cfg.weight_file,
map_location=DEVICE)['state_dict']
model.load_state_dict(model_weights)
model = model.to(DEVICE)
model.eval()
return model
def test(model, pc, obj_desc, obj_desc_mask, task_desc, task_desc_mask, task_ins, task_ins_mask):
pc = pc.type(torch.cuda.FloatTensor)
with torch.no_grad():
logits = model(pc, obj_desc, obj_desc_mask, task_desc, task_desc_mask, task_ins, task_ins_mask)
logits = logits.squeeze()
probs = torch.sigmoid(logits)
preds = torch.round(probs)
return probs, preds
def load_pc_and_grasps(data_dir, obj_name):
obj_dir = os.path.join(data_dir, obj_name)
pc_file = os.path.join(obj_dir, 'fused_pc_clean.npy')
grasps_file = os.path.join(obj_dir, 'fused_grasps_clean.npy')
if not os.path.exists(pc_file):
print('Unable to find clean pc and grasps ')
pc_file = os.path.join(obj_dir, 'fused_pc.npy')
grasps_file = os.path.join(obj_dir, 'fused_grasps.npy')
if not os.path.exists(pc_file):
raise ValueError(
'Unable to find un-processed point cloud file {}'.format(pc_file))
pc = np.load(pc_file)
grasps = np.load(grasps_file)
# Ensure that grasp and pc is mean centered
pc_mean = pc[:, :3].mean(axis=0)
pc[:, :3] -= pc_mean
grasps[:, :3, 3] -= pc_mean
# number of candidate grasps
# grasps = farthest_grasps(
# grasps, num_clusters=32, num_grasps=min(50, grasps.shape[0]))
grasps = farthest_grasps(
grasps, num_clusters=32, num_grasps=min(1, grasps.shape[0]))
grasp_idx = 0
pc[:, :3] += pc_mean
grasps[:, :3, 3] += pc_mean
return pc, grasps
def main(args, cfg):
task = args.task # 'scoop'
obj_class = args.obj_class # 'spatula'
obj_name = args.obj_name # 'spatula'
data_dir = args.data_dir
# load GraspGPT
model = load_model(cfg)
# load BERT
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
bert_model = BertModel.from_pretrained("bert-base-uncased")
bert_model = bert_model.to(DEVICE)
bert_model.eval()
# load point cloud and grasps
pc, grasps = load_pc_and_grasps(data_dir, obj_name)
pc_input = regularize_pc_point_count(
pc, cfg.num_points, use_farthest_point=False)
pc_mean = pc_input[:, :3].mean(axis=0)
    pc_input[:, :3] -= pc_mean  # mean subtraction
    grasps[:, :3, 3] -= pc_mean  # mean subtraction
preds = []
probs = []
all_grasps_start_time = time.time()
# eval each grasp in a loop
for i in tqdm.trange(len(grasps)):
start = time.time()
grasp = grasps[i]
pc_color = pc_input[:, 3:]
pc = pc_input[:, :3]
grasp_pc = get_gripper_control_points()
grasp_pc = np.matmul(grasp, grasp_pc.T).T # transform grasps
grasp_pc = grasp_pc[:, :3] # remove latent indicator
latent = np.concatenate(
[np.zeros(pc.shape[0]), np.ones(grasp_pc.shape[0])]) # create latent indicator
latent = np.expand_dims(latent, axis=1)
pc = np.concatenate([pc, grasp_pc], axis=0) # [4103, 3]
pc, grasp = pc_normalize(pc, grasp, pc_scaling=cfg.pc_scaling)
pc = np.concatenate([pc, latent], axis=1) # add back latent indicator
# load language embeddings
pc = torch.tensor([pc])
# language descriptions
obj_desc_txt, task_desc_txt = gen_gpt_desc(obj_class, task)
obj_desc, _, obj_desc_mask = encode_text(obj_desc_txt, tokenizer, bert_model, DEVICE, type='od')
task_desc, _, task_desc_mask = encode_text(task_desc_txt, tokenizer, bert_model, DEVICE, type='td')
        # language instruction
task_ins_txt = input('\nPlease input a natural language instruction (e.g., grasp the knife to cut): ')
task_ins, _, task_ins_mask = encode_text(task_ins_txt, tokenizer, bert_model, DEVICE, type='li')
prob, pred = test(model, pc, obj_desc, obj_desc_mask, task_desc, task_desc_mask, task_ins, task_ins_mask)
preds.append(pred.tolist())
probs.append(prob.tolist())
# output a language instruction and two descriptions
print("\n")
print(f"Natural language instruction:\n{task_ins_txt}\n")
print(f"Object class description:\n{obj_desc_txt}\n")
print(f"Task description:\n{obj_desc_txt}\n")
print('Inference took {}s for {} grasps'.format(time.time() - all_grasps_start_time, len(grasps)))
preds = np.array(preds)
probs = np.array(probs)
# colored with task compatibility score (green is higher)
grasp_colors = np.stack([np.ones(probs.shape[0]) -
probs, probs, np.zeros(probs.shape[0])], axis=1)
# pc and grasp visualization
draw_scene(
pc_input,
grasps,
grasp_colors=list(grasp_colors),
max_grasps=len(grasps))
if __name__ == '__main__':
"""
python gcngrasp/demo.py cfg/eval/gcngrasp/gcngrasp_split_mode_t_split_idx_3_.yml --obj_name pan --obj_class saucepan --task pour
python gcngrasp/demo.py cfg/eval/gcngrasp/gcngrasp_split_mode_t_split_idx_3_.yml --obj_name spatula --obj_class spatula --task scoop
python gcngrasp/demo.py cfg/eval/gcngrasp/gcngrasp_split_mode_t_split_idx_3_.yml --obj_name mug --obj_class mug --task drink
"""
parser = argparse.ArgumentParser(description="visualize data and stuff")
parser.add_argument('--task', help='', default='scoop')
parser.add_argument('--obj_class', help='', default='spatula')
parser.add_argument('--data_dir', help='location of sample data', default='')
parser.add_argument('--obj_name', help='', default='spatula')
parser.add_argument(
'--cfg_file',
help='yaml file in YACS config format to override default configs',
default='cfg/eval/gcngrasp/gcngrasp_split_mode_t_split_idx_3_.yml',
type=str)
args = parser.parse_args()
cfg = get_cfg_defaults()
if args.cfg_file != '':
if os.path.exists(args.cfg_file):
cfg.merge_from_file(args.cfg_file)
else:
raise ValueError('Please provide a valid config file for the --cfg_file arg')
if cfg.base_dir != '':
if not os.path.exists(cfg.base_dir):
raise FileNotFoundError(
'Provided base dir {} not found'.format(
cfg.base_dir))
else:
assert cfg.base_dir == ''
cfg.base_dir = os.path.join(os.path.dirname(__file__), '../data')
cfg.batch_size = 16
if len(cfg.gpus) == 1:
torch.cuda.set_device(cfg.gpus[0])
experiment_dir = os.path.join(cfg.log_dir, cfg.weight_file)
weight_files = os.listdir(os.path.join(experiment_dir, 'weights'))
assert len(weight_files) == 1
cfg.weight_file = os.path.join(experiment_dir, 'weights', weight_files[0])
if args.data_dir == '':
args.data_dir = os.path.join(cfg.base_dir, 'sample_data/pcs')
cfg.freeze()
print(cfg)
main(args, cfg)
| [
"TASK_CLASS"
] |
2024-01-10 | AmalZubidat/SCOTUS_GPT3_Opinions | step03_extract_KG.py | import openai
import os
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
# Set OpenAI API key
openai.api_key = open_file('openaiapikey.txt')
#openai.api_key = "YOUR_API_KEY"
# Set the model to use
model_engine = "text-davinci-003"
# Set the temperature and token count for the generated text
temperature = 0
token_count = 1450
# Read the prompt file and store the contents in a variable
with open("prompt_JSONLD_citation_nodes.txt", "r", encoding='utf-8') as f:
prompt = f.read()
# Iterate through the .txt files in the "chunks_txt" folder
for file in os.listdir("chunks_txt"):
if file.endswith(".txt"):
# Read the contents of the file
with open(os.path.join("chunks_txt", file), "r", encoding='utf-8') as f:
chunk = f.read()
# Replace the placeholder with the chunk
prompt_with_chunk = prompt.replace("<<CHUNK>>", chunk)
# Use the OpenAI API to generate text based on the modified prompt
response = openai.Completion.create(
engine=model_engine,
prompt=prompt_with_chunk,
temperature=temperature,
max_tokens=token_count,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Check if the "kg_json" folder exists and create it if it doesn't
if not os.path.exists("kg_json"):
os.makedirs("kg_json")
# Print the generated text
text = response['choices'][0]['text'].strip()
print('\n\n\n=========================================\n\n\n', text)
# Save the generated text as a .json file in the "kg_json" folder
with open(os.path.join("kg_json", file.replace(".txt", ".json")), "w", encoding='utf-8') as f:
f.write(text)
| [] |
2024-01-10 | ZhouShao-zuyuan/langchain-application | knowledge~document_loaders~loaders~html_loader.py | # -*- coding: utf-8 -*-
from typing import Dict, List, Union
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class HTMLLoader(BaseLoader):
"""Loader that uses beautiful soup to parse HTML files."""
def __init__(
self,
file_path: str,
is_online: bool = False,
open_encoding: Union[str, None] = None,
bs_kwargs: Union[dict, None] = None,
get_text_separator: str = "",
) -> None:
"""Initialise with path, and optionally, file encoding to use, and any kwargs
to pass to the BeautifulSoup object."""
try:
import bs4 # noqa:F401
except ImportError:
raise ValueError(
"beautifulsoup4 package not found, please install it with "
"`pip install beautifulsoup4`"
)
self.file_path = file_path
self.is_online = is_online
self.open_encoding = open_encoding
if bs_kwargs is None:
bs_kwargs = {"features": "lxml"}
self.bs_kwargs = bs_kwargs
self.get_text_separator = get_text_separator
def load(self) -> List[Document]:
from bs4 import BeautifulSoup
if not self.is_online:
"""Load HTML document into document objects."""
with open(self.file_path, "r", encoding=self.open_encoding) as f:
soup = BeautifulSoup(f, **self.bs_kwargs)
else:
import requests
response = requests.get(self.file_path)
html = response.content
soup = BeautifulSoup(html, **self.bs_kwargs)
text = self.__raw_content_parser(soup)
if soup.title:
title = str(soup.title.string)
else:
title = ""
metadata: Dict[str, Union[str, None]] = {
"source": self.file_path,
"title": title,
}
return [Document(page_content=text, metadata=metadata)]
def __raw_content_parser(self, soup):
return soup.get_text(self.get_text_separator)
def __content_parser(self, soup):
# TODO
pass | [] |
2024-01-10 | ZhouShao-zuyuan/langchain-application | models~vicuna_llm.py | # -*- coding: utf-8 -*-
from langchain.llms.base import LLM
from langchain.callbacks.manager import CallbackManagerForLLMRun
from typing import Optional, List, Any
import transformers
from transformers import LlamaForCausalLM, LlamaTokenizer
import torch
import gc
from pydantic import root_validator
from pathlib import Path
import time
# Set Meta Instruction
META_INSTRUCTION = "A chat between a curious user and artificial intelligence assistanct."\
"The assistant gives helpful, detailed, and polite answers to the user's questions."
# Set model path
MODEL_13B_PATH_VERSION_1_1 = ""
MODEL_13B_PATH_VERSION_1_3 = ""
MODEL_33B_PATH_VERSION_1_3 = ""
def load_model(model_path, is_auto):
if torch.cuda.is_available():
num_gpus = torch.cuda.device_count()
if num_gpus > 1:
model = LlamaForCausalLM.from_pretrained(model_path,
low_cpu_mem_usage=True,
torch_dtype=torch.float16,
device_map=device_map(is_auto)).half()
else:
model = LlamaForCausalLM.from_pretrained(model_path,
torch_dtype=torch.float16,
low_cpu_mem_usage=True).half().cuda()
tokenizer = LlamaTokenizer.from_pretrained(model_path)
else:
pass
return model, tokenizer
def device_map(is_auto):
if is_auto:
return "auto"
else:
pass
class VicunaLLM(LLM):
max_token: int = 10000
temperature: float = 0.95
top_p: float = 0.7
repetition_penalty: float = 1.02
history_len: int = 10
version: str = "1.3"
size: int = 13
model_path: str = None
is_auto: bool = False
streaming: bool = False
history: List[List[str]] = []
model: Any
tokenizer: Any
@root_validator()
def validate_environment(cls, values):
version = values.get("version")
size = values.get("size")
if not values.get("model_path"):
if version == "1.1" and size == 13:
values["model_path"] = MODEL_13B_PATH_VERSION_1_1
elif version == "1.3" and size == 13:
values["model_path"] = MODEL_13B_PATH_VERSION_1_3
elif version == "1.3" and size == 33:
values["model_path"] = MODEL_33B_PATH_VERSION_1_3
else:
raise ValueError(f"Invalid version: {version}")
model_path = values.get("model_path")
local_path = Path(f'{model_path}')
is_auto = values.get("is_auto")
try:
model, tokenizer = load_model(local_path, is_auto)
values["model"], values["tokenizer"] = model, tokenizer
except Exception as e:
raise ValueError(f"Some error occured while loading model: {e}")
return values
@property
def _llm_type(self):
return "vicuna"
@property
def _default_params(self):
return {
"max_token": self.max_token,
"temperature": self.temperature,
"top_p": self.top_p,
"repetition_penalty": self.repetition_penalty
}
@property
def _indentifying_params(self):
return {**{"model_path":self.model_path}, **self._default_params}
def __clear_torch_cache(self):
gc.collect()
if torch.has_cuda:
CUDA_DEVICE = "cuda:0"
with torch.cuda.device(CUDA_DEVICE):
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
def _call(self, prompt, stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None) -> str:
streaming = self.streaming
input_p = self.__get_prompt(prompt, self.history)
inputs = self.tokenizer(input_p, return_tensors="pt")
if streaming:
pass
else:
with torch.inference_mode():
                outputs = self.model.generate(
inputs.input_ids.cuda(),
do_sample=True,
max_length=self.max_token,
temperature=self.temperature,
top_p=self.top_p,
repetition_penalty=self.repetition_penalty
)
                response = self.tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:], skip_special_tokens=True)
self.__clear_torch_cache()
self.__collect_memory(prompt, response)
return response
@torch.inference_mode()
def stream(self, prompt, stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
):
pass
def __get_prompt(self, prompt, history):
input_p = META_INSTRUCTION
role = ["USER:", "ASSISTANT:"]
sep = [" ", "</s>"]
input_p += sep[0]
if len(history) > 0:
            for old_query, old_response in history:
                input_p += role[0] + " " + old_query + sep[0] + role[1] + " " + old_response + sep[1]
        input_p += role[0] + " " + prompt + sep[0] + role[1]
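        # The assembled prompt then looks roughly like (hypothetical history):
        #   <meta instruction> USER: <old question> ASSISTANT: <old answer></s>USER: <new question> ASSISTANT: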
return input_p
def __llm_memory(self):
return self.history
    def __collect_memory(self, prompt, response):
        self.history += [[prompt, response]]
# TODO:
| [] |
2024-01-10 | ZhouShao-zuyuan/langchain-application | models~chatglm_llm.py | # -*- coding: utf-8 -*-
from langchain.llms.base import LLM
from langchain.callbacks.manager import CallbackManagerForLLMRun
from typing import Optional, List, Any
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModel, AutoConfig
import torch
import gc
from pydantic import root_validator
from pathlib import Path
import time
# Set model path
MODEL_PATH_VERSION_1 = ""
MODEL_PATH_VERSION_2 = ""
def load_model(model_path, is_auto, model_config):
if torch.cuda.is_available():
num_gpus = torch.cuda.device_count()
if num_gpus > 1:
model = AutoModel.from_pretrained(model_path,
config=model_config,
torch_dtype=torch.float16,
trust_remote_code=True,
device_map=device_map(is_auto)).half()
else:
model = AutoModel.from_pretrained(model_path,
config=model_config,
torch_dtype=torch.float16,
trust_remote_code=True).half().cuda()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
else:
pass
return model, tokenizer
def device_map(is_auto):
if is_auto:
return "auto"
else:
pass
class ChatglmLLM(LLM):
max_token: int = 10000
temperature: float = 0.95
top_p: float = 0.7
history_len: int = 10
version: int = 2
model_path: str = None
is_auto: bool = False
pad_token_id: int = 2
streaming: bool = False
history: List[List[str]] = []
model: Any
tokenizer: Any
model_config: Any
@root_validator()
def validate_environment(cls, values):
version = values.get("version")
if not values.get("model_path"):
if version == 1:
values["model_path"] = MODEL_PATH_VERSION_1
elif version == 2:
values["model_path"] = MODEL_PATH_VERSION_2
else:
raise ValueError(f"Invalid version: {version}")
model_path = values.get("model_path")
local_path = Path(f'{model_path}')
is_auto = values.get("is_auto")
try:
values["model_config"] = AutoConfig.from_pretrained(local_path, trust_remote_code=True)
model, tokenizer = load_model(local_path, is_auto, values["model_config"])
values["model"], values["tokenizer"] = model, tokenizer
except Exception as e:
raise ValueError(f"Some error occured while loading model: {e}")
return values
@property
def _llm_type(self):
return "chatglm"
@property
def _default_params(self):
return {
"max_token": self.max_token,
"temperature": self.temperature,
"top_p": self.top_p
}
@property
    def _identifying_params(self):
return {**{"model_path":self.model_path}, **self._default_params}
def __clear_torch_cache(self):
gc.collect()
if torch.has_cuda:
CUDA_DEVICE = "cuda:0"
with torch.cuda.device(CUDA_DEVICE):
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
def _call(self, prompt, stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None) -> str:
streaming = self.streaming
history = self.history
if streaming:
for stream_response in self.stream(prompt, stop, run_manager):
response = stream_response
else:
with torch.inference_mode():
response, _ = self.model.chat(
self.tokenizer,
prompt,
history=history[-self.history_len:] if self.history_len > 0 else [],
max_length=self.max_token,
temperature=self.temperature,
top_p=self.top_p,
pad_token_id=self.pad_token_id
)
self.__clear_torch_cache()
return response
@torch.inference_mode()
def stream(self, prompt, stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
):
for num, (stream_response, _) in enumerate(self.model.stream_chat(self.tokenizer,
prompt,
            history=self.history[-self.history_len:] if self.history_len > 0 else [],
max_length=self.max_token,
temperature=self.temperature,
top_p=self.top_p,
pad_token_id=self.pad_token_id)):
yield stream_response
def __llm_memory(self):
return self.history
    def __collect_memory(self, prompt, response):
        self.history += [[prompt, response]]
# TODO:
| [] |
2024-01-10 | ZhouShao-zuyuan/langchain-application | knowledge~document_loaders~loaders~pdf_loader.py | # -*- coding: utf-8 -*-
from typing import List
from langchain.document_loaders.unstructured import UnstructuredFileLoader
from paddleocr import PaddleOCR
import os
import fitz
import nltk
class PaddlePDFLoader(UnstructuredFileLoader):
"""Loader that uses unstructured to load image files, such as PNGs and JPGs."""
def __set_nltk_path(self) -> None:
        NLTK_DATA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "nltk_data")
nltk.data.path = [NLTK_DATA_PATH] + nltk.data.path
def _get_elements(self) -> List:
self.__set_nltk_path()
txt_file_path = self.__pdf_ocr_txt(self.file_path)
from unstructured.partition.text import partition_text
return partition_text(filename=txt_file_path, **self.unstructured_kwargs)
def __pdf_ocr_txt(self, filepath, dir_path="tmp_files"):
full_dir_path = os.path.join(os.path.dirname(filepath), dir_path)
if not os.path.exists(full_dir_path):
os.makedirs(full_dir_path)
ocr = PaddleOCR(use_angle_cls=True, lang="ch", use_gpu=False, show_log=False)
doc = fitz.open(filepath)
txt_file_path = os.path.join(full_dir_path, f"{os.path.split(filepath)[-1]}.txt")
img_name = os.path.join(full_dir_path, 'tmp.png')
with open(txt_file_path, 'w', encoding='utf-8') as fout:
for i in range(doc.page_count):
page = doc[i]
text = page.get_text("")
fout.write(text)
fout.write("\n")
img_list = page.get_images()
for img in img_list:
pix = fitz.Pixmap(doc, img[0])
if pix.n - pix.alpha >= 4:
pix = fitz.Pixmap(fitz.csRGB, pix)
pix.save(img_name)
result = ocr.ocr(img_name)
ocr_result = [i[1][0] for line in result for i in line]
fout.write("\n".join(ocr_result))
if os.path.exists(img_name):
os.remove(img_name)
return txt_file_path
| [] |
2024-01-10 | WisdomAlwaysWins/hey-groot-api | plant~plant_info.py | from langchain.tools import BaseTool
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
# ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
from langchain.agents.agent_types import AgentType
from langchain_experimental.agents.agent_toolkits import create_csv_agent
class PlantInfo(BaseTool):
name = "Plant_Info"
description = """It's a good tool to use when asking about plant information. Summarize it in 100 characters"""
def _run(self, query: str) -> str:
agent = create_csv_agent(
ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613"),
"static/plant_info_.csv",
verbose=False,
agent_type=AgentType.OPENAI_FUNCTIONS,
)
return agent.run(query)
async def _arun(self, query: str) -> str:
raise NotImplementedError("질문에 답할 수 없어요.")
| [
"It's a good tool to use when asking about plant information. Summarize it in 100 characters"
] |
2024-01-10 | WisdomAlwaysWins/hey-groot-api | plant~plant_for_sensor.py | import json
import random
import pickle
import numpy as np
from langchain.tools import BaseTool
JSON_PATH = 'static/sensor_response_data.json'
MODEL_PATH = 'static/sentence_transformer_model.pkl'
with open(JSON_PATH, 'r', encoding='utf-8') as f:
data = json.load(f)
with open(MODEL_PATH, 'rb') as f:
model = pickle.load(f)
class PlantSensor(BaseTool):
name = "Plant_For_Sensor"
description = """조건에 맞는 응답을 가져올 때 사용하는 도구이다. 이때, 배열에 있는 응답 중 하나를 랜덤으로 추출하고 그대로 응답한다."""
data : list
plant_type : str
def __init__(self, data : list = [0,0,0,0], plant_type= "") :
super(PlantSensor, self).__init__(data = data, plant_type = plant_type)
# print(plant_type, data)
def _run(self, query: str) -> str:
return get_response(query, arduino_data=self.data, plant_type=self.plant_type)
async def _arun(self, query: str) -> str:
raise NotImplementedError("질문에 답할 수 없어요.")
def get_response(query, arduino_data, plant_type):
def get_most_similar_question(query, plant_type):
input = plant_type + " " + query
input_embedding = model.encode(input)
similarities = {}
for plant in data['plants'].keys():
plant_embedding = model.encode(plant)
similarity = np.dot(input_embedding, plant_embedding) / (np.linalg.norm(input_embedding) * np.linalg.norm(plant_embedding))
similarities[plant] = similarity
most_similar_plant = max(similarities, key=similarities.get)
return most_similar_plant
current_conditions = {
"temperature": arduino_data[0],
"humidity": arduino_data[1],
"illumination": arduino_data[2],
"moisture": arduino_data[3]
}
responses = data["plants"].get(get_most_similar_question(query, plant_type), {}).get("environment_responses", [])
answers = data["responses"]
valid_responses = []
for response in responses:
conditions = response["conditions"]
for condition in conditions :
if (conditions[condition]["low"] <= current_conditions[condition] <= conditions[condition]["high"]) :
if condition == "temperature" : valid_responses.extend(answers["mid"]["temperature"].values())
elif condition == "humidity" : valid_responses.extend(answers["mid"]["humidity"].values())
elif condition == "illumination" : valid_responses.extend(answers["mid"]["illumination"].values())
elif condition == "moisture" : valid_responses.extend(answers["mid"]["moisture"].values())
elif (current_conditions[condition] < conditions[condition]["low"]) :
if condition == "temperature" : valid_responses.extend(answers["low"]["temperature"].values())
elif condition == "humidity" : valid_responses.extend(answers["low"]["humidity"].values())
elif condition == "illumination" : valid_responses.extend(answers["low"]["illumination"].values())
elif condition == "moisture" : valid_responses.extend(answers["low"]["moisture"].values())
elif (conditions[condition]["high"] < current_conditions[condition]) :
if condition == "temperature" : valid_responses.extend(answers["high"]["temperature"].values())
elif condition == "humidity" : valid_responses.extend(answers["high"]["humidity"].values())
elif condition == "illumination" : valid_responses.extend(answers["high"]["illumination"].values())
elif condition == "moisture" : valid_responses.extend(answers["high"]["moisture"].values())
'''
mid_is_valid = all(
conditions[condition]["low"] <= current_conditions[condition] <= conditions[condition]["high"]
for condition in conditions
)
low_is_valid = all(
current_conditions[condition] < conditions[condition]["low"]
for condition in conditions
)
high_is_valid = all(
conditions[condition]["high"] < current_conditions[condition]
for condition in conditions
)
'''
all_responses = []
for resp in valid_responses:
for condition, value in current_conditions.items():
# print(condition, value)
resp = resp.replace(f"${{{condition}}}", str(value))
all_responses.append(resp)
if all_responses:
return all_responses
return ["지금은 응답을 해드릴 수 없어요."]
| [
"조건에 맞는 응답을 가져올 때 사용하는 도구이다. 이때, 배열에 있는 응답 중 하나를 랜덤으로 추출하고 그대로 응답한다."
] |
2024-01-10 | WisdomAlwaysWins/hey-groot-api | plant~response_generator.py | import pickle
import numpy as np
import json
import random
from langchain.tools import BaseTool
MODEL_PATH = 'static/sentence_transformer_model.pkl'
JSON_PATH = 'static/embedding_data.json'
with open(MODEL_PATH, 'rb') as f:
model = pickle.load(f)
with open(JSON_PATH, 'r') as f:
data = json.load(f)
class ResponseGenerator(BaseTool):
name = "Response_Generator"
description = """일상적인 대화 시 사용하는 도구"""
def _run(self, query: str) -> str:
return response_generator(query)
async def _arun(self, query: str) -> str:
raise NotImplementedError("질문에 답할 수 없어요.")
def response_generator(query):
def get_most_similar_question(query):
query_embedding = model.encode(query)
similarities = []
for question in data.keys():
question_embedding = model.encode(question)
similarity = np.dot(query_embedding, question_embedding) / (np.linalg.norm(query_embedding) * np.linalg.norm(question_embedding))
similarities.append(similarity)
most_similar_idx = np.argmax(similarities)
return list(data.keys())[most_similar_idx]
def get_response(query):
most_similar_question = get_most_similar_question(query)
responses = data[most_similar_question]
# 무작위로 응답을 선택
response = random.choice(responses)
response_text = list(response.values())[0]
return response_text
return get_response(query)
| [
"일상적인 대화 시 사용하는 도구"
] |
2024-01-10 | poldrack/ohbm2023 | fit_dynamic_topic_model.py | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.5
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown]
# Run topic modeling for each year
# %%
# code generated by ChatGPT
import pickle
import os
from bertopic import BERTopic
from bertopic.representation import OpenAI
from sentence_transformers import SentenceTransformer
from umap import UMAP
from hdbscan import HDBSCAN
from sklearn.feature_extraction.text import CountVectorizer
from bertopic.representation import KeyBERTInspired
from bertopic.vectorizers import ClassTfidfTransformer
import argparse
import openai
import pandas as pd
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument('--min_cluster_size', type=int, default=50)
argparser.add_argument('--n_neighbors', type=int, default=15)
argparser.add_argument('--year', type=int, default=None)
argparser.add_argument('--reduce_topics', action='store_true')
argparser.add_argument('--use_gpt4', action='store_true')
args = argparser.parse_args()
datadir = 'data'
ldadir = os.path.join(datadir, 'lda_models')
if not os.path.exists(ldadir):
os.makedirs(ldadir)
sentences = []
years = []
if args.year is not None:
years_to_process = [args.year]
else:
years_to_process = list(range(1990, 2023))
# load sentence data
for year in years_to_process:
print('loading data for year %s' % year)
abstract_file = os.path.join(
datadir, f'bigrammed_cleaned_abstracts_{year}.pkl'
)
if not os.path.exists(abstract_file):
print('File %s does not exist' % abstract_file)
continue
with open(abstract_file, 'rb') as f:
new_sentences = [' '.join(i) for i in pickle.load(f)]
sentences = sentences + new_sentences
years = years + [year] * len(new_sentences)
assert len(sentences) > 0
assert len(sentences) == len(years)
model_name = 'bertopic' # fit plain model first
if args.year is not None:
model_name = model_name + f'_{args.year}'
args.min_cluster_size = 20
modeldir = 'models'
if not os.path.exists(modeldir):
os.makedirs(modeldir)
# Step 1 - Extract embeddings
embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
# Step 2 - Reduce dimensionality
# ala https://maartengr.github.io/BERTopic/faq.html#i-have-too-many-topics-how-do-i-decrease-them
umap_model = UMAP(
n_neighbors=args.n_neighbors, n_components=5, min_dist=0.0, metric='cosine'
)
# Step 3 - Cluster reduced embeddings
hdbscan_model = HDBSCAN(
min_cluster_size=args.min_cluster_size,
metric='euclidean',
cluster_selection_method='eom',
prediction_data=True,
)
# Step 4 - Tokenize topics
vectorizer_model = CountVectorizer(stop_words='english')
# Step 5 - Create topic representation
ctfidf_model = ClassTfidfTransformer()
# Step 6 - (Optional) Fine-tune topic representations with
# a `bertopic.representation` model
representation_model = KeyBERTInspired()
if args.reduce_topics:
nr_topics = 'auto'
else:
nr_topics = None
# All steps together
topic_model = BERTopic(
verbose=True,
embedding_model=embedding_model, # Step 1 - Extract embeddings
umap_model=umap_model, # Step 2 - Reduce dimensionality
hdbscan_model=hdbscan_model, # Step 3 - Cluster reduced embeddings
vectorizer_model=vectorizer_model, # Step 4 - Tokenize topics
ctfidf_model=ctfidf_model, # Step 5 - Extract topic words
        representation_model=representation_model, # Step 6 - Fine-tune topic representations
nr_topics=nr_topics
)
topics, probs = topic_model.fit_transform(sentences)
df = pd.DataFrame({"Document": sentences, "Topic": topics})
# need to exclude embedding model as it causes GPU/CPU conflict
topic_model.save(
os.path.join(modeldir, model_name),
serialization='pytorch',
save_ctfidf=True,
save_embedding_model=False,
)
df.to_csv(os.path.join(modeldir, model_name + '.csv'))
if args.use_gpt4:
        # now run with GPT-4 representation model
with open('openai_api_key.txt', 'r') as f:
openai.api_key = f.read().strip()
representation_model = OpenAI(
model='gpt-4', chat=True, exponential_backoff=True
)
topic_model.update_topics(sentences, representation_model=representation_model)
model_name += '_gpt4'
topics, probs = topic_model.transform(sentences)
df = pd.DataFrame({"Document": sentences, "Topic": topics})
topic_model.save(
os.path.join(modeldir, model_name),
serialization='pytorch',
save_ctfidf=True,
save_embedding_model=False,
)
df.to_csv(os.path.join(modeldir, model_name + '.csv'))
| [] |
2024-01-10 | AmrElsehemy/AI4Egypt-ChatWithYourData | final_chat_over_docs.py | import secret_keys
import os
import streamlit as st
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
openai_api_key = os.environ.get('OPENAI_API_KEY')
def generate_chain(uploaded_file):
# Load document if file is uploaded
if uploaded_file is not None:
print("indexing")
documents = [uploaded_file.read().decode()]
# Split documents into chunks
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.create_documents(documents)
# Select embeddings
embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
# Create a vectorstore from documents
db = Chroma.from_documents(texts, embeddings)
# Create retriever interface
retriever = db.as_retriever()
# Create QA chain
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
qa = ConversationalRetrievalChain.from_llm(OpenAI(openai_api_key=openai_api_key, temperature=0), verbose = True, retriever=retriever, memory=memory)
return qa
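# Usage sketch (illustration only): because the chain carries a ConversationBufferMemory,
# follow-up questions are answered with the earlier exchange as chat history, e.g.
#   qa = generate_chain(uploaded_file)
#   qa.run("What is the article about?")
#   qa.run("Who wrote it?")   # resolved against the previous question/answer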
# Page title
st.set_page_config(page_title='🦜🔗 Ask your Document')
st.title('🦜🔗 Ask your Document')
# File upload
uploaded_file = st.file_uploader('Upload an article', type='txt')
with st.form('index'):
submitted = st.form_submit_button('Index', disabled=not(uploaded_file))
if submitted:
with st.spinner('Calculating...'):
if "qa_chain" not in st.session_state:
st.session_state["qa_chain"] = generate_chain(uploaded_file)
if "qa_chain" in st.session_state:
qa = st.session_state["qa_chain"]
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input():
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
response = qa.run(prompt)
st.session_state.messages.append({"role": "assistant", "content": response})
st.chat_message("assistant").write(response)
| [
"How can I help you?"
] |
2024-01-10 | AmrElsehemy/AI4Egypt-ChatWithYourData | pure_chat.py | import openai
import streamlit as st
with st.sidebar:
openai_api_key = st.text_input("OpenAI API Key", key="chatbot_api_key", type="password")
"[View the source code](https://github.com/streamlit/llm-examples/blob/main/Chatbot.py)"
"[](https://codespaces.new/streamlit/llm-examples?quickstart=1)"
st.title("💬 Chatbot")
if "messages" not in st.session_state:
st.session_state["messages"] = [{"role": "assistant", "content": "How can I help you?"}]
for msg in st.session_state.messages:
st.chat_message(msg["role"]).write(msg["content"])
if prompt := st.chat_input():
if not openai_api_key:
st.info("Please add your OpenAI API key to continue.")
st.stop()
openai.api_key = openai_api_key
st.session_state.messages.append({"role": "user", "content": prompt})
st.chat_message("user").write(prompt)
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=st.session_state.messages)
msg = response.choices[0].message
st.session_state.messages.append(msg)
st.chat_message("assistant").write(msg.content) | [
"How can I help you?"
] |
2024-01-10 | AmrElsehemy/AI4Egypt-ChatWithYourData | ask_document.py | import streamlit as st
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
def generate_response(uploaded_file, openai_api_key, query_text):
# Load document if file is uploaded
if uploaded_file is not None:
documents = [uploaded_file.read().decode()]
# Split documents into chunks
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
texts = text_splitter.create_documents(documents)
# Select embeddings
embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
# Create a vectorstore from documents
db = Chroma.from_documents(texts, embeddings)
# Create retriever interface
retriever = db.as_retriever()
# Create QA chain
qa = RetrievalQA.from_chain_type(llm=OpenAI(openai_api_key=openai_api_key), chain_type='stuff', retriever=retriever, verbose=True)
return qa.run(query_text)
# Page title
st.set_page_config(page_title='🦜🔗 Ask your Document')
st.title('🦜🔗 Ask your Document')
# File upload
uploaded_file = st.file_uploader('Upload an article', type='txt')
# Query text
query_text = st.text_input('Enter your question:', placeholder = 'Please provide a short summary.', disabled=not uploaded_file)
# Form input and query
result = []
with st.form('myform', clear_on_submit=True):
openai_api_key = st.text_input('OpenAI API Key', type='password', disabled=not (uploaded_file and query_text))
submitted = st.form_submit_button('Submit', disabled=not(uploaded_file and query_text))
if submitted and openai_api_key.startswith('sk-'):
with st.spinner('Calculating...'):
response = generate_response(uploaded_file, openai_api_key, query_text)
result.append(response)
del openai_api_key
if len(result):
st.info(response)
| [] |
2024-01-10 | intersun/PKD-for-BERT-Model-Compression | BERT~examples~run_openai_gpt.py | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenAI GPT model fine-tuning script.
Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py
It self adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py
This script with default values fine-tunes and evaluate a pretrained OpenAI GPT on the RocStories dataset:
python run_openai_gpt.py \
--model_name openai-gpt \
--do_train \
--do_eval \
--train_dataset $ROC_STORIES_DIR/cloze_test_val__spring2016\ -\ cloze_test_ALL_val.csv \
--eval_dataset $ROC_STORIES_DIR/cloze_test_test__spring2016\ -\ cloze_test_ALL_test.csv \
--output_dir ../log \
--train_batch_size 16 \
"""
import argparse
import os
import csv
import random
import logging
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from pytorch_pretrained_bert import OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, OpenAIAdam, cached_path
ROCSTORIES_URL = "https://s3.amazonaws.com/datasets.huggingface.co/ROCStories.tar.gz"
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
""" Output a list of tuples(story, 1st continuation, 2nd continuation, label) """
with open(dataset_path, encoding='utf_8') as f:
f = csv.reader(f)
output = []
next(f) # skip the first line
for line in tqdm(f):
output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1])-1))
return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
""" Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)
To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
"""
tensor_datasets = []
for dataset in encoded_datasets:
n_batch = len(dataset)
input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
lm_labels = np.full((n_batch, 2, input_len), fill_value=-1, dtype=np.int64)
mc_labels = np.zeros((n_batch,), dtype=np.int64)
for i, (story, cont1, cont2, mc_label), in enumerate(dataset):
with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
input_ids[i, 0, :len(with_cont1)] = with_cont1
input_ids[i, 1, :len(with_cont2)] = with_cont2
mc_token_ids[i, 0] = len(with_cont1) - 1
mc_token_ids[i, 1] = len(with_cont2) - 1
lm_labels[i, 0, :len(with_cont1)-1] = with_cont1[1:]
lm_labels[i, 1, :len(with_cont2)-1] = with_cont2[1:]
mc_labels[i] = mc_label
all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
return tensor_datasets
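# Illustration only (not used by the training script): a minimal sketch of the per-example
# layout built by pre_process_datasets, using made-up token ids for the special tokens.
def _layout_example():
    start, delim, clf = 50, 51, 52          # hypothetical ids for _start_/_delimiter_/_classify_
    story, cont1 = [1, 2, 3], [4, 5]
    with_cont1 = [start] + story + [delim] + cont1 + [clf]
    mc_token_id = len(with_cont1) - 1       # index of the classification token
    lm_labels = with_cont1[1:]              # next-token targets; padded positions stay at -1
    return with_cont1, mc_token_id, lm_labels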
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', type=str, default='openai-gpt',
help='pretrained model name')
parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument('--train_dataset', type=str, default='')
parser.add_argument('--eval_dataset', type=str, default='')
parser.add_argument('--seed', type=int, default=42)
parser.add_argument('--num_train_epochs', type=int, default=3)
parser.add_argument('--train_batch_size', type=int, default=8)
parser.add_argument('--eval_batch_size', type=int, default=16)
parser.add_argument('--max_grad_norm', type=int, default=1)
parser.add_argument('--learning_rate', type=float, default=6.25e-5)
parser.add_argument('--warmup_proportion', type=float, default=0.002)
parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
parser.add_argument('--weight_decay', type=float, default=0.01)
parser.add_argument('--lm_coef', type=float, default=0.9)
parser.add_argument('--n_valid', type=int, default=374)
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
special_tokens = ['_start_', '_delimiter_', '_classify_']
tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name, special_tokens=special_tokens)
special_tokens_ids = list(tokenizer.convert_tokens_to_ids(token) for token in special_tokens)
model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name, num_special_tokens=len(special_tokens))
model.to(device)
# Load and encode the datasets
if not args.train_dataset and not args.eval_dataset:
roc_stories = cached_path(ROCSTORIES_URL)
def tokenize_and_encode(obj):
""" Tokenize and encode a nested object """
if isinstance(obj, str):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
elif isinstance(obj, int):
return obj
return list(tokenize_and_encode(o) for o in obj)
logger.info("Encoding dataset...")
train_dataset = load_rocstories_dataset(args.train_dataset)
eval_dataset = load_rocstories_dataset(args.eval_dataset)
datasets = (train_dataset, eval_dataset)
encoded_datasets = tokenize_and_encode(datasets)
# Compute the max input length for the Transformer
max_length = model.config.n_positions // 2 - 2
input_length = max(len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3 \
for dataset in encoded_datasets for story, cont1, cont2, _ in dataset)
input_length = min(input_length, model.config.n_positions) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]
train_data = TensorDataset(*train_tensor_dataset)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
eval_data = TensorDataset(*eval_tensor_dataset)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
num_train_optimization_steps = len(train_data) * args.num_train_epochs // args.train_batch_size
optimizer = OpenAIAdam(optimizer_grouped_parameters,
lr=args.learning_rate,
warmup=args.warmup_proportion,
max_grad_norm=args.max_grad_norm,
weight_decay=args.weight_decay,
t_total=num_train_optimization_steps)
if args.do_train:
nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_steps = 0
tqdm_bar = tqdm(train_dataloader, desc="Training")
for step, batch in enumerate(tqdm_bar):
batch = tuple(t.to(device) for t in batch)
input_ids, mc_token_ids, lm_labels, mc_labels = batch
losses = model(input_ids, mc_token_ids, lm_labels, mc_labels)
loss = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
optimizer.zero_grad()
tr_loss += loss.item()
exp_average_loss = loss.item() if exp_average_loss is None else 0.7*exp_average_loss+0.3*loss.item()
nb_tr_steps += 1
tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, optimizer.get_lr()[0])
# Save a trained model
if args.do_train:
model_to_save = model.module if hasattr(model, 'module') else model # Only save the model it-self
output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
config = model.config
torch.save(model_to_save.state_dict(), output_model_file)
# Load a trained model that you have fine-tuned
model_state_dict = torch.load(output_model_file)
model = OpenAIGPTDoubleHeadsModel(config)
model.load_state_dict(model_state_dict)
model.to(device)
if args.do_eval:
model.eval()
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(device) for t in batch)
input_ids, mc_token_ids, lm_labels, mc_labels = batch
with torch.no_grad():
_, mc_loss = model(input_ids, mc_token_ids, lm_labels, mc_labels)
_, mc_logits = model(input_ids, mc_token_ids)
mc_logits = mc_logits.detach().cpu().numpy()
mc_labels = mc_labels.to('cpu').numpy()
tmp_eval_accuracy = accuracy(mc_logits, mc_labels)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
train_loss = tr_loss/nb_tr_steps if args.do_train else None
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy,
'train_loss': train_loss}
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == '__main__':
main()
| [] |
2024-01-10 | chubbyyb/AI_Game | story.py |
from openai import OpenAI
import json
import requests
STORIES_DIR = "stories/"
proompt = '''
Create a investigative story about one of the following:
"1. A mysterious disappearance in a small town.",
"2. Unexplained phenomena in a remote location.",
"3. Corporate corruption within a powerful tech company.",
"4. Ancient artifacts with mysterious powers resurfacing.",
"5. A secret society operating in plain sight.",
"6. Government experiments gone wrong.",
"7. Time travel anomalies affecting a community.",
"8. A renowned scientist's controversial discovery."
"9. Make your own!"
The json key of the clues will be the name of the door, e.g (front door), and the value will be the clue within that room.
The title may not be more than 16 characters long.
The story should be one json value
The characters can all be innocent, or all be guilty, or any combination of the two.
prompt will be a prompt for the AI to generate an image of the story.
Make sure its in a json file with the following format:
"title": "",
"story": [
"",
"",
"'"
],
"prompt": "",
"characters": {
"suspect": {
"name": ,
"alibi": ,
"confirmation": ",
"mood": ,
"innocent": true/false
},
"witness": {
"name": ,
"observation": ,
"description": ,
"mood": ,
"innocent": true/false
},
"npcs": [
{
"name": ,
"dialogue": ,
"mood": ,
"innocent": true/false
},
{
"name": "",
"dialogue": "",
"mood": ,
"innocent": true/false
}
]
},
"clues": {
"": "",
"": "",
"": "",
"": ""
}
'''
class Story:
def __init__(self):
self.api_key = "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" # This is the key for the openAI API
self.client = OpenAI(api_key=self.api_key) # This is the client for the openAI API
def make_story(self):
completion = self.client.chat.completions.create(
model="gpt-3.5-turbo-1106", # Using this model for more context
            temperature=1.3, # Do not increase above 1.5 or you will get gobbledygook
messages=[{"role": "user", "content": proompt}]) # This is the prompt for the AI to generate the story
try: # This is to catch any errors
self.json_content = json.loads(completion.choices[0].message.content) # This is the json content of the story
self.generate_image(self.json_content["prompt"], self.json_content["title"]) # This generates an image of the story
return self.save_story() # This saves the story to a json file
except:
print("AI Generated story failed") # This is the error message
return 0 # This is the error code
def save_story(self):
with open(STORIES_DIR+self.json_content["title"]+".json", "w") as f: # This saves the story to a json file
json.dump(self.json_content, f, indent=4)
return STORIES_DIR+self.json_content["title"]+".json"
def get_story(self, title):
with open(STORIES_DIR+title+".json", "r") as f: # This gets the story from a json file
return json.load(f)
def generate_image(self, prompt, name):
image = self.client.images.generate( # This generates an image of the story
prompt=prompt,
n=1,
size='512x512'
)
with open(f"{STORIES_DIR+name}.png", "wb") as f: # This saves the image to a png file
f.write(requests.get(image.dict()["data"][0]["url"]).content)
| [
"\nCreate a investigative story about one of the following:\n \"1. A mysterious disappearance in a small town.\",\n \"2. Unexplained phenomena in a remote location.\",\n \"3. Corporate corruption within a powerful tech company.\",\n \"4. Ancient artifacts with mysterious powers resurfacing.\",\n \"5. A secret society operating in plain sight.\",\n \"6. Government experiments gone wrong.\",\n \"7. Time travel anomalies affecting a community.\",\n \"8. A renowned scientist's controversial discovery.\"\n \"9. Make your own!\"\n\nThe json key of the clues will be the name of the door, e.g (front door), and the value will be the clue within that room.\nThe title may not be more than 16 characters long.\nThe story should be one json value\nThe characters can all be innocent, or all be guilty, or any combination of the two.\nprompt will be a prompt for the AI to generate an image of the story.\n\nMake sure its in a json file with the following format:\n \"title\": \"\",\n \"story\": [\n \"\",\n \"\",\n \"'\"\n ],\n \"prompt\": \"\",\n \"characters\": {\n \"suspect\": {\n \"name\": ,\n \"alibi\": ,\n \"confirmation\": \",\n \"mood\": ,\n \"innocent\": true/false\n },\n \"witness\": {\n \"name\": ,\n \"observation\": ,\n \"description\": ,\n \"mood\": ,\n \"innocent\": true/false\n },\n \"npcs\": [\n {\n \"name\": ,\n \"dialogue\": ,\n \"mood\": ,\n \"innocent\": true/false\n },\n {\n \"name\": \"\",\n \"dialogue\": \"\",\n \"mood\": ,\n \"innocent\": true/false\n }\n ]\n },\n \"clues\": {\n \"\": \"\",\n \"\": \"\",\n \"\": \"\",\n \"\": \"\"\n }\n"
] |
2024-01-10 | prophecy-samples/gen-ai-chatbot-template | pipelines~chatbot_live~code~chatbot_live~graph~answer_question.py | from pyspark.sql import *
from pyspark.sql.functions import *
from pyspark.sql.types import *
from prophecy.utils import *
from prophecy.libs import typed_lit
from chatbot_live.config.ConfigStore import *
from chatbot_live.udfs.UDFs import *
def answer_question(spark: SparkSession, Aggregate_1: DataFrame) -> DataFrame:
from spark_ai.llms.openai import OpenAiLLM
from pyspark.dbutils import DBUtils
OpenAiLLM(api_key = DBUtils(spark).secrets.get(scope = "open_ai", key = "api_key")).register_udfs(spark = spark)
return Aggregate_1\
.withColumn("_context", col("content_chunk"))\
.withColumn("_query", col("input"))\
.withColumn(
"openai_answer",
expr(
"openai_answer_question(_context, _query, \" Answer the question based on the context below. \n\nContext:\n```\n{context}\n```\n\nQuestion: \n```\n{query}\n```\n\nAnswer:\n \")"
)
)\
.drop("_context", "_query")
| [] |
2024-01-10 | prophecy-samples/gen-ai-chatbot-template | pipelines~chatbot_batch~code~chatbot_batch~graph~answer_question.py | from pyspark.sql import *
from pyspark.sql.functions import *
from pyspark.sql.types import *
from prophecy.utils import *
from prophecy.libs import typed_lit
from chatbot_batch.config.ConfigStore import *
from chatbot_batch.udfs.UDFs import *
def answer_question(spark: SparkSession, Aggregate_1: DataFrame) -> DataFrame:
from spark_ai.llms.openai import OpenAiLLM
from pyspark.dbutils import DBUtils
OpenAiLLM(api_key = DBUtils(spark).secrets.get(scope = "open_ai", key = "token")).register_udfs(spark = spark)
return Aggregate_1\
.withColumn("_context", col("context"))\
.withColumn("_query", col("input"))\
.withColumn(
"openai_answer",
expr(
"openai_answer_question(_context, _query, \" Answer the question based on the context below. \n\nContext:\n```\n{context}\n```\n\nQuestion: \n```\n{query}\n```\n\nAnswer:\n \")"
)
)\
.drop("_context", "_query")
| [] |
2024-01-10 | KAJdev/Anya | scales~misc~character.py | import asyncio
import random
import time
from naff import InteractionContext, listen, Extension, slash_command, slash_option
from os import getenv
import openai
import models
from naff.api.events import MessageCreate
NAMES = [
'anya',
]
PROMPT = """
The following is a conversation with Anya who is a telepathic esper. Anya is a short young girl with fair skin and green eyes who is six years old. She is impressionable and loves spies.
Her father is named loid forger, a spy. and her mother is named yor forger, an assassin. She also has a dog named bond that can see the future.
{past}
{name}: {message}
anya:
"""
class Engine:
value = "text-davinci-003"
client = openai.Client(getenv('OPENAI_TOKEN'), default_engine=Engine)
class LimitedList(list):
"""
A list with a max amount of entries
"""
def __init__(self, length: int):
self.length = length
def append(self, item):
if len(self) >= self.length:
self.pop(0)
super().append(item)
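# Usage sketch (illustration only): once full, the oldest entry is dropped before appending.
#   recent = LimitedList(2)
#   recent.append("a"); recent.append("b"); recent.append("c")
#   list(recent) == ["b", "c"]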
class Interaction:
"""
An interaction between someone and the bot
"""
def __init__(self, name: str, message: str, response: str) -> None:
self.name = name
self.message = message
self.response = response
def __str__(self) -> str:
return f"{self.name}: {self.message}\nAnya: {self.response}"
class Memory:
"""
Anya's memory
"""
def __init__(self, length: int = 5) -> None:
self.data = {}
self.length = length
def get_past(self, user: int) -> str:
memories = self.data.get(user, LimitedList(self.length))
return "\n".join([str(m) for m in memories])
def remember(self, user: int, interaction: Interaction) -> None:
memories = self.data.get(user, LimitedList(self.length))
for memory in memories:
if interaction.response.lower() == memory.response.lower():
return # don't add duplicate responses
memories.append(interaction)
self.data[user] = memories
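# Usage sketch (illustration only, with made-up values): each user id keeps its own bounded log.
#   memory = Memory(length=5)
#   memory.remember(1234, Interaction("alice", "hi anya", "waku waku!"))
#   memory.get_past(1234)   # -> "alice: hi anya\nAnya: waku waku!"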
class Character(Extension):
def __init__(self, bot):
super().__init__()
self.memory = Memory()
self.past_messages = LimitedList(100)
@slash_command("train", description="train anya with a message")
@slash_option("prompt", "what is said to anya",
opt_type=3,
required=True
)
@slash_option("completion", "what should be said by anya",
opt_type=3,
required=True,
)
async def train(self, ctx: InteractionContext, prompt: str, completion: str):
"""
Train Anya with a message
"""
intactn = {
'prompt': prompt,
'completion': completion,
}
await self.bot.db._update("training_data", {'prompt': prompt}, {'$set': intactn}, upsert=True)
amount = await self.bot.db.db.training_data.count_documents({})
await ctx.send(f"Inserted interaction `#{amount}`:\n\n```json\n{intactn}```")
@slash_command("data", description="view the last few JSONL lines of training data")
async def data(self, ctx: InteractionContext):
last_few = await self.bot.db.db.training_data.find({}).sort('_id', -1).limit(10).to_list(length=10)
await ctx.send(f"Last few interactions:\n\n```json\n{last_few}```")
@listen()
async def on_message_create(self, event: MessageCreate):
message = event.message
if message.author.bot or len(message.content) > 1000:
return
guild_stuff: models.Guild = await self.bot.db.fetch_guild(message.guild.id)
if not guild_stuff.module_enabled(models.ModuleToggles.CHARACTER):
return
elif message.content.startswith("!memdump"):
await message.reply(f"```{self.memory.data}```"[:1999])
elif message.content.startswith("!forget"):
if message.author.id in self.memory.data:
del self.memory.data[message.author.id]
await message.reply("I forgot everything about you.")
elif message.content.startswith("!raw"):
t = time.time()
this_prompt = message.content[4:].strip("\n ")
if len(this_prompt) > 1000:
await message.reply("That prompt is too long.")
return
response = await client.complete(this_prompt, temperature=0.9, max_tokens=100, top_p=1, full_response=True)
choice = response["choices"][0]
text = choice["text"]#.strip("\n ")
await message.reply(f"`Finish Reason`: {choice['finish_reason']} **|** `Latency`: {(time.time()-t)*1000:.0f}ms```ansi\n{this_prompt}\u001b[37m{text}```")
# see if we should respond
elif (
f"<@{self.bot.user.id}>" in message.content or
f"<@!{self.bot.user.id}>" in message.content or
any(x.lower() in message.content.lower() for x in NAMES) or
(message.message_reference and message.message_reference.message_id in self.past_messages)
):
            reading_time = 30
# these are conditions where the bot should respond more promptly, so active conversations can exist
if (
(message.message_reference and message.message_reference.message_id in self.past_messages) or
(f"<@{self.bot.user.id}>" in message.content or f"<@!{self.bot.user.id}>" in message.content) or
(message.author.id in self.memory.data)
):
                reading_time = 3
            await asyncio.sleep(random.random() * reading_time) # some reaction time and reading time
await message.channel.trigger_typing()
content = message.content.replace(f"<@{self.bot.user.id}>", "").replace(f"<@!{self.bot.user.id}>", "").strip()
self.bot.info("Message received: {}".format(content))
this_prompt = PROMPT.format_map({'name': message.author.username, 'message': content, 'past': self.memory.get_past(message.author.id)})
self.bot.info(f"Anya: {this_prompt}")
response = await client.complete(this_prompt, temperature=0.9, max_tokens=64, top_p=1)
response = response.strip("\n")
first_length = len(response)
response = response.split("\n")[0]
if first_length > len(response):
self.bot.error("cut off excess")
response = response.replace("\n", " ")
self.memory.remember(message.author.id, Interaction(message.author.username, content, response))
typing_time = len(response) * 0.05
while typing_time > 0:
reduction = min(typing_time, 9)
await message.channel.trigger_typing()
await asyncio.sleep(reduction)
typing_time -= reduction
msg = await message.reply(response)
self.past_messages.append(msg.id)
def setup(bot):
Character(bot) | [
"\nThe following is a conversation with Anya who is a telepathic esper. Anya is a short young girl with fair skin and green eyes who is six years old. She is impressionable and loves spies.\nHer father is named loid forger, a spy. and her mother is named yor forger, an assassin. She also has a dog named bond that can see the future.\n\n{past}\n{name}: {message}\nanya:\n",
"name",
"\n "
] |
2024-01-10 | clccclcc/vchat1 | vchat.py | #import streamlit as st
#import openai
import streamlit as st
# audiorecorder package for recording audio
from audiorecorder import audiorecorder
# OpenAI package
import openai
# os for deleting temporary files
import os
# datetime for timestamps
from datetime import datetime
# numpy for comparing audio arrays
import numpy as np
# gTTS for text-to-speech
from gtts import gTTS
# base64 for playing back the audio file
import base64
# openai.api_key = "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"
# openai.api_key = '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' # 990911
def main_page():
st.set_page_config(page_title='음성비서 프로그램', layout="wide")
st.header("음성 비서")
st.markdown("---")
with st.expander("About",expanded=True):
st.write(
"""
- by clcc 2023.9.11.
- 음성->text-> GPT -> text -> 음성 구조
- Have a nice day!
"""
)
st.markdown("")
def left_page():
with st.sidebar:
openai.api_key=st.text_input(label="api key", placeholder="api key 입력", value="" , type="password")
st.markdown("---")
# model = st.radio(label="model", options=["gpt-3.5-turbo","gpt-4"])
st.session_state['model'] = st.radio(label="model", options=["gpt-3.5-turbo","gpt-4"])
st.markdown("---")
if st.button(label="초기화"):
print("초기화 button pressed")
ini_session()
def ini_session():
if "chat" not in st.session_state:
st.session_state['chat'] = []
    if 'messages' not in st.session_state:
st.session_state['messages'] = [{'role':'system',
'content':'You are a thoughtful assistant. Response to al input in 25 words and answer in korea.'}]
if 'check_audio' not in st.session_state:
st.session_state['check_audio'] = []
if 'model' not in st.session_state:
st.session_state['model'] = 'gpt-3.5-turbo'
if 'flag_start' not in st.session_state:
st.session_state['flag_start'] = False
# from audiorecorder import audiorecorder
# import numpy as np
def STT(audio):
# 파일 저장
# index = np.random.randint(0,10)
# filename=f'input{index}.mp3'
filename = 'input.mp3'
audio.export(filename)
# 음원 파일 열기
audio_file = open(filename, "rb")
#Whisper 모델을 활용해 텍스트 얻기
transcript = openai.Audio.transcribe("whisper-1", audio_file)
audio_file.close()
# 파일 삭제
os.remove(filename)
return transcript["text"]
def myrecode():
st.subheader('질문하기')
# 음성 녹음하기
audio = audiorecorder("클릭하여 녹음하기", "녹음 마치기")
if len(audio) > 0 and not np.array_equal(audio,st.session_state["check_audio"]):
# 음성 재생
# m1
# st.audio(audio.tobytes()) # error
# m2
audio.export('audio.mp3')
with open('audio.mp3','rb') as f:
data = f.read()
st.audio(data)
# 음원 파일에서 텍스트 추출
question = STT(audio)
# print('text:',question)
# 채팅을 시각화하기 위해 질문 내용 저장
now = datetime.now().strftime("%H:%M")
st.session_state["chat"] = st.session_state["chat"]+ [("user",now, question)]
# GPT 모델에 넣을 프롬프트를 위해 질문 내용 저장
st.session_state["messages"] = st.session_state["messages"]+ [{"role": "user", "content": question}]
# audio 버퍼 확인을 위해 현 시점 오디오 정보 저장
st.session_state["check_audio"] = audio
st.session_state['flag_start'] =True
# st.markdown("---")
# st.subheader("입력한 내용:")
st.write(question)
# st.markdown("---")
# st.write(st.session_state["chat"])
# st.write(st.session_state["messages"])
def check_bill_ex(response, wrate=1323.43,prompt_rate=0.0015, completion_rate=0.002):
prompt_tokens = response['usage']["prompt_tokens"]
completion_tokens= response['usage']["completion_tokens"]
total_bill = prompt_tokens * prompt_rate/1000 + completion_tokens * completion_rate/1000
total_won = total_bill * wrate
return total_won, total_bill ,prompt_tokens, completion_tokens
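# Worked example (illustration only, with made-up token counts): prompt_tokens=200 and
# completion_tokens=100 at the default rates give
#   total_bill = 200*0.0015/1000 + 100*0.002/1000 = 0.0005 USD
#   total_won  = 0.0005 * 1323.43 ≈ 0.66 KRW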
def ask_gpt(prompt, model):
response = openai.ChatCompletion.create(model=model, messages=prompt)
system_message = response["choices"][0]["message"]
return system_message["content"],response
def TTS(response):
# gTTS 를 활용하여 음성 파일 생성
# index = np.random.randint(0,10)
# filename = f"output{index}.mp3"
    filename = 'output.mp3'
tts = gTTS(text=response,lang="ko")
tts.save(filename)
# 음원 파일 자동 재성
with open(filename, "rb") as f:
data = f.read()
b64 = base64.b64encode(data).decode()
md = f"""
<audio autoplay="True">
<source src="data:audio/mp3;base64,{b64}" type="audio/mp3">
</audio>
"""
st.markdown(md,unsafe_allow_html=True,)
# 파일 삭제
os.remove(filename)
def myresponse():
st.subheader("답변 듣기")
if st.session_state['flag_start']:
#ChatGPT에게 답변 얻기
response ,res = ask_gpt(st.session_state["messages"], st.session_state['model'])
# st.write(response)
st.session_state['flag_start'] = False
won,bil,token_in,token_out = check_bill_ex(res)
st.write(f' {won}won ,{bil} $ , {token_in} token_in ,{token_out} token_out')
# st.write(f' {token_in} token_in ,{token_out} token_out')
st.markdown("---")
# GPT 모델에 넣을 프롬프트를 위해 답변 내용 저장
st.session_state["messages"] = st.session_state["messages"]+ [{"role": "system", "content": response}]
# 채팅 시각화를 위한 답변 내용 저장
now = datetime.now().strftime("%H:%M")
st.session_state["chat"] = st.session_state["chat"]+ [("bot",now, response)]
# 채팅 형식으로 시각화 하기
for sender, time, message in st.session_state["chat"]:
if sender == "bot": # bot
st.write(f'<div style="display:flex;align-items:center;"><div style="background-color:#007AFF;color:white;border-radius:12px;padding:8px 12px;margin-right:8px;">{message}</div><div style="font-size:0.8rem;color:gray;">{time}</div></div>', unsafe_allow_html=True)
st.write("")
            else: # user
print('sender:',sender)
st.write(f'<div style="display:flex;align-items:center;justify-content:flex-end;"><div style="background-color:lightgray;border-radius:12px;padding:8px 12px;margin-left:8px;">{message}</div><div style="font-size:0.8rem;color:gray;">{time}</div></div>', unsafe_allow_html=True)
st.write("")
# gTTS 를 활용하여 음성 파일 생성 및 재생
TTS(response)
def main():
# 음성비서 프로그램
ini_session()
main_page()
left_page()
col1,col2 = st.columns(2)
with col1:
myrecode()
with col2:
myresponse()
main() | [
"You are a thoughtful assistant. Response to al input in 25 words and answer in korea.",
"prompt_tokens"
] |
2024-01-10 | khuangaf/FakingFakeNews | grover~sample~encoder.py | """Byte pair encoding utilities
Some functions are adapted from OpenAI but with modifications
https://github.com/openai/gpt-2
"""
import os
import json
import regex as re
from functools import lru_cache
import tensorflow as tf
import random
import numpy as np
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a signficant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
cs = bs[:]
n = 0
for b in range(2 ** 8):
if b not in bs:
bs.append(b)
cs.append(2 ** 8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
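# Illustration only: every raw byte gets a printable stand-in, e.g. the space byte (32) maps to
# chr(288) == 'Ġ', which is why BPE vocab entries such as 'Ġhello' denote " hello".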
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
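# Illustration only: get_pairs(('h', 'e', 'l', 'l', 'o')) == {('h','e'), ('e','l'), ('l','l'), ('l','o')}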
class Encoder:
def __init__(self, encoder, bpe_merges, errors='replace'):
self.encoder = {k: v + 1 for k, v in encoder.items()}
self.encoder['<|padding|>'] = 0
self.padding = 0
del self.encoder['<|endoftext|>']
for special_token_type in ['domain', 'date', 'authors', 'title', 'article', 'summary']:
setattr(self, f'begin_{special_token_type}', len(self.encoder))
self.encoder[f'<|begin{special_token_type}|>'] = len(self.encoder)
setattr(self, f'end_{special_token_type}', len(self.encoder))
self.encoder[f'<|endof{special_token_type}|>'] = len(self.encoder)
# This will be used if we want to combine short articles.
self.reset_context = len(self.encoder)
self.encoder['<|resetcontext|>'] = len(self.encoder)
################################## END OF SPECIAL TOKENS TO ADD
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
# Should haved added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = get_pairs(word)
if not pairs:
return token
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
return text
def __len__(self):
return len(self.encoder)
@property
def special_tokens_onehot(self):
""" Return the IDs of all special tokens"""
return [(self.decoder[i].startswith('<|') and self.decoder[i].endswith('|>')) for i in range(len(self))]
def get_encoder():
directory_name = os.path.dirname(__file__)
with open(os.path.join(directory_name, 'encoder.json'), 'r') as f:
encoder = json.load(f)
with open(os.path.join(directory_name, 'vocab.bpe'), 'r', encoding="utf-8") as f:
bpe_data = f.read()
bpe_merges = [tuple(merge_str.split()) for merge_str in bpe_data.split('\n')[1:-1]]
return Encoder(
encoder=encoder,
bpe_merges=bpe_merges,
)
##############################################################
# TURN SOMETHING INTO THE RIGHT FORMAT FOR AN EXAMPLE
##############################################################
def _tokenize_article_pieces(encoder, item):
"""
Turn the article into tokens
NOTE: in hindsight I kinda messed up here because the first token is always represented as a BPE continuation
rather than an initial token in its own right. whoops....
:param item: Contains things that need to be tokenized
fields are ['domain', 'date', 'authors', 'title', 'article', 'summary']
:return: dict
"""
article_pieces = {
'article': [encoder.begin_article] + encoder.encode(item['text']) + [encoder.end_article],
'domain': [encoder.begin_domain] + encoder.encode(item['domain']) + [encoder.end_domain],
'title': [encoder.begin_title] + encoder.encode(item['title']) + [encoder.end_title],
}
# 4/6: Attach the summary too, why the hell not
if item['summary'] and len(item['summary']) > 50:
article_pieces['summary'] = [encoder.begin_summary] + encoder.encode(item['summary']) + [encoder.end_summary]
# 5/6: date
date_split = item['publish_date'].split('-')
assert len(date_split) == 3
assert date_split[0].isdigit()
date_txt = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
'August', 'September', 'October', 'November', 'December'][int(date_split[0]) - 1] + ' {}, {}'.format(
date_split[1], date_split[2])
article_pieces['date'] = [encoder.begin_date] + encoder.encode(date_txt) + [encoder.end_date]
# 6/6: authors
authors = ', '.join(item['authors'])
if len(authors) > 5:
article_pieces['authors'] = [encoder.begin_authors] + encoder.encode(authors) + [encoder.end_authors]
return article_pieces
def _cut_tokens_to_add_stuff(tokens, stuff_to_add, desired_size, padding_token):
"""
The idea behind this function is to take away tokens from `tokens' such that tokens[:LENGTH] + stuff_to_add becomes
exactly at the right size (desired_size).
:param tokens:
:param stuff_to_add:
:param desired_size:
:return:
"""
if len(tokens) >= desired_size:
return tokens
# no way we can add this stuff
if len(stuff_to_add) >= desired_size:
return tokens
if (len(tokens) + len(stuff_to_add)) <= desired_size:
return tokens + stuff_to_add
# Otherwise we'll have to actually cut
tokens = tokens[:(desired_size - len(stuff_to_add) - 1)]
tokens.append(padding_token)
tokens.extend(stuff_to_add)
return tokens
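def _example_cut_to_size():
    # Tiny self-contained illustration with made-up token ids: 6 article tokens plus
    # 4 metadata tokens do not fit into 8, so the article is cut to 3 tokens, a
    # padding token marks the cut, and the metadata is appended -> exactly 8 tokens.
    out = _cut_tokens_to_add_stuff([10, 11, 12, 13, 14, 15], [90, 91, 92, 93],
                                   desired_size=8, padding_token=0)
    print(out)  # [10, 11, 12, 0, 90, 91, 92, 93]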
def tokenize_for_grover_training(encoder, item, desired_size=1024, unconditional_prob=0.35, metadata_dropout_prob=0.1,
cut_prob=0.2):
"""
Not only will we tokenize an item with a BPE encoder, but we'll also put it in a nice format for language modeling.
The goal is to MINIMIZE PADDING. If we don't fill up the desired size of 1024 tokens then we're wasting compute.
The canonical order is
DOMAIN DATE AUTHORS TITLE ARTICLE SUMMARY
:param encoder:
:param item: Contains things like
{"url": "https://www.advocate.com/node/1010911",
"timestamp": "20180118211607",
"url_used": "https://web.archive.org/web/20180118211607id_/https://www.advocate.com/node/1010911",
"domain": "advocate.com",
"title": "Report: One-Third of Trump's Judicial Picks Are Anti-LGBT",
"text": ....
"summary": ....
"authors": list
"publish_date": ...
}
:param desired_size: the goal for how long the span will be
:param unconditional_prob: The probability that we will generate JUST THE TEXT first.
:param metadata_dropout_prob: The probability that we will drop out each item of metadata
:param cut_prob: The probability that, if we're already over the desired size, we'll cut the article and start
predicting metadata before the desired_size window ends.
:return:
"""
# Get all the bits and pieces
article_pieces = _tokenize_article_pieces(encoder, item)
canonical_metadata_order = ['domain', 'date', 'authors', 'title']
# unconditional_prob is probability we only generate the text first, without any metadata
switch = random.random()
if switch < unconditional_prob:
assignments = {'article': 'a'}
chunk_a = article_pieces.pop('article')
chunk_b = []
for x in canonical_metadata_order + ['summary']:
if random.random() > metadata_dropout_prob:
chunk_b.extend(article_pieces.pop(x, []))
assignments[x] = 'b'
elif switch < 0.5:
# Put everything in chunk_a, without dropout
assignments = {}
chunk_a = []
chunk_b = []
for x in canonical_metadata_order + ['article', 'summary']:
chunk_a.extend(article_pieces.pop(x, []))
assignments[x] = 'a'
else:
assignments = {}
chunk_a = []
chunk_b = []
for k in canonical_metadata_order + ['article', 'summary']:
if random.random() < metadata_dropout_prob and k not in ('article', 'title'):
pass
elif random.random() < 0.5:
if k != 'summary':
chunk_a.extend(article_pieces.pop(k, []))
assignments[k] = 'a'
else:
chunk_b.extend(article_pieces.pop(k, []))
assignments[k] = 'b'
if (len(chunk_a) + len(chunk_b)) <= desired_size:
return chunk_a + chunk_b
if (assignments.get('article', '') == 'a') and (len(chunk_b) > 0) and (random.random() < cut_prob):
return _cut_tokens_to_add_stuff(chunk_a, chunk_b, desired_size, encoder.padding)
tokens = chunk_a + chunk_b
return tokens
def detokenize(encoder, tokens):
return encoder.decode(tokens)
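def _example_lm_example():
    # Hedged sketch of the expected `item` layout (field values here are invented).
    # It assumes encoder.json / vocab.bpe are available for get_encoder(), and that
    # publish_date is month-day-year, which is how _tokenize_article_pieces reads it.
    encoder = get_encoder()
    item = {
        'domain': 'example.com',
        'publish_date': '4-1-2019',
        'authors': ['Jane Doe', 'John Roe'],
        'title': 'An example headline',
        'text': 'Body of the example article.',
        'summary': '',
    }
    tokens = tokenize_for_grover_training(encoder, item, desired_size=64)
    print(len(tokens))
    print(detokenize(encoder, tokens))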
#######################################
def create_int_feature(values):
feature = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return feature
def sliding_window(article, max_seq_length, pad_token):
"""
Randomly sample some spans. It's a simple approximation of sliding window
:param tokens:
:param max_seq_length:
:return:
"""
# if it's shorter, no need for this
if len(article['input_ids']) <= max_seq_length:
amount_to_pad = max_seq_length - len(article['input_ids'])
article['input_ids'].extend([pad_token] * amount_to_pad)
yield article
return
num_spans = len(article['input_ids']) - max_seq_length + 1
weights = np.ones(num_spans, dtype=np.float32)
# weights[0] = max_seq_length
weights /= weights.sum()
num_to_yield = int(0.5 + len(article['input_ids']) / max_seq_length)
starts = np.random.choice(num_spans, size=num_to_yield, replace=False, p=weights)
input_ids = article.pop('input_ids')
for i in starts.tolist():
article['input_ids'] = input_ids[i:(i + max_seq_length)]
yield article
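def _example_sliding_window():
    # Self-contained illustration: a short article is padded up to max_seq_length and
    # yielded once; a longer one yields about len/max_seq_length randomly placed spans.
    short = {'input_ids': [1, 2, 3]}
    print([a['input_ids'] for a in sliding_window(short, max_seq_length=5, pad_token=0)])
    longer = {'input_ids': list(range(12))}
    for span in sliding_window(longer, max_seq_length=5, pad_token=0):
        print(span['input_ids'])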
def format_context(encoder, news_article, target):
"""
Generates a news article given some partial information
:param news_article: Contains context
:param target: What we want to get an answer for.
:return:
"""
canonical_metadata_order = ['domain', 'date', 'authors', 'title', 'article']
tokens = []
for metadata_category in canonical_metadata_order:
metadata = news_article.get(metadata_category, '').strip()
# This MIGHT BE needed because I think during training time we never saw empty articles
# if metadata or ((metadata_category == 'article') and target != 'article'):
if (metadata_category == 'article') and (target != 'article'):
metadata = news_article.get('title', '') # Just copy from the title maybe?
if metadata:
tokens.append(encoder.__dict__[f'begin_{metadata_category}'])
tokens.extend(encoder.encode(metadata))
tokens.append(encoder.__dict__[f'end_{metadata_category}'])
assert target in (canonical_metadata_order + ['summary'])
tokens.append(encoder.__dict__[f'begin_{target}'])
return tokens
def extract_generated_target(output_tokens, encoder, target):
"""
Given some tokens that were generated, extract the target
:param output_tokens: [num_tokens] thing that was generated
:param encoder: how they were encoded
:param target: the piece of metadata we wanted to generate!
:return:
"""
# Filter out first instance of start token
assert output_tokens.ndim == 1
start_tokens = output_tokens == encoder.__dict__[f'begin_{target}']
if np.any(start_tokens):
start_ind = np.argmax(start_tokens) + 1
else:
start_ind = 0
end_tokens = output_tokens == encoder.__dict__[f'end_{target}']
if np.any(end_tokens):
end_ind = np.argmax(end_tokens)
else:
end_ind = output_tokens.shape[0]
return {
'extraction': encoder.decode(output_tokens[start_ind:end_ind]),
'start_ind': start_ind,
'end_ind': end_ind,
}
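def _example_generation_flow():
    # Hedged sketch (the real sampling loop lives in the TF code elsewhere): build the
    # conditioning context with format_context, pretend the model produced some body
    # tokens, then recover the generated field with extract_generated_target. Assumes
    # encoder.json / vocab.bpe are available; all article values are invented.
    encoder = get_encoder()
    news_article = {'domain': 'example.com', 'date': 'April 1, 2019',
                    'authors': 'Jane Doe', 'title': 'A headline'}
    context = format_context(encoder, news_article, target='article')
    fake_body = encoder.encode('Some generated body text.')
    output_tokens = np.array(context + fake_body + [encoder.end_article])
    print(extract_generated_target(output_tokens, encoder, target='article'))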
if __name__ == '__main__':
encoder = get_encoder()
print("VOCAB SIZE IS {}".format(len(encoder.encoder)))
| [] |
2024-01-10 | Chugh3012/recipe-maker | backend~app~services~recipe_service.py | from openai import OpenAI
import json
from backend.app.models.recipe_model import Recipe
openai_client = OpenAI()
def generate_recipe(ingredients) -> Recipe:
try:
response = openai_client.chat.completions.create(
model="gpt-4-1106-preview",
response_format={ "type": "json_object" },
messages=[
{"role": "system", "content": "You are a helpful assistant which generates recipes given ingredients in JSON format. It should contain recipe_name, servings, ingredients and instructions. Make sure there is no nested json inside ingredients."},
{"role": "user", "content": "Ingredients: " + ingredients},
]
)
recipe_json = json.loads(response.choices[0].message.content) # type: ignore
recipe = Recipe(recipe_name=recipe_json['recipe_name'], servings=recipe_json['servings'], ingredients=recipe_json['ingredients'], instructions=recipe_json['instructions'])
return recipe
except Exception as e:
raise e | [
"Ingredients: PLACEHOLDER",
"You are a helpful assistant which generates recipes given ingredients in JSON format. It should contain recipe_name, servings, ingredients and instructions. Make sure there is no nested json inside ingredients."
] |
2024-01-10 | dglalperen/AutoDevAI | src~utils~load_java_documents_from_repo.py | from langchain.document_loaders.generic import GenericLoader
from langchain.text_splitter import Language
from langchain.document_loaders.parsers import LanguageParser
import os
def find_java_directories(repo_path):
"""
Recursively find directories containing Java files.
"""
java_directories = set()
for root, dirs, files in os.walk(repo_path):
if any(file.endswith(".java") for file in files):
java_directories.add(root)
return list(java_directories)
def remove_duplicate_documents(documents):
"""
Remove duplicate documents based on their source file path.
"""
unique_docs = {}
for doc in documents:
source_path = doc.metadata.get('source')
if source_path and source_path not in unique_docs:
unique_docs[source_path] = doc
return list(unique_docs.values())
def count_java_files(directory):
"""
Count the number of Java files in a given directory.
"""
return sum(1 for file in os.listdir(directory) if file.endswith(".java"))
def load_java_documents_from_repo(repo_path):
"""
Load all Java documents from the specified repository path.
"""
java_directories = find_java_directories(repo_path)
documents = []
for java_dir in java_directories:
loader = GenericLoader.from_filesystem(
path=java_dir,
glob="**/*.java",
suffixes=[".java"],
parser=LanguageParser(language=Language.JAVA, parser_threshold=500)
)
documents.extend(loader.load())
return remove_duplicate_documents(documents)
def main():
# Replace with the actual repository path after cloning
repo_path = "/path/to/cloned/repository"
documents = load_java_documents_from_repo(repo_path)
print(f"Total number of unique documents: {len(documents)}")
for doc in documents:
print(f"Document Source: {doc.metadata['source']}")
if __name__ == "__main__":
main()
| [] |
2024-01-10 | dglalperen/AutoDevAI | src~utils~setup_qa_retriever.py | import dotenv
from langchain.text_splitter import Language
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains import ConversationalRetrievalChain
from langchain.memory import ConversationSummaryMemory
from langchain.chat_models import ChatOpenAI
from utils.load_java_documents_from_repo import load_java_documents_from_repo
dotenv.load_dotenv()
def setup_qa_retriever(repo_path, model='gpt-4'):
"""
Set up the QA retriever with documents from a given Java repository.
Parameters:
- repo_path (str): Path to the repository containing Java files.
- model (str): The GPT model to be used.
Returns:
- qa (ConversationalRetrievalChain): The QA retrieval chain object.
"""
# Load all java files from repo
documents = load_java_documents_from_repo(repo_path)
print(f"Number of documents: {len(documents)}")
# Split documents
splitter = RecursiveCharacterTextSplitter.from_language(
language=Language.JAVA, chunk_size=2000, chunk_overlap=200
)
texts = splitter.split_documents(documents=documents)
print(f"Number of chunks: {len(texts)}")
# Initialize vector database
db = Chroma.from_documents(texts, OpenAIEmbeddings(disallowed_special=()))
# Set up retriever
    retriever = db.as_retriever(search_type="mmr", search_kwargs={"k": 8})
# Initialize language model for QA retrieval
llm = ChatOpenAI(model=model, temperature=0.2)
memory = ConversationSummaryMemory(llm=llm, memory_key="chat_history", return_messages=True)
qa = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, memory=memory)
return qa
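def _example_ask_repo_question():
    # Hedged usage sketch: assumes a cloned Java repository on disk and that the
    # OpenAI credentials read by ChatOpenAI / OpenAIEmbeddings are present in the
    # environment (loaded via the .env above). The path and question are placeholders.
    qa = setup_qa_retriever("/path/to/cloned/repository", model="gpt-4")
    result = qa({"question": "Which classes make up the service layer?"})
    print(result["answer"])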
| [] |
2024-01-10 | dglalperen/AutoDevAI | src~utils~find_files_test.py | from langchain.document_loaders.generic import GenericLoader
from langchain.text_splitter import Language
from langchain.document_loaders.parsers import LanguageParser
import os
#repo_path = "/Users/dglalperen/Desktop/Uni/Project-2/Repos/java2022-kodlamaio"
#loader_path = f"{repo_path}/src/main/java/kodlama/io/rentacar"
def find_java_directories(repo_path):
"""
Recursively find directories containing Java files.
"""
java_directories = set()
for root, dirs, files in os.walk(repo_path):
if any(file.endswith(".java") for file in files):
java_directories.add(root)
return list(java_directories)
def remove_duplicate_documents(documents):
"""
Remove duplicate documents based on their source file path.
"""
unique_docs = {}
for doc in documents:
source_path = doc.metadata.get('source')
if source_path and source_path not in unique_docs:
unique_docs[source_path] = doc
return list(unique_docs.values())
def count_java_files(directory):
"""
Count the number of Java files in a given directory.
"""
return sum(1 for file in os.listdir(directory) if file.endswith(".java"))
def main():
repo_path = "/Users/dglalperen/Desktop/Uni/Project-2/Repos/java2022-kodlamaio"
java_directories = find_java_directories(repo_path)
print("Java directories:", java_directories)
documents = []
for java_dir in java_directories:
java_file_count = count_java_files(java_dir)
print(f"Processing {java_dir} with {java_file_count} Java files...")
loader = GenericLoader.from_filesystem(
path=java_dir,
glob="**/*.java",
suffixes=[".java"],
parser=LanguageParser(language=Language.JAVA, parser_threshold=500)
)
loaded_documents = loader.load()
print(f"Loaded {len(loaded_documents)} documents from {java_dir}")
documents.extend(loaded_documents)
documents = remove_duplicate_documents(documents)
print(f"Total number of documents: {len(documents)}")
for doc in documents:
print(f"Document Source: {doc.metadata['source']}")
if __name__ == "__main__":
main()
| [] |
2024-01-10 | hiropppe/topic-model | cytm~util.py | import logging
import numpy as np
# np.seterr(all="raise")
from scipy.special import gammaln
from gensim import matutils
from gensim.corpora.dictionary import Dictionary
from gensim.models.coherencemodel import CoherenceModel
from itertools import combinations
from tqdm import tqdm
def perplexity(L, n_kw, n_k, n_dk, n_d, alpha, beta):
likelihood = polyad(n_dk, n_d, alpha) + polyaw(n_kw, n_k, beta)
return np.exp(-likelihood/L)
def polyad(n_dk, n_d, alpha):
N = n_dk.shape[0]
K = n_dk.shape[1]
likelihood = np.sum(gammaln(K * alpha) - gammaln(K * alpha + n_d))
for n in range(N):
likelihood += np.sum(gammaln(n_dk[n, :] + alpha) - gammaln(alpha))
return likelihood
def polyaw(n_kw, n_k, beta):
K = n_kw.shape[0]
V = n_kw.shape[1]
likelihood = np.sum(gammaln(V * beta) - gammaln(V * beta + n_k))
for k in range(K):
likelihood += np.sum(gammaln(n_kw[k, :] + beta) - gammaln(beta))
return likelihood
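def _example_perplexity():
    # Self-contained check with made-up counts (2 documents, 2 topics, 3 word types).
    # The count matrices must be mutually consistent: n_k/n_d are row sums and L is
    # the total number of tokens.
    n_kw = np.array([[2, 1, 0], [0, 1, 2]])
    n_dk = np.array([[2, 1], [1, 2]])
    n_k = n_kw.sum(axis=1)
    n_d = n_dk.sum(axis=1)
    L = n_d.sum()
    print(perplexity(L, n_kw, n_k, n_dk, n_d, alpha=0.1, beta=0.01))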
def get_coherence_model(W, n_kw, top_words, coherence_model, test_texts=None, corpus=None, coo_matrix=None, coo_word2id=None, wv=None, verbose=False):
if coo_matrix is not None:
logging.info("Initializing PMI Coherence Model...")
model = PMICoherence(coo_matrix, coo_word2id, W, n_kw, topn=top_words)
elif wv is not None:
logging.info("Initialize Word Embedding Coherence Model...")
model = EmbeddingCoherence(wv, W, n_kw, topn=top_words)
else:
logging.info(f"Initializing {coherence_model} Coherence Model...")
dictionary = Dictionary.from_documents(corpus)
if test_texts is not None:
model = GensimCoherenceModel(coherence_model, test_texts, None, dictionary, W, n_kw, topn=top_words, verbose=verbose)
else:
bow_corpus = [dictionary.doc2bow(doc) for doc in corpus]
model = GensimCoherenceModel(coherence_model, None, bow_corpus, dictionary, W, n_kw, topn=top_words, verbose=verbose)
return model
class GensimCoherenceModel():
def __init__(self, model, texts, corpus, dictionary, W, n_kw, topn=20, verbose=False):
self.model = model
self.texts = texts
self.corpus = corpus
self.dictionary = dictionary
self.W = W
self.n_kw = n_kw
self.topn = topn
self.K = len(n_kw)
        self.verbose = verbose
def get_topics(self):
topics = []
for k in range(self.K):
topn_indices = matutils.argsort(self.n_kw[k], topn=self.topn, reverse=True)
topics.append([self.W[w] for w in topn_indices])
return topics
def score(self):
topics = self.get_topics()
if self.model == 'u_mass':
cm = CoherenceModel(topics=topics,
corpus=self.corpus, dictionary=self.dictionary, coherence=self.model)
else:
cm = CoherenceModel(topics=topics,
texts=self.texts, dictionary=self.dictionary, coherence=self.model)
        if self.verbose:
coherences = cm.get_coherence_per_topic()
for index, topic in enumerate(topics):
print(str(index) + ':' + str(coherences[index]) + ':' + ','.join(topic))
return cm.get_coherence()
class EmbeddingCoherence():
def __init__(self, wv, W, n_kw, topn=20):
self.wv = wv
self.W = W
self.n_kw = n_kw
self.topn = topn
self.K = len(n_kw)
def score(self):
scores = []
for k in range(self.K):
topn_indices = matutils.argsort(self.n_kw[k], topn=self.topn, reverse=True)
for x, y in combinations(topn_indices, 2):
w_x, w_y = self.W[x], self.W[y]
if w_x in self.wv and w_y in self.wv:
scores.append(self.wv.similarity(w_x, w_y))
return np.mean(scores)
class PMICoherence():
def __init__(self, M, word2id, W, n_kw, eps=1e-08, topn=20):
self.M = M
self.M.setdiag(0)
self.word2id = word2id
self.W = W
self.n_kw = n_kw
self.eps = eps
self.topn = topn
self.K = len(n_kw)
self.N = np.sum(M)
V = len(W)
self.n_w = np.zeros((V), dtype=np.int32)
for i in tqdm(range(V)):
if W[i] in word2id:
self.n_w[i] = self.M[:, word2id[W[i]]].sum()
else:
self.n_w[i] = 0
def pmi(self, x, y, w_x, w_y):
ix = self.word2id[w_x]
iy = self.word2id[w_y]
X = self.n_w[x]
Y = self.n_w[y]
XY = self.M[ix, iy]
if XY == 0 or X == 0 or Y == 0:
pmi = 0
else:
# pmi = np.log2(XY*N/(X*Y+self.eps))/(-np.log(XY/self.N) + self.eps)
p_xy = XY/self.N
p_x = X/self.N
p_y = Y/self.N
pmi = np.log2(p_xy/(p_x*p_y+self.eps))/(-np.log(p_xy) + self.eps)
return pmi
def score(self):
scores = []
for k in range(self.K):
topn_indices = matutils.argsort(self.n_kw[k], topn=self.topn, reverse=True)
for x, y in combinations(topn_indices, 2):
w_x, w_y = self.W[x], self.W[y]
if w_x in self.word2id and w_y in self.word2id:
scores.append(self.pmi(x, y, w_x, w_y))
return np.mean(scores)
| [] |
2024-01-10 | nlarusstone/langchain | tests~unit_tests~test_input.py | """Test input manipulating logic."""
import sys
from io import StringIO
from langchain.input import ChainedInput, get_color_mapping
def test_chained_input_not_verbose() -> None:
"""Test chained input logic."""
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
chained_input = ChainedInput("foo")
sys.stdout = old_stdout
output = mystdout.getvalue()
assert output == ""
assert chained_input.input == "foo"
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
chained_input.add("bar")
sys.stdout = old_stdout
output = mystdout.getvalue()
assert output == ""
assert chained_input.input == "foobar"
def test_chained_input_verbose() -> None:
"""Test chained input logic, making sure verbose doesn't mess it up."""
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
chained_input = ChainedInput("foo", verbose=True)
sys.stdout = old_stdout
output = mystdout.getvalue()
assert output == "foo"
assert chained_input.input == "foo"
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
chained_input.add("bar")
sys.stdout = old_stdout
output = mystdout.getvalue()
assert output == "bar"
assert chained_input.input == "foobar"
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
chained_input.add("baz", color="blue")
sys.stdout = old_stdout
output = mystdout.getvalue()
assert output == "\x1b[48;5;51mbaz\x1b[0m"
assert chained_input.input == "foobarbaz"
def test_get_color_mapping() -> None:
"""Test getting of color mapping."""
# Test on few inputs.
items = ["foo", "bar"]
output = get_color_mapping(items)
expected_output = {"foo": "blue", "bar": "yellow"}
assert output == expected_output
# Test on a lot of inputs.
items = [f"foo-{i}" for i in range(20)]
output = get_color_mapping(items)
assert len(output) == 20
def test_get_color_mapping_excluded_colors() -> None:
"""Test getting of color mapping with excluded colors."""
items = ["foo", "bar"]
output = get_color_mapping(items, excluded_colors=["blue"])
expected_output = {"foo": "yellow", "bar": "pink"}
assert output == expected_output
| [] |
2024-01-10 | nlarusstone/langchain | langchain~chains~mapreduce.py | """Map-reduce chain.
Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from typing import Dict, List
from pydantic import BaseModel, Extra
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.llms.base import LLM
from langchain.prompts.base import BasePrompt
from langchain.text_splitter import TextSplitter
class MapReduceChain(Chain, BaseModel):
"""Map-reduce chain."""
map_llm: LLMChain
"""LLM wrapper to use for the map step."""
reduce_llm: LLMChain
"""LLM wrapper to use for the reduce step."""
text_splitter: TextSplitter
"""Text splitter to use."""
input_key: str = "input_text" #: :meta private:
output_key: str = "output_text" #: :meta private:
@classmethod
def from_params(
cls, llm: LLM, prompt: BasePrompt, text_splitter: TextSplitter
) -> "MapReduceChain":
"""Construct a map-reduce chain that uses the chain for map and reduce."""
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(map_llm=llm_chain, reduce_llm=llm_chain, text_splitter=text_splitter)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Expect input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return output key.
:meta private:
"""
return [self.output_key]
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
# Split the larger text into smaller chunks.
docs = self.text_splitter.split_text(inputs[self.input_key])
# Now that we have the chunks, we send them to the LLM and track results.
# This is the "map" part.
summaries = []
for d in docs:
inputs = {self.map_llm.prompt.input_variables[0]: d}
res = self.map_llm.predict(**inputs)
summaries.append(res)
# We then need to combine these individual parts into one.
# This is the reduce part.
summary_str = "\n".join(summaries)
inputs = {self.reduce_llm.prompt.input_variables[0]: summary_str}
output = self.reduce_llm.predict(**inputs)
return {self.output_key: output}
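# Hedged usage sketch (not part of this module): wiring the chain to a summarization
# prompt. Exact import paths for OpenAI / PromptTemplate / CharacterTextSplitter vary
# across langchain versions, so treat this as an assumption-laden illustration.
def _example_summarize(long_text: str) -> str:
    from langchain.llms import OpenAI
    from langchain.prompts import PromptTemplate
    from langchain.text_splitter import CharacterTextSplitter
    prompt = PromptTemplate(input_variables=["text"], template="Summarize this:\n{text}\nSummary:")
    chain = MapReduceChain.from_params(OpenAI(temperature=0), prompt, CharacterTextSplitter())
    return chain.run(long_text)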
| [] |
2024-01-10 | ruoccofabrizio/azure-open-ai-embeddings-qna | code~utilities~pgvector.py | import enum
import logging
import uuid
from typing import Any, Dict, Iterable, List, Optional, Tuple
import sqlalchemy
from sqlalchemy import delete
from pgvector.sqlalchemy import Vector
from sqlalchemy.dialects.postgresql import JSON, UUID
from sqlalchemy.orm import Mapped, Session, declarative_base, relationship
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
Base = declarative_base() # type: Any
ADA_TOKEN_COUNT = 1536
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
class BaseModel(Base):
__abstract__ = True
uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
class CollectionStore(BaseModel):
__tablename__ = "langchain_pg_collection"
name = sqlalchemy.Column(sqlalchemy.String)
cmetadata = sqlalchemy.Column(JSON)
embeddings = relationship(
"EmbeddingStore",
back_populates="collection",
passive_deletes=True,
)
@classmethod
def get_by_name(cls, session: Session, name: str) -> Optional["CollectionStore"]:
return session.query(cls).filter(cls.name == name).first()
@classmethod
def get_or_create(
cls,
session: Session,
name: str,
cmetadata: Optional[dict] = None,
) -> Tuple["CollectionStore", bool]:
"""
Get or create a collection.
Returns [Collection, bool] where the bool is True if the collection was created.
"""
created = False
collection = cls.get_by_name(session, name)
if collection:
return collection, created
collection = cls(name=name, cmetadata=cmetadata)
session.add(collection)
session.commit()
created = True
return collection, created
class EmbeddingStore(BaseModel):
__tablename__ = "langchain_pg_embedding"
collection_id: Mapped[UUID] = sqlalchemy.Column(
UUID(as_uuid=True),
sqlalchemy.ForeignKey(
f"{CollectionStore.__tablename__}.uuid",
ondelete="CASCADE",
),
)
collection = relationship(CollectionStore, back_populates="embeddings")
embedding: Vector = sqlalchemy.Column(Vector(ADA_TOKEN_COUNT))
document = sqlalchemy.Column(sqlalchemy.String, nullable=True)
cmetadata = sqlalchemy.Column(JSON, nullable=True)
# custom_id : any user defined id
custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True)
class QueryResult:
EmbeddingStore: EmbeddingStore
distance: float
class DistanceStrategy(str, enum.Enum):
EUCLIDEAN = EmbeddingStore.embedding.l2_distance
COSINE = EmbeddingStore.embedding.cosine_distance
MAX_INNER_PRODUCT = EmbeddingStore.embedding.max_inner_product
DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.EUCLIDEAN
class PGVectorExtended(VectorStore):
"""
VectorStore implementation using Postgres and pgvector.
- `connection_string` is a postgres connection string.
- `embedding_function` any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
- `collection_name` is the name of the collection to use. (default: langchain)
- NOTE: This is not the name of the table, but the name of the collection.
The tables will be created when initializing the store (if not exists)
So, make sure the user has the right permissions to create tables.
- `distance_strategy` is the distance strategy to use. (default: EUCLIDEAN)
- `EUCLIDEAN` is the euclidean distance.
- `COSINE` is the cosine distance.
- `pre_delete_collection` if True, will delete the collection if it exists.
(default: False)
- Useful for testing.
"""
def __init__(
self,
connection_string: str,
embedding_function: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
collection_metadata: Optional[dict] = None,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
pre_delete_collection: bool = False,
logger: Optional[logging.Logger]= None,
engine_args: Optional[dict[str, Any]] = None,
) -> None:
self.connection_string = connection_string
self.embedding_function = embedding_function
self.collection_name = collection_name
self.collection_metadata = collection_metadata
self.distance_strategy = distance_strategy
self.pre_delete_collection = pre_delete_collection
self.logger = logger or logging.getLogger(__name__)
self.engine_args = engine_args or {}
self._engine = self.connect()
# self._conn = self.connect()
# self.__post_init__()
self.CollectionStore = CollectionStore
self.EmbeddingStore = EmbeddingStore
def __post_init__(
self,
) -> None:
self.create_vector_extension()
self.create_tables_if_not_exists()
self.create_collection()
def connect(self) -> sqlalchemy.engine:
engine = sqlalchemy.create_engine(self.connection_string, **self.engine_args)
return engine
def create_vector_extension(self) -> None:
try:
with Session(self._engine) as session:
# The advisor lock fixes issue arising from concurrent
# creation of the vector extension.
# https://github.com/langchain-ai/langchain/issues/12933
# For more information see:
# https://www.postgresql.org/docs/16/explicit-locking.html#ADVISORY-LOCKS
statement = sqlalchemy.text(
"BEGIN;"
"SELECT pg_advisory_xact_lock(1573678846307946496);"
"CREATE EXTENSION IF NOT EXISTS vector;"
"COMMIT;"
)
session.execute(statement)
session.commit()
except Exception as e:
raise Exception(f"Failed to create vector extension: {e}") from e
def create_tables_if_not_exists(self) -> None:
with self._engine.begin():
Base.metadata.create_all(self._engine)
def drop_tables(self) -> None:
with self._engine.begin():
Base.metadata.drop_all(self._engine)
def create_collection(self) -> None:
if self.pre_delete_collection:
self.delete_collection()
with Session(self._engine) as session:
CollectionStore.get_or_create(
session, self.collection_name, cmetadata=self.collection_metadata
)
def delete_collection(self) -> None:
self.logger.debug("Trying to delete collection")
with Session(self._engine) as session:
collection = self.get_collection(session)
if not collection:
self.logger.error("Collection not found")
return
session.delete(collection)
session.commit()
def get_collection(self, session: Session) -> Optional["CollectionStore"]:
return CollectionStore.get_by_name(session, self.collection_name)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
embeddings = self.embedding_function.embed_documents(list(texts))
if not metadatas:
metadatas = [{} for _ in texts]
with Session(self._engine) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids):
embedding_store = EmbeddingStore(
embedding=embedding,
document=text,
cmetadata=metadata,
custom_id=id,
)
collection.embeddings.append(embedding_store)
session.add(embedding_store)
session.commit()
return ids
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with PGVector with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
filter=filter,
)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each
"""
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return docs
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
with Session(self._engine) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
filter_by = EmbeddingStore.collection_id == collection.uuid
if filter is not None:
filter_clauses = []
for key, value in filter.items():
filter_by_metadata = EmbeddingStore.cmetadata[key].astext == str(value)
filter_clauses.append(filter_by_metadata)
filter_by = sqlalchemy.and_(filter_by, *filter_clauses)
results: List[QueryResult] = (
session.query(
EmbeddingStore,
self.distance_strategy(embedding).label("distance"), # type: ignore
)
.filter(filter_by)
.order_by(sqlalchemy.asc("distance"))
.join(
CollectionStore,
EmbeddingStore.collection_id == CollectionStore.uuid,
)
.limit(k)
.all()
)
docs = [
(
Document(
page_content=result.EmbeddingStore.document,
metadata=result.EmbeddingStore.cmetadata,
),
result.distance if self.embedding_function is not None else None,
)
for result in results
]
return docs
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return [doc for doc, _ in docs_and_scores]
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DistanceStrategy.COSINE,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> "PGVectorExtended":
"""
Return VectorStore initialized from texts and embeddings.
Postgres connection string is required
"Either pass it as a parameter
or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
)
store.add_texts(texts=texts, metadatas=metadatas, ids=ids, **kwargs)
return store
@classmethod
def get_connection_string(cls, kwargs: Dict[str, Any]) -> str:
connection_string: str = get_from_dict_or_env(
data=kwargs,
key="connection_string",
env_key="PGVECTOR_CONNECTION_STRING",
)
if not connection_string:
raise ValueError(
"Postgres connection string is required"
"Either pass it as a parameter"
"or set the PGVECTOR_CONNECTION_STRING environment variable."
)
return connection_string
@classmethod
def from_documents(
cls,
documents: List[Document],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> "PGVectorExtended":
"""
Return VectorStore initialized from documents and embeddings.
Postgres connection string is required
"Either pass it as a parameter
or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs["connection_string"] = connection_string
return cls.from_texts(
texts=texts,
pre_delete_collection=pre_delete_collection,
embedding=embedding,
distance_strategy=distance_strategy,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
**kwargs,
)
@classmethod
def connection_string_from_db_params(
cls,
driver: str,
host: str,
port: int,
database: str,
user: str,
password: str,
) -> str:
"""Return connection string from database parameters."""
return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}"
def delete_keys(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
"""Delete vectors by ids or uuids.
Args:
ids: List of ids to delete.
"""
with Session(self._engine) as session:
if ids is not None:
self.logger.debug(
"Trying to delete vectors by ids (represented by the model "
"using the custom ids field)"
)
stmt = delete(self.EmbeddingStore).where(
self.EmbeddingStore.custom_id.in_(ids)
)
session.execute(stmt)
session.commit() | [] |
2024-01-10 | ruoccofabrizio/azure-open-ai-embeddings-qna | code~utilities~helper.py | import os
import openai
from dotenv import load_dotenv
import logging
import re
import hashlib
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import AzureOpenAI
from langchain.vectorstores.base import VectorStore
from langchain.chains import ChatVectorDBChain
from langchain.chains import ConversationalRetrievalChain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chains.llm import LLMChain
from langchain.chains.chat_vector_db.prompts import CONDENSE_QUESTION_PROMPT
from langchain.prompts import PromptTemplate
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import TokenTextSplitter, TextSplitter
from langchain.document_loaders.base import BaseLoader
from langchain.document_loaders import TextLoader
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from utilities.formrecognizer import AzureFormRecognizerClient
from utilities.azureblobstorage import AzureBlobStorageClient
from utilities.translator import AzureTranslatorClient
from utilities.customprompt import PROMPT
from utilities.redis import RedisExtended
from utilities.azuresearch import AzureSearch
from utilities.pgvector import PGVectorExtended
import pandas as pd
import urllib
from fake_useragent import UserAgent
class LLMHelper:
def __init__(self,
document_loaders : BaseLoader = None,
text_splitter: TextSplitter = None,
embeddings: OpenAIEmbeddings = None,
llm: AzureOpenAI = None,
temperature: float = None,
max_tokens: int = None,
custom_prompt: str = "",
vector_store: VectorStore = None,
k: int = None,
pdf_parser: AzureFormRecognizerClient = None,
blob_client: AzureBlobStorageClient = None,
enable_translation: bool = False,
translator: AzureTranslatorClient = None):
load_dotenv()
openai.api_type = "azure"
openai.api_base = os.getenv('OPENAI_API_BASE')
openai.api_version = "2023-03-15-preview"
openai.api_key = os.getenv("OPENAI_API_KEY")
# Azure OpenAI settings
self.api_base = openai.api_base
self.api_version = openai.api_version
self.index_name: str = "embeddings"
self.model: str = os.getenv('OPENAI_EMBEDDINGS_ENGINE_DOC', "text-embedding-ada-002")
self.deployment_name: str = os.getenv("OPENAI_ENGINE", os.getenv("OPENAI_ENGINES", "text-davinci-003"))
self.deployment_type: str = os.getenv("OPENAI_DEPLOYMENT_TYPE", "Text")
self.temperature: float = float(os.getenv("OPENAI_TEMPERATURE", 0.7)) if temperature is None else temperature
self.max_tokens: int = int(os.getenv("OPENAI_MAX_TOKENS", -1)) if max_tokens is None else max_tokens
self.prompt = PROMPT if custom_prompt == '' else PromptTemplate(template=custom_prompt, input_variables=["summaries", "question"])
self.vector_store_type = os.getenv("VECTOR_STORE_TYPE")
# Azure Search settings
if self.vector_store_type == "AzureSearch":
self.vector_store_address: str = os.getenv('AZURE_SEARCH_SERVICE_NAME')
self.vector_store_password: str = os.getenv('AZURE_SEARCH_ADMIN_KEY')
# PGVector settings
elif self.vector_store_type == "PGVector":
self.vector_store_driver: str = os.getenv('PGVECTOR_DRIVER', "psycopg2")
self.vector_store_address: str = os.getenv('PGVECTOR_HOST', "localhost")
self.vector_store_port: int = int(os.getenv('PGVECTOR_PORT', 5432))
self.vector_store_database: str = os.getenv("PGVECTOR_DATABASE", "postgres")
self.vector_store_username: str = os.getenv("PGVECTOR_USER", "postgres")
self.vector_store_password: str = os.getenv("PGVECTOR_PASSWORD", "postgres")
if self.vector_store_password:
self.vector_store_full_address = f"postgresql+{self.vector_store_driver}://{self.vector_store_username}:{self.vector_store_password}@{self.vector_store_address}:{self.vector_store_port}/{self.vector_store_database}"
else:
self.vector_store_full_address = f"postgresql+{self.vector_store_driver}://{self.vector_store_username}@{self.vector_store_address}:{self.vector_store_port}/{self.vector_store_database}"
else:
# Vector store settings
self.vector_store_address: str = os.getenv('REDIS_ADDRESS', "localhost")
self.vector_store_port: int= int(os.getenv('REDIS_PORT', 6379))
self.vector_store_protocol: str = os.getenv("REDIS_PROTOCOL", "redis://")
self.vector_store_password: str = os.getenv("REDIS_PASSWORD", None)
if self.vector_store_password:
self.vector_store_full_address = f"{self.vector_store_protocol}:{self.vector_store_password}@{self.vector_store_address}:{self.vector_store_port}"
else:
self.vector_store_full_address = f"{self.vector_store_protocol}{self.vector_store_address}:{self.vector_store_port}"
self.chunk_size = int(os.getenv('CHUNK_SIZE', 500))
self.chunk_overlap = int(os.getenv('CHUNK_OVERLAP', 100))
self.document_loaders: BaseLoader = WebBaseLoader if document_loaders is None else document_loaders
self.text_splitter: TextSplitter = TokenTextSplitter(chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap) if text_splitter is None else text_splitter
self.embeddings: OpenAIEmbeddings = OpenAIEmbeddings(model=self.model, chunk_size=1) if embeddings is None else embeddings
if self.deployment_type == "Chat":
self.llm: ChatOpenAI = ChatOpenAI(model_name=self.deployment_name, engine=self.deployment_name, temperature=self.temperature, max_tokens=self.max_tokens if self.max_tokens != -1 else None) if llm is None else llm
else:
self.llm: AzureOpenAI = AzureOpenAI(deployment_name=self.deployment_name, temperature=self.temperature, max_tokens=self.max_tokens) if llm is None else llm
if self.vector_store_type == "AzureSearch":
self.vector_store: VectorStore = AzureSearch(azure_cognitive_search_name=self.vector_store_address, azure_cognitive_search_key=self.vector_store_password, index_name=self.index_name, embedding_function=self.embeddings.embed_query) if vector_store is None else vector_store
elif self.vector_store_type == "PGVector":
self.vector_store: PGVectorExtended = PGVectorExtended(connection_string=self.vector_store_full_address, embedding_function=self.embeddings, collection_name="qnacollection", pre_delete_collection=False) if vector_store is None else vector_store
else:
self.vector_store: RedisExtended = RedisExtended(redis_url=self.vector_store_full_address, index_name=self.index_name, embedding_function=self.embeddings.embed_query) if vector_store is None else vector_store
self.k : int = 3 if k is None else k
self.pdf_parser : AzureFormRecognizerClient = AzureFormRecognizerClient() if pdf_parser is None else pdf_parser
self.blob_client: AzureBlobStorageClient = AzureBlobStorageClient() if blob_client is None else blob_client
self.enable_translation : bool = False if enable_translation is None else enable_translation
self.translator : AzureTranslatorClient = AzureTranslatorClient() if translator is None else translator
        self.user_agent: UserAgent = UserAgent()
        self.user_agent.random  # touch .random once so the user-agent data is loaded up front
def add_embeddings_lc(self, source_url):
try:
documents = self.document_loaders(source_url).load()
# Convert to UTF-8 encoding for non-ascii text
            for document in documents:
try:
if document.page_content.encode("iso-8859-1") == document.page_content.encode("latin-1"):
document.page_content = document.page_content.encode("iso-8859-1").decode("utf-8", errors="ignore")
except:
pass
docs = self.text_splitter.split_documents(documents)
# Remove half non-ascii character from start/end of doc content (langchain TokenTextSplitter may split a non-ascii character in half)
pattern = re.compile(r'[\x00-\x09\x0b\x0c\x0e-\x1f\x7f\u0080-\u00a0\u2000-\u3000\ufff0-\uffff]') # do not remove \x0a (\n) nor \x0d (\r)
            for doc in docs:
                doc.page_content = re.sub(pattern, '', doc.page_content)
            # drop chunks that became empty after cleaning (removing items while iterating would skip elements)
            docs = [doc for doc in docs if doc.page_content != '']
keys = []
for i, doc in enumerate(docs):
# Create a unique key for the document
source_url = source_url.split('?')[0]
filename = "/".join(source_url.split('/')[4:])
hash_key = hashlib.sha1(f"{source_url}_{i}".encode('utf-8')).hexdigest()
hash_key = f"doc:{self.index_name}:{hash_key}"
keys.append(hash_key)
doc.metadata = {"source": f"[{source_url}]({source_url}_SAS_TOKEN_PLACEHOLDER_)" , "chunk": i, "key": hash_key, "filename": filename}
if self.vector_store_type == "AzureSearch":
self.vector_store.add_documents(documents=docs, keys=keys)
elif self.vector_store_type == "PGVector":
self.vector_store.add_documents(documents=docs, keys=keys, ids=keys)
else:
self.vector_store.add_documents(documents=docs, redis_url=self.vector_store_full_address, index_name=self.index_name, keys=keys)
except Exception as e:
logging.error(f"Error adding embeddings for {source_url}: {e}")
raise e
def convert_file_and_add_embeddings(self, source_url, filename, enable_translation=False):
# Extract the text from the file
text = self.pdf_parser.analyze_read(source_url)
# Translate if requested
converted_text = list(map(lambda x: self.translator.translate(x), text)) if self.enable_translation else text
# Remove half non-ascii character from start/end of doc content (langchain TokenTextSplitter may split a non-ascii character in half)
pattern = re.compile(r'[\x00-\x09\x0b\x0c\x0e-\x1f\x7f\u0080-\u00a0\u2000-\u3000\ufff0-\uffff]') # do not remove \x0a (\n) nor \x0d (\r)
converted_text = re.sub(pattern, '', "\n".join(converted_text))
# Upload the text to Azure Blob Storage
converted_filename = f"converted/{filename}.txt"
source_url = self.blob_client.upload_file(converted_text, f"converted/{filename}.txt", content_type='text/plain; charset=utf-8')
print(f"Converted file uploaded to {source_url} with filename {filename}")
# Update the metadata to indicate that the file has been converted
self.blob_client.upsert_blob_metadata(filename, {"converted": "true"})
self.add_embeddings_lc(source_url=source_url)
return converted_filename
def get_all_documents(self, k: int = None):
result = self.vector_store.similarity_search(query="*", k= k if k else self.k)
dataFrame = pd.DataFrame(list(map(lambda x: {
'key': x.metadata['key'],
'filename': x.metadata['filename'],
'source': urllib.parse.unquote(x.metadata['source']),
'content': x.page_content,
'metadata' : x.metadata,
}, result)))
if dataFrame.empty is False:
dataFrame = dataFrame.sort_values(by='filename')
return dataFrame
def get_semantic_answer_lang_chain(self, question, chat_history):
question_generator = LLMChain(llm=self.llm, prompt=CONDENSE_QUESTION_PROMPT, verbose=False)
doc_chain = load_qa_with_sources_chain(self.llm, chain_type="stuff", verbose=False, prompt=self.prompt)
chain = ConversationalRetrievalChain(
retriever=self.vector_store.as_retriever(),
question_generator=question_generator,
combine_docs_chain=doc_chain,
return_source_documents=True,
# top_k_docs_for_context= self.k
)
result = chain({"question": question, "chat_history": chat_history})
sources = "\n".join(set(map(lambda x: x.metadata["source"], result['source_documents'])))
container_sas = self.blob_client.get_container_sas()
contextDict ={}
for res in result['source_documents']:
source_key = self.filter_sourcesLinks(res.metadata['source'].replace('_SAS_TOKEN_PLACEHOLDER_', container_sas)).replace('\n', '').replace(' ', '')
if source_key not in contextDict:
contextDict[source_key] = []
myPageContent = self.clean_encoding(res.page_content)
contextDict[source_key].append(myPageContent)
result['answer'] = result['answer'].split('SOURCES:')[0].split('Sources:')[0].split('SOURCE:')[0].split('Source:')[0]
result['answer'] = self.clean_encoding(result['answer'])
sources = sources.replace('_SAS_TOKEN_PLACEHOLDER_', container_sas)
sources = self.filter_sourcesLinks(sources)
return question, result['answer'], contextDict, sources
def get_embeddings_model(self):
OPENAI_EMBEDDINGS_ENGINE_DOC = os.getenv('OPENAI_EMEBDDINGS_ENGINE', os.getenv('OPENAI_EMBEDDINGS_ENGINE_DOC', 'text-embedding-ada-002'))
OPENAI_EMBEDDINGS_ENGINE_QUERY = os.getenv('OPENAI_EMEBDDINGS_ENGINE', os.getenv('OPENAI_EMBEDDINGS_ENGINE_QUERY', 'text-embedding-ada-002'))
return {
"doc": OPENAI_EMBEDDINGS_ENGINE_DOC,
"query": OPENAI_EMBEDDINGS_ENGINE_QUERY
}
def get_completion(self, prompt, **kwargs):
if self.deployment_type == 'Chat':
return self.llm([HumanMessage(content=prompt)]).content
else:
return self.llm(prompt)
# remove paths from sources to only keep the filename
def filter_sourcesLinks(self, sources):
        # use regex to replace all occurrences of '[anypath/anypath/somefilename.xxx](the_link)' with '[somefilename](thelink)' in sources
pattern = r'\[[^\]]*?/([^/\]]*?)\]'
match = re.search(pattern, sources)
while match:
withoutExtensions = match.group(1).split('.')[0] # remove any extension to the name of the source document
sources = sources[:match.start()] + f'[{withoutExtensions}]' + sources[match.end():]
match = re.search(pattern, sources)
sources = ' \n ' + sources.replace('\n', ' \n ') # add a carriage return after each source
return sources
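    # Example of the transformation (hypothetical input): the source
    #   '[converted/report v1.pdf.txt](https://storage.example/report.pdf?sas)'
    # becomes
    #   ' \n [report v1](https://storage.example/report.pdf?sas) \n '
    # i.e. the path and extension are dropped from the display name while the link
    # target itself is left untouched.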
def extract_followupquestions(self, answer):
followupTag = answer.find('Follow-up Questions')
followupQuestions = answer.find('<<')
# take min of followupTag and folloupQuestions if not -1 to avoid taking the followup questions if there is no followupTag
followupTag = min(followupTag, followupQuestions) if followupTag != -1 and followupQuestions != -1 else max(followupTag, followupQuestions)
answer_without_followupquestions = answer[:followupTag] if followupTag != -1 else answer
followup_questions = answer[followupTag:].strip() if followupTag != -1 else ''
# Extract the followup questions as a list
pattern = r'\<\<(.*?)\>\>'
match = re.search(pattern, followup_questions)
followup_questions_list = []
while match:
followup_questions_list.append(followup_questions[match.start()+2:match.end()-2])
followup_questions = followup_questions[match.end():]
match = re.search(pattern, followup_questions)
if followup_questions_list != '':
# Extract follow up question
pattern = r'\d. (.*)'
match = re.search(pattern, followup_questions)
while match:
followup_questions_list.append(followup_questions[match.start()+3:match.end()])
followup_questions = followup_questions[match.end():]
match = re.search(pattern, followup_questions)
if followup_questions_list != '':
pattern = r'Follow-up Question: (.*)'
match = re.search(pattern, followup_questions)
while match:
followup_questions_list.append(followup_questions[match.start()+19:match.end()])
followup_questions = followup_questions[match.end():]
match = re.search(pattern, followup_questions)
# Special case when 'Follow-up questions:' appears in the answer after the <<
followupTag = answer_without_followupquestions.lower().find('follow-up questions')
if followupTag != -1:
answer_without_followupquestions = answer_without_followupquestions[:followupTag]
followupTag = answer_without_followupquestions.lower().find('follow up questions') # LLM can make variations...
if followupTag != -1:
answer_without_followupquestions = answer_without_followupquestions[:followupTag]
return answer_without_followupquestions, followup_questions_list
# insert citations in the answer - find filenames in the answer maching sources from the filenamelist and replace them with '${(id+1)}'
def insert_citations_in_answer(self, answer, filenameList):
        filenameList_lowered = [x.lower() for x in filenameList] # LLM can make case mistakes in returning the filename of the source
matched_sources = []
pattern = r'\[\[(.*?)\]\]'
match = re.search(pattern, answer)
while match:
filename = match.group(1).split('.')[0] # remove any extension to the name of the source document
if filename in filenameList:
if filename not in matched_sources:
matched_sources.append(filename.lower())
filenameIndex = filenameList.index(filename) + 1
answer = answer[:match.start()] + '$^{' + f'{filenameIndex}' + '}$' + answer[match.end():]
else:
answer = answer[:match.start()] + '$^{' + f'{filename.lower()}' + '}$' + answer[match.end():]
match = re.search(pattern, answer)
# When page is reloaded search for references already added to the answer (e.g. '${(id+1)}')
for id, filename in enumerate(filenameList_lowered):
reference = '$^{' + f'{id+1}' + '}$'
if reference in answer and not filename in matched_sources:
matched_sources.append(filename)
return answer, matched_sources, filenameList_lowered
def get_links_filenames(self, answer, sources):
split_sources = sources.split(' \n ') # soures are expected to be of format ' \n [filename1.ext](sourcelink1) \n [filename2.ext](sourcelink2) \n [filename3.ext](sourcelink3) \n '
srcList = []
linkList = []
filenameList = []
for src in split_sources:
if src != '':
srcList.append(src)
link = src[1:].split('(')[1][:-1].split(')')[0] # get the link
linkList.append(link)
filename = src[1:].split(']')[0] # retrieve the source filename.
source_url = link.split('?')[0]
answer = answer.replace(source_url, filename) # if LLM added a path to the filename, remove it from the answer
filenameList.append(filename)
answer, matchedSourcesList, filenameList = self.insert_citations_in_answer(answer, filenameList) # Add (1), (2), (3) to the answer to indicate the source of the answer
return answer, srcList, matchedSourcesList, linkList, filenameList
def clean_encoding(self, text):
try:
encoding = 'ISO-8859-1'
encodedtext = text.encode(encoding)
encodedtext = encodedtext.decode('utf-8')
except Exception as e:
encodedtext = text
return encodedtext
| [] |
2024-01-10 | ruoccofabrizio/azure-open-ai-embeddings-qna | code~utilities~azuresearch.py | """Wrapper around Azure Cognitive Search."""
from __future__ import annotations
import json
import logging
import uuid
from typing import Any, Callable, Dict, Iterable, List, Mapping, Optional, Tuple, Type
from pydantic import BaseModel, root_validator
import os
import numpy as np
from azure.core.exceptions import ResourceNotFoundError
from azure.core.credentials import AzureKeyCredential
from azure.identity import DefaultAzureCredential
from azure.search.documents import SearchClient
from azure.search.documents.indexes import SearchIndexClient
from azure.search.documents.models import Vector
from azure.search.documents.indexes.models import (
SearchIndex,
SearchField,
SearchFieldDataType,
SimpleField,
SearchableField,
SearchIndex,
SemanticConfiguration,
PrioritizedFields,
SemanticField,
SearchField,
SemanticSettings,
VectorSearch,
VectorSearchAlgorithmConfiguration,
)
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
logger = logging.getLogger()
AZURESEARCH_DIMENSIONS = int(os.environ.get("AZURESEARCH_DIMENSIONS", 1536)) # Default to OpenAI's ada-002 embedding model vector size
# Allow overriding field names for Azure Search
FIELDS_ID = os.environ.get("AZURESEARCH_FIELDS_ID", "id")
FIELDS_TITLE = os.environ.get("AZURESEARCH_FIELDS_TITLE", "title")
FIELDS_CONTENT = os.environ.get("AZURESEARCH_FIELDS_CONTENT", "content")
FIELDS_CONTENT_VECTOR = os.environ.get(
"AZURESEARCH_FIELDS_CONTENT_VECTOR", "content_vector")
FIELDS_TAG = os.environ.get("AZURESEARCH_FIELDS_TAG", "tag")
FIELDS_METADATA = os.environ.get("AZURESEARCH_FIELDS_METADATA", "metadata")
MAX_UPLOAD_BATCH_SIZE = 1000
MAX_DELETE_BATCH_SIZE = 1000
def get_search_client(endpoint: str, key: str, index_name: str, semantic_configuration_name:str = None) -> SearchClient:
if key is None:
credential = DefaultAzureCredential()
else:
credential = AzureKeyCredential(key)
index_client: SearchIndexClient = SearchIndexClient(
endpoint=endpoint, credential=credential)
try:
index_client.get_index(name=index_name)
except ResourceNotFoundError as ex:
# Fields configuration
fields = [
SimpleField(name=FIELDS_ID, type=SearchFieldDataType.String,
key=True, filterable=True),
SearchableField(name=FIELDS_TITLE, type=SearchFieldDataType.String,
searchable=True, retrievable=True),
SearchableField(name=FIELDS_CONTENT, type=SearchFieldDataType.String,
searchable=True, retrievable=True),
SearchField(name=FIELDS_CONTENT_VECTOR, type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
searchable=True, dimensions=AZURESEARCH_DIMENSIONS, vector_search_configuration="default"),
SearchableField(name=FIELDS_TAG, type=SearchFieldDataType.String,
filterable=True, searchable=True, retrievable=True),
SearchableField(name=FIELDS_METADATA, type=SearchFieldDataType.String,
searchable=True, retrievable=True)
]
# Vector search configuration
vector_search = VectorSearch(
algorithm_configurations=[
VectorSearchAlgorithmConfiguration(
name="default",
kind="hnsw",
hnsw_parameters={
"m": 4,
"efConstruction": 400,
"efSearch": 500,
"metric": "cosine"
}
)
]
)
# Create the semantic settings with the configuration
semantic_settings = None if semantic_configuration_name is None else SemanticSettings(
configurations=[SemanticConfiguration(
name=semantic_configuration_name,
prioritized_fields=PrioritizedFields(
title_field=SemanticField(field_name=FIELDS_TITLE),
prioritized_keywords_fields=[
SemanticField(field_name=FIELDS_TAG)],
prioritized_content_fields=[
SemanticField(field_name=FIELDS_CONTENT)]
)
)
]
)
# Create the search index with the semantic settings and vector search
index = SearchIndex(name=index_name, fields=fields,
vector_search=vector_search, semantic_settings=semantic_settings)
index_client.create_index(index)
# Create the search client
    return SearchClient(endpoint=endpoint, index_name=index_name, credential=credential)
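# Hedged usage sketch: builds the store defined below on top of the index that
# get_search_client creates lazily. The endpoint, key and embedding function are
# placeholders; in this repo they normally come from LLMHelper's environment settings.
def _example_store(embed_query):
    store = AzureSearch(
        azure_cognitive_search_name="https://<search-service>.search.windows.net",
        azure_cognitive_search_key="<admin-key>",
        index_name="embeddings",
        embedding_function=embed_query,
    )
    store.add_texts(["hello world"], metadatas=[{"source": "[demo](https://example.com/demo)"}])
    return store.similarity_search("hello world", k=1)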
class AzureSearch(VectorStore):
def __init__(
self,
azure_cognitive_search_name: str,
azure_cognitive_search_key: str,
index_name: str,
embedding_function: Callable,
semantic_configuration_name: str = None,
semantic_query_language: str = "en-us",
**kwargs: Any,
):
"""Initialize with necessary components."""
try:
from azure.search.documents import SearchClient
except ImportError:
raise ValueError(
"Could not import requests python package. "
"Please install it with `pip install --index-url https://pkgs.dev.azure.com/azure-sdk/public/_packaging/azure-sdk-for-python/pypi/simple/ azure-search-documents==11.4.0a20230509004`"
)
# Initialize base class
self.embedding_function = embedding_function
self.azure_cognitive_search_name = azure_cognitive_search_name
self.azure_cognitive_search_key = azure_cognitive_search_key
self.index_name = index_name
self.semantic_configuration_name = semantic_configuration_name
self.semantic_query_language = semantic_query_language
self.client = get_search_client(
self.azure_cognitive_search_name, self.azure_cognitive_search_key, self.index_name, self.semantic_configuration_name)
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Add texts data to an existing index."""
keys = kwargs.get("keys")
keys = list(map(lambda x: x.replace(':','_'), keys)) if keys else None
ids = []
# Write data to index
data = []
for i, text in enumerate(texts):
# Use provided key otherwise use default key
key = keys[i] if keys else str(uuid.uuid4())
metadata = metadatas[i] if metadatas else {}
# Add data to index
data.append({
"@search.action": "upload",
FIELDS_ID: key,
FIELDS_TITLE : metadata.get(FIELDS_TITLE, metadata.get("source", "[]").split('[')[1].split(']')[0]),
FIELDS_TAG: metadata.get(FIELDS_TAG, ""),
FIELDS_CONTENT: text,
FIELDS_CONTENT_VECTOR: np.array(
self.embedding_function(text), dtype=np.float32
).tolist(),
FIELDS_METADATA: json.dumps(metadata)
})
ids.append(key)
# Upload data in batches
if len(data) == MAX_UPLOAD_BATCH_SIZE:
response = self.client.upload_documents(documents=data)
# Check if all documents were successfully uploaded
if not all([r.succeeded for r in response]):
raise Exception(response)
# Reset data
data = []
# Upload data to index
response = self.client.upload_documents(documents=data)
# Check if all documents were successfully uploaded
if all([r.succeeded for r in response]):
return ids
else:
raise Exception(response)
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(
query, k=k, filters=kwargs.get("filters", None))
return [doc for doc, _ in docs_and_scores]
def similarity_search_with_score(
self, query: str, k: int = 4, filters: str = None
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
results = self.client.search(
search_text="",
vector=Vector(value=np.array(self.embedding_function(
query), dtype=np.float32).tolist(), k=k, fields=FIELDS_CONTENT_VECTOR),
select=[f"{FIELDS_TITLE},{FIELDS_CONTENT},{FIELDS_METADATA}"],
filter=filters
)
# Convert results to Document objects
docs = [
(
Document(
page_content=result[FIELDS_CONTENT], metadata=json.loads(
result[FIELDS_METADATA])
),
1 - float(result['@search.score']),
)
for result in results
]
return docs
def hybrid_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.hybrid_search_with_score(
query, k=k, filters=kwargs.get("filters", None))
return [doc for doc, _ in docs_and_scores]
def hybrid_search_with_score(
self, query: str, k: int = 4, filters: str = None
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query with an hybrid query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
results = self.client.search(
search_text=query,
vector=Vector(value=np.array(self.embedding_function(
query), dtype=np.float32).tolist(), k=k, fields=FIELDS_CONTENT_VECTOR),
select=[f"{FIELDS_TITLE},{FIELDS_CONTENT},{FIELDS_METADATA}"],
filter=filters,
top=k
)
# Convert results to Document objects
docs = [
(
Document(
page_content=result[FIELDS_CONTENT], metadata=json.loads(
result[FIELDS_METADATA])
),
1 - float(result['@search.score']),
)
for result in results
]
return docs
def semantic_hybrid_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.semantic_hybrid_search_with_score(
query, k=k, filters=kwargs.get('filters', None))
return [doc for doc, _ in docs_and_scores]
def semantic_hybrid_search_with_score(
self, query: str, k: int = 4, filters: str = None
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query with an hybrid query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
results = self.client.search(
search_text=query,
vector=Vector(value=np.array(self.embedding_function(
query), dtype=np.float32).tolist(), k=k, fields=FIELDS_CONTENT_VECTOR),
select=[f"{FIELDS_TITLE},{FIELDS_CONTENT},{FIELDS_METADATA}"],
filter=filters,
query_type="semantic",
query_language=self.semantic_query_language,
semantic_configuration_name=self.semantic_configuration_name,
query_caption="extractive",
query_answer="extractive",
top=k
)
# Get Semantic Answers
semantic_answers = results.get_answers()
semantic_answers_dict = {}
for semantic_answer in semantic_answers:
semantic_answers_dict[semantic_answer.key] = {
"text": semantic_answer.text,
"highlights": semantic_answer.highlights
}
# Convert results to Document objects
docs = [
(
Document(
page_content=result['content'],
metadata={**json.loads(result['metadata']), **{
'captions': {
'text': result.get('@search.captions', [{}])[0].text,
'highlights': result.get('@search.captions', [{}])[0].highlights
} if result.get("@search.captions") else {},
'answers': semantic_answers_dict.get(json.loads(result['metadata']).get('key'), '')
}
}
),
1 - float(result['@search.score']),
)
for result in results
]
return docs
@classmethod
def from_texts(
cls: Type[AzureSearch],
texts: List[str],
embedding: Embeddings,
azure_cognitive_search_name: str,
azure_cognitive_search_key: str,
metadatas: Optional[List[dict]] = None,
index_name: Optional[str] = None,
**kwargs: Any,
) -> AzureSearch:
# Name of the search index if not given
if not index_name:
index_name = uuid.uuid4().hex
# Creating a new Azure Search instance
azure_search = cls(azure_cognitive_search_name,
azure_cognitive_search_key, index_name, embedding.embed_query)
azure_search.add_texts(texts, metadatas, **kwargs)
return azure_search
def index_exists(self):
if self.azure_cognitive_search_key is None:
credential = DefaultAzureCredential()
else:
credential = AzureKeyCredential(self.azure_cognitive_search_key)
index_client: SearchIndexClient = SearchIndexClient(
endpoint=self.azure_cognitive_search_name, credential=credential)
return index_client.get_index(name=self.index_name)
def delete_keys(self, keys: List[str]):
documents = []
keys = list(map(lambda x: x.replace(':','_'), keys)) if keys else None
for i, key in enumerate(keys):
documents.append(
{
"@search.action": "delete",
FIELDS_ID: key
}
)
if i % MAX_DELETE_BATCH_SIZE == 0 and i != 0:
self.client.delete_documents(documents=documents)
documents = []
return self.client.delete_documents(documents=documents)
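# Illustrative usage sketch (not part of the original module). It assumes a deployed
# Azure Cognitive Search service and an `embed_query(text) -> List[float]` callable;
# the endpoint, key and index name below are placeholders.
def _example_azure_search_usage(embed_query: Callable[[str], List[float]]) -> None:
    store = AzureSearch(
        azure_cognitive_search_name="https://<service-name>.search.windows.net",  # assumed endpoint
        azure_cognitive_search_key="<admin-key>",  # assumed key
        index_name="demo-index",
        embedding_function=embed_query,
    )
    store.add_texts(["hello world"], metadatas=[{"title": "greeting"}])
    print(store.similarity_search("hello", k=1))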
class AzureSearchVectorStoreRetriever(BaseRetriever, BaseModel):
vectorstore: AzureSearch
search_type: str = "similarity"
k: int = 4
score_threshold: float = 0.4
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
def validate_search_type(cls, values: Dict) -> Dict:
"""Validate search type."""
if "search_type" in values:
search_type = values["search_type"]
if search_type not in ("similarity", "hybrid", "semantic_hybrid"):
raise ValueError(f"search_type of {search_type} not allowed.")
return values
def get_relevant_documents(self, query: str) -> List[Document]:
if self.search_type == "similarity":
docs = self.vectorstore.similarity_search(query, k=self.k)
elif self.search_type == "hybrid":
docs = self.vectorstore.hybrid_search(query, k=self.k)
elif self.search_type == "semantic_hybrid":
docs = self.vectorstore.semantic_hybrid_search(query, k=self.k)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
return docs
async def aget_relevant_documents(self, query: str) -> List[Document]:
raise NotImplementedError(
"AzureSearchVectorStoreRetriever does not support async") | [] |
2024-01-10 | activeloopai/langchain-vectordbs-course | Module%203~FableForge-master~api_utils.py | import re
from concurrent.futures import ThreadPoolExecutor
from dotenv import load_dotenv
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
import replicate
import json
from prompts import *
import requests
import streamlit as st
load_dotenv('keys.env')
class BuildBook:
book_text_prompt = BOOK_TEXT_PROMPT
def __init__(self, model_name, input_text, style):
self.chat = ChatOpenAI(model_name=model_name)
self.input_text = input_text
self.style = style
self.progress = st.progress(0)
self.progress_steps = 0
self.total_progress_steps = 30
self.progress_steps += 2
self.progress.progress(self.progress_steps / self.total_progress_steps, "Generating book text...")
self.book_text = self.get_pages()
self.progress_steps += 2
self.progress.progress(self.progress_steps / self.total_progress_steps, "Generating SD prompts...")
self.pages_list = self.get_list_from_text(self.book_text)
self.sd_prompts_list = self.get_prompts()
self.source_files = self.download_images()
self.list_of_tuples = self.create_list_of_tuples()
self.progress.progress(1.0, "Done! Wait one moment while your book is processed...")
def get_pages(self):
pages = self.chat([HumanMessage(content=f'{self.book_text_prompt} Topic: {self.input_text}')]).content
return pages
def get_prompts(self):
base_atmosphere = self.chat([HumanMessage(content=f'Generate a visual description of the overall lightning/atmosphere of this book using the function.'
f'{self.book_text}')], functions=get_lighting_and_atmosphere_function)
base_dict = func_json_to_dict(base_atmosphere)
summary = self.chat([HumanMessage(content=f'Generate a concise summary of the setting and visual details of the book')]).content
base_dict['summary_of_book_visuals'] = summary
def generate_prompt(page, base_dict):
prompt = self.chat([HumanMessage(content=f'General book info: {base_dict}. General style: {self.style} Passage: {page}.'
f' Generate a visual description of the passage using the function.'
f'Creatively fill all parameters with guessed/assumed values if they are missing.')],
functions=get_visual_description_function)
return func_json_to_dict(prompt)
with ThreadPoolExecutor(max_workers=10) as executor:
prompt_list = list(executor.map(generate_prompt, self.pages_list, [base_dict] * len(self.pages_list)))
prompts = prompt_combiner(prompt_list, base_dict, self.style)
return prompts
def get_list_from_text(self, text):
        new_list = re.split(r'Page \d+:', text)
new_list.pop(0)
return new_list
def create_images(self):
if len(self.pages_list) != len(self.sd_prompts_list):
            raise ValueError('Pages and Prompts do not match')
def generate_image(i, prompt):
print(f'{prompt} is the prompt for page {i + 1}')
output = replicate.run(
"stability-ai/stable-diffusion:db21e45d3f7023abc2a46ee38a23973f6dce16bb082a930b0c49861f96d1e5bf",
input={"prompt": 'art,' + prompt,
"negative_prompt": "photorealistic, photograph, bad anatomy, blurry, gross,"
"weird eyes, creepy, text, words, letters, realistic"
},
)
return output[0]
with ThreadPoolExecutor(max_workers=10) as executor:
image_urls = list(executor.map(generate_image, range(len(self.sd_prompts_list)), self.sd_prompts_list))
return image_urls
def download_images(self):
image_urls = self.create_images()
source_files = []
for i, url in enumerate(image_urls):
r = requests.get(url, stream=True)
file_path = f'images/{i+1}.png'
with open(file_path, 'wb') as file:
source_files.append(file_path)
for chunk in r.iter_content():
file.write(chunk)
self.progress_steps += 1
self.progress.progress(self.progress_steps / self.total_progress_steps, f"Downloading image {i+1}...")
return source_files
def create_list_of_tuples(self):
files = self.source_files
text = self.pages_list
return list(zip(files, text))
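# Illustrative sketch (not part of the original file). BuildBook is normally driven from
# the Streamlit front end; the model name, topic and style below are assumed example
# values, and valid OpenAI/Replicate credentials are expected in keys.env.
def _example_build_book():
    book = BuildBook(model_name="gpt-3.5-turbo", input_text="A fox who learns to fly", style="watercolor")
    return book.list_of_tuples  # [(image_path, page_text), ...] pairs for the final book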
def func_json_to_dict(response):
return json.loads(response.additional_kwargs['function_call']['arguments'])
def prompt_combiner(prompt_list, base_dict, style):
prompts = []
for i, prompt in enumerate(prompt_list):
entry = f"{prompt['base_setting']}, {prompt['setting']}, {prompt['time_of_day']}, {prompt['weather']}, {prompt['key_elements']}, {prompt['specific_details']}, " \
f"{base_dict['lighting']}, {base_dict['mood']}, {base_dict['color_palette']}, in the style of {style}"
prompts.append(entry)
return prompts
def process_page(chat, page, base_dict):
prompt = chat([HumanMessage(content=f'General book info: {base_dict}. Passage: {page}')],
functions=get_visual_description_function)
return func_json_to_dict(prompt)
| [
"Generate a concise summary of the setting and visual details of the book",
"[]",
"Generate a visual description of the overall lightning/atmosphere of this book using the function.",
"General book info: PLACEHOLDER. Passage: PLACEHOLDER",
"Creatively fill all parameters with guessed/assumed values if they are missing.",
" Generate a visual description of the passage using the function."
] |
2024-01-10 | layterz/promptx | promptx~application.py | import os
from loguru import logger
from rich import pretty
import openai
from .world import World
from .models.openai import ChatGPT
class App:
name: str
path: str
world: World
def __init__(self, name, path, world=None, db=None, llm=None, **kwargs):
self.name = name
self.path = path
self.world = world or World(name, db=db, default_llm=llm)
@classmethod
def load(cls, path, db, llm, env=None):
if llm is None:
default_llm_id = os.environ.get('PXX_DEFAULT_LLM', 'chatgpt')
if default_llm_id == 'chatgpt':
api_key = os.environ.get('OPENAI_API_KEY')
org_id = os.environ.get('OPENAI_ORG_ID')
# TODO: The 'openai.organization' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(organization=org_id)'
# openai.organization = org_id
                llm = ChatGPT(id='default', api_key=api_key, org_id=org_id)
config = {
'name': 'local',
'path': path,
'db': db,
'llm': llm,
}
pretty.install()
env = {**os.environ, **(env or {})}
log_file_path = f"./log/{env.get('PXX_ENV', 'development')}.log"
level = env.get('PXX_LOG_LEVEL', 'INFO')
# Configure Loguru
logger.remove()
logger.add(
log_file_path,
rotation="10 MB",
level=level,
format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <level>{message}</level>",
backtrace=True
)
logger.info("Log file: " + log_file_path)
logger.info("Log level: " + level)
return cls(**config)
def __repr__(self):
return f'<App {self.name} path={self.path}>'
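# Illustrative sketch (not part of the original module): loading an App with default
# settings. `db` is whatever store World expects; OPENAI_API_KEY is assumed to be set
# when falling back to the default ChatGPT LLM.
def _example_app_load(db):
    return App.load(path=".px", db=db, llm=None)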
| [] |
2024-01-10 | layterz/promptx | promptx~template.py | import time
import random
import json
from typing import *
import jsonschema
from loguru import logger
from pydantic import BaseModel
import openai
from jinja2 import Template as JinjaTemplate
from .collection import Collection, Entity, Query, model_to_json_schema, create_entity_from_schema
from .models import MockLLM
class MaxRetriesExceeded(Exception):
pass
E = TypeVar('E', bound=BaseModel)
class Example(Entity):
input: str
output: str
def __init__(self, input, output, **kwargs):
super().__init__(input=self.parse(input), output=self.parse(output), **kwargs)
def parse(self, x):
def _serialize(x):
if issubclass(type(x), BaseModel):
return x.model_dump()
if isinstance(x, str):
return x
else:
return json.dumps(x, default=_serialize)
class Template(Entity):
template: str = """
INSTRUCTIONS
---
{{instructions}}
{{format}}
{% if examples %}
EXAMPLES
---
{% endif %}
{{examples}}
{% if examples %}
END_EXAMPLES
---
{% endif %}
{{input}}
{{output}}
"""
input_template: str = """
INPUT
---
{{input}}
END_INPUT
"""
output_template: str = """
OUTPUT
---
{{output}}
"""
example_template: str = f"""
{input_template}
{output_template}
"""
format_template: str = """
FORMAT INSTRUCTIONS
---
{% if string_list_output %}
Return a JSON array of strings.
{% elif list_output %}
Return a list of valid JSON objects with the fields described below.
{% else %}
Return the output as a valid JSON object with the fields described below.
{% endif %}
{% for field in fields %}
- {{field.name}} (type: {{field.type_}}, required: {{field.required}}, default: {{field.default}}, {% for k, v in field.metadata.items()%}{{k}}: {{v}}, {% endfor %}): {{field.description}}
{% endfor %}
Make sure to use double quotes and avoid trailing commas!
Ensure any required fields are set, but you can use the default value
if it's defined and you are unsure what to use.
If you are unsure about any optional fields use `null` or the default value,
but try your best to fill them out.
END_FORMAT_INSTRUCTIONS
"""
type: str = 'template'
name: str = None
instructions: str = None
examples: List[Example] = None
input: str = None
output: str = None
context: str = None
data: Query = None
def __init__(self, examples=None, **kwargs):
if examples is not None:
for i, example in enumerate(examples):
if isinstance(example, dict):
example = Example(**example)
elif isinstance(example, tuple):
example = Example(*example)
if not isinstance(example, Example):
continue
examples[i] = example
kwargs['examples'] = examples
super().__init__(**kwargs)
class TemplateRunner:
def parse(self, x):
if x is None:
return {}
skip_list = ['id', 'type']
if isinstance(x, BaseModel):
skip_list += [k for k, v in x.model_fields.items() if v.json_schema_extra and v.json_schema_extra.get('generate') == False]
return {k: v for k, v in x.model_dump().items() if k not in skip_list}
elif isinstance(x, Entity):
skip_list += [k for k, v in x.model_fields.items() if v.json_schema_extra and v.json_schema_extra.get('generate') == False]
return {k: v for k, v in x.object.dict().items() if k not in skip_list}
elif isinstance(x, Collection):
return [
{k: v for k, v in y.dict().items() if k not in skip_list}
for y in x.objects
]
elif isinstance(x, str):
return x
elif isinstance(x, dict):
return {k: self.parse(v) for k, v in x.items()}
elif isinstance(x, list):
return [self.parse(y) for y in x]
else:
return x
def render(self, t, x, **kwargs):
input_template = JinjaTemplate(t.input_template)
input = input_template.render(**x) if len(x) > 0 else ''
output_template = JinjaTemplate(t.output_template)
output = output_template.render(**x) if len(x) > 0 else ''
vars = {
**x,
'instructions': t.instructions,
'examples': self.render_examples(t),
'format': self.render_format(t, x),
'input': input,
'output': output,
}
template = JinjaTemplate(t.template)
output = template.render(**vars)
return output
def format_field(self, name, field, definitions, required):
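        # Build a prompt-friendly description of one JSON-schema property: resolve
        # $ref/array item types, collect numeric and length constraints, and surface
        # enum options so the LLM knows which values are allowed.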
if name in ['id', 'type']:
return None
if field.get('generate', True) == False:
return None
description = field.get('description', '')
options = ''
metadata_keys = [
'minimum', 'maximum', 'exclusiveMinimum', 'exclusiveMaximum',
'minLength', 'maxLength',
'minItems', 'maxItems',
]
metadata = {
k: v for k, v in field.items()
if k in metadata_keys
}
definition = None
list_field = False
if field.get('type') == 'array':
list_field = True
item_type = field.get('items', {}).get('type', None)
if item_type is None:
ref = field.get('items', {}).get('$ref', None)
ref = ref.split('/')[-1]
definition = definitions.get(ref, {})
type_ = f'{definition.get("type")}[]'
else:
type_ = f'{item_type}[]'
field = field.get('items', {})
elif len(field.get('allOf', [])) > 0:
ref = field.get('allOf')[0].get('$ref')
ref = ref.split('/')[-1]
definition = definitions.get(ref, {})
type_ = f'{definition.get("type")}'
elif field.get('$ref'):
ref = field.get('$ref')
ref = ref.split('/')[-1]
definition = definitions.get(ref, {})
type_ = f'{definition.get("type")}'
else:
type_ = field.get('type', 'str')
if definition is not None and 'enum' in definition:
if list_field:
options += f'''
Select any relevant options from: {", ".join(definition["enum"])}
'''
else:
options += f'''
Select one option from: {", ".join(definition["enum"])}
'''
if len(options) > 0:
description += ' ' + options
return {
'name': name,
'title': field.get('title', None),
'type_': type_,
'default': field.get('default', None),
'description': description.strip(),
'required': name in required,
'metadata': metadata,
}
def render_format(self, t, x, **kwargs):
if t.output is None or t.output == str:
return ''
output = json.loads(t.output)
format_template = JinjaTemplate(t.format_template)
if output.get('type', None) == 'array' and output.get('items', {}).get('type', None) == 'string':
return format_template.render({
'string_list_output': True,
})
list_output = False
fields = []
properties = {}
if output.get('type', None) == 'array':
properties = output.get('items', {}).get('properties', {})
definitions = output.get('items', {}).get('$defs', {})
required = output.get('items', {}).get('required', [])
list_output = True
elif output.get('type', None) == 'object':
properties = output.get('properties', {})
definitions = output.get('$defs', {})
required = output.get('required', [])
for name, property in properties.items():
f = self.format_field(name, property, definitions, required)
fields += [f]
return format_template.render({
'fields': [field for field in fields if field is not None],
'list_output': list_output,
})
def render_examples(self, t, **kwargs):
if t.examples is None or len(t.examples) == 0:
return ''
examples = [
{
'input': e.input,
'output': e.output,
}
for e in random.sample(t.examples, min(len(t.examples), 3))
]
example_template = JinjaTemplate(t.example_template)
return '\n'.join([
example_template.render(**e) for e in examples
])
def process(self, session, t, x, output, allow_none=False, **kwargs):
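        # Parse the raw LLM output as JSON, validate it against the template's output
        # schema, and hydrate it into Entity objects (plain strings and string lists
        # pass through unchanged).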
if t.output is None:
return output
if allow_none and output is None:
return None
out = json.loads(output)
schema = model_to_json_schema(json.loads(t.output))
if schema.get('type', None) == 'string' or (schema.get('type', None) == 'array' and schema.get('items', {}).get('type', None) == 'string'):
return out
entities = create_entity_from_schema(schema, out, session=session, base=Entity)
return entities
def dict(self):
return {
'id': self.id,
'type': 'template',
'name': self.name or None,
'instructions': self.instructions,
'input': self.input,
'output': self.output,
}
def __call__(self, session, t, x, llm, **kwargs):
return self.forward(session, t, x, llm, **kwargs)
def forward(self, session, t, x, llm, context=None, history=None, retries=3, dryrun=False, allow_none=False, **kwargs):
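        # Render the prompt, call the LLM, then parse and validate the output against
        # the template's schema; any generation or validation error retries the whole
        # call until `retries` is exhausted, at which point the error is re-raised.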
if retries and retries <= 0:
e = MaxRetriesExceeded(f'{t.name} failed to forward {x}')
logger.error(e)
raise e
if dryrun:
logger.debug(f'Dryrun: {t.output}')
llm = MockLLM(output=t.output)
px = self.parse(x)
prompt_input = self.render(t, {'input': px})
try:
response = llm.generate(prompt_input, context=context or t.context, history=history)
except openai.APIError as e:
logger.error(f'LLM generation failed: {e}')
time.sleep(2)
if retries <= 1:
raise e
return self.forward(session, t, x, llm, retries=retries, **kwargs)
except Exception as e:
logger.error(f'Failed to generate {x}: {e}')
if retries <= 1:
raise e
return self.forward(session, t, x, llm, retries=retries-1, **kwargs)
try:
response.content = self.process(session, t, px, response.raw, allow_none=allow_none, **kwargs)
except jsonschema.exceptions.ValidationError as e:
logger.error(f'Output validation failed: {e}')
if retries <= 1:
raise e
return self.forward(session, t, x, llm, retries=retries-1, **kwargs)
except json.JSONDecodeError as e:
logger.warning(f'Failed to decode JSON from {e}')
if retries <= 1:
raise e
return self.forward(session, t, x, llm, retries=retries-1, **kwargs)
except Exception as e:
logger.error(f'Failed to forward {x}: {e}')
if retries <= 1:
raise e
return self.forward(session, t, x, llm, retries=retries-1, **kwargs)
return response
| [
"\n OUTPUT\n ---\n {{output}}\n ",
"\n FORMAT INSTRUCTIONS\n ---\n {% if string_list_output %}\n Return a JSON array of strings.\n {% elif list_output %}\n Return a list of valid JSON objects with the fields described below.\n {% else %}\n Return the output as a valid JSON object with the fields described below. \n {% endif %}\n {% for field in fields %}\n - {{field.name}} (type: {{field.type_}}, required: {{field.required}}, default: {{field.default}}, {% for k, v in field.metadata.items()%}{{k}}: {{v}}, {% endfor %}): {{field.description}}\n {% endfor %}\n\n Make sure to use double quotes and avoid trailing commas!\n Ensure any required fields are set, but you can use the default value \n if it's defined and you are unsure what to use. \n If you are unsure about any optional fields use `null` or the default value,\n but try your best to fill them out.\n END_FORMAT_INSTRUCTIONS\n ",
"input",
"\n INSTRUCTIONS\n ---\n {{instructions}}\n {{format}}\n {% if examples %}\n EXAMPLES\n ---\n {% endif %}\n {{examples}}\n {% if examples %}\n END_EXAMPLES\n ---\n {% endif %}\n {{input}}\n {{output}}\n ",
"\n INPUT\n ---\n {{input}}\n END_INPUT\n ",
"\n PLACEHOLDER\n PLACEHOLDER\n "
] |
2024-01-10 | layterz/promptx | tests~test_template.py | import pytest
import openai
import json
from promptx.template import *
from promptx.models import Response
from . import User, Trait, session, llm
@pytest.fixture
def template(session):
t = Template(instructions='Some example instructions', output=json.dumps(User.model_json_schema()))
session.store(t, collection='templates')
return session.query(ids=[t.id], collection='templates').first
def test_basic_response(session, llm):
template = Template(instructions='Some example instructions')
o = session.prompt(template=template, llm=llm)
assert o is not None
assert o == 'This is a mock response.'
def test_json_valid_output(session, template, llm):
llm.generate.return_value = Response(
raw='{ "name": "test", "age": 20, "traits": ["nice"] }',
)
o = session.prompt(template=template, llm=llm)
assert o is not None
assert o.type == 'user'
assert o.name == 'test'
assert o.age == 20
def test_load_from_template_id(session, template, llm):
llm.generate.return_value = Response(
raw='{ "name": "test", "age": 20, "traits": ["nice"] }',
)
o = session.prompt(template=template.id, llm=llm)
assert o is not None
assert o.type == 'user'
assert o.name == 'test'
assert o.age == 20
def test_json_valid_output__extra_field(session, template, llm):
response = Response(
raw='{ "name": "test", "age": 20, "location": "london", "traits": ["nice"] }',
)
llm.generate.return_value = response
o = session.prompt(template=template, llm=llm)
assert o.name == 'test'
assert o.age == 20
with pytest.raises(AttributeError):
assert o.location == 'london'
def test_json_invalid_output__missing_required_field(session, template, llm):
response = Response(
raw='{ "age": 20 }',
)
llm.generate.return_value = response
with pytest.raises(MaxRetriesExceeded):
session.prompt(template=template, llm=llm)
def test_json_invalid_output__formatting(session, template, llm):
response = Response(
raw='"name": "test", "age": 20, "traits": ["nice"] }',
)
llm.generate.return_value = response
with pytest.raises(MaxRetriesExceeded):
session.prompt(template=template, llm=llm)
def test_invalild_json_output__validation(session, template, llm):
response = Response(
raw='{ "name": "test", "age": "young" }',
)
llm.generate.return_value = response
with pytest.raises(MaxRetriesExceeded):
session.prompt(template=template, llm=llm)
# TODO: this should probably have some kind of separate retry budget
def test_exception_handling(session, template, llm):
llm.generate.side_effect = [openai.Timeout, Response(raw='Test response')]
template = Template(instructions='Some example instructions')
o = session.prompt(template=template, llm=llm)
assert o == 'Test response'
def test_parse_exception_handling(session, mocker, template, llm):
mocker.patch.object(TemplateRunner, 'process', side_effect=[*[json.JSONDecodeError('test', 'test', 0)] * 4, 'test'])
runner = TemplateRunner()
with pytest.raises(MaxRetriesExceeded):
o = runner(session, template, None, llm=llm)
mocker.patch.object(TemplateRunner, 'process', side_effect=[*[json.JSONDecodeError('test', 'test', 0)] * 3, 'test'])
runner = TemplateRunner()
o = runner(session, template, None, llm=llm)
assert o.content == 'test'
def test_invalid_input_raises_error(session, template, llm):
with pytest.raises(MaxRetriesExceeded):
session.prompt(template=template, input={'age': 'young'}, llm=llm)
def test_output_parsing(session, template, llm):
llm.generate.return_value = Response(raw='{ "name": "test", "age": 20, "traits": ["nice"] }')
o = session.prompt(template=template, llm=llm)
assert o.type == 'user'
assert o.name == 'test'
assert o.age == 20
def test_format_rendering(template):
runner = TemplateRunner()
p = runner.render(template, {})
assert template.instructions in p
def test_format_rendering_with_input(template):
runner = TemplateRunner()
p = runner.render(template, {'input': 'Some test input'})
assert 'Some test input' in p
def test_format_rendering_with_output(template):
runner = TemplateRunner()
p = runner.render(template, {'input': 'Some test input'})
assert 'name (type: string, required: True, default: None' in p
def test_format_rendering_object(template):
runner = TemplateRunner()
p = runner.render(template, {'input': 'Some test input'})
assert 'Return the output as a valid JSON object with the fields described below' in p
def test_format_rendering_list():
schema = json.dumps({
'type': 'array',
'items': {}
})
t = Template(instructions='Some example instructions', output=schema)
runner = TemplateRunner()
p = runner.render(t, {'input': 'Some test input'})
assert 'Return a list of valid JSON objects with the fields described below' in p
def test_format_rendering_with_basic_types(template):
runner = TemplateRunner()
p = runner.render(template, {'input': 'Some test input'})
assert 'name (type: string, required: True, default: None' in p
assert 'age (type: integer, required: True, default: None' in p
def test_format_rendering_with_enum(template):
runner = TemplateRunner()
p = runner.render(template, {'input': 'Some test input'})
assert 'role (type: string, required: False, default: admin' in p
assert 'Select one option from: admin, user' in p
def test_format_rendering_with_enum_list(session, template, llm):
runner = TemplateRunner()
p = runner.render(template, {'input': 'Some test input'})
assert 'traits (type: string[], required: True, default: None' in p
assert 'Select any relevant options from: nice, mean, funny, smart' in p
def test_format_rendering_with_excluded_fields(template):
runner = TemplateRunner()
p = runner.render(template, {'input': 'Some test input'})
assert 'banned (type: bool, required: False, default: False' not in p
@pytest.mark.skip(reason="Not implemented yet")
def test_format_rendering_with_field_description(template):
runner = TemplateRunner()
p = runner.render(template, {'input': 'Some test input'})
assert 'What kind of personality describes the user?' in p
def test_format_rendering_with_field_min_max(template):
runner = TemplateRunner()
p = runner.render(template, {'input': 'Some test input'})
assert 'minimum: 18' in p
assert 'exclusiveMaximum: 100' in p
def test_format_rendering_with_field_min_max_items(template):
runner = TemplateRunner()
p = runner.render(template, {'input': 'Some test input'})
assert 'minItems: 1' in p
assert 'maxItems: 3' in p
def test_format_rendering_with_field_min_max_length(template):
runner = TemplateRunner()
p = runner.render(template, {'input': 'Some test input'})
assert 'minLength: 3' in p
assert 'maxLength: 20' in p
def test_example_rendering(session):
user = User(name="John Wayne", age=64, traits=[Trait.mean])
runner = TemplateRunner()
template = Template(instructions='Some example instructions', output=user.model_dump_json(),
examples=[Example(input='Some test input', output=user.model_dump_json())])
session.store(template)
template_r = session.query(ids=[template.id]).first
p = runner.render(template_r, {'input': 'Some test input'})
assert 'EXAMPLES' in p
assert 'John Wayne' in p
assert '64' in p
assert 'mean' in p
assert 'banned' in p
def test_example_rendering_multiple(session, template):
user = User(name="John Wayne", age=64, traits=[Trait.mean])
runner = TemplateRunner()
examples = [Example(input='Some test input', output=user.model_dump_json()) for _ in range(3)]
template = Template(instructions='Some example instructions', output=user.model_dump_json(), examples=examples)
session.store(template)
template = session.query(ids=[template.id]).first
p = runner.render(template, {'input': 'Some test input'})
assert p.count('John Wayne') == 3 | [
"Some test input",
"Some example instructions"
] |
2024-01-10 | shuhei-fujita/call-api-samples | gpt~get_api_gpt.py | #!/usr/bin/env python
import openai
from dotenv import load_dotenv
import os
import json
import logging
import time
def formatt_responses(responses):
a = []
return a
def main(prompt_file_name):
    # --- Initialize configuration ---
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
engine_3 = os.getenv('ENGINE_GPT_3')
engine_4 = os.getenv('ENGINE_GPT_4')
with open(prompt_file_name, 'r') as file:
prompt = file.read()
logging.basicConfig(level=logging.INFO, format='%(asctime)s [%(levelname)s] %(message)s')
    # --- End of configuration initialization ---
    # API request
try:
logging.info('Starting API request...')
start_time = time.time()
responses = []
        prompts = [prompt[i:i+4096] for i in range(0, len(prompt), 4096)]  # split the prompt into 4096-character chunks
for prompt in prompts:
response = openai.ChatCompletion.create(
model=engine_3,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": f"Please summarize the following text: {prompt}"}
]
)
responses.append(response)
end_time = time.time()
logging.info('Finished API request in %.2f seconds.', end_time - start_time)
except Exception as e:
logging.error('Error occurred during API request: %s', e)
raise
    # Format the API responses
formatted_responses = [json.dumps(response['choices'][0]['message'], indent=4, ensure_ascii=False) for response in responses]
    # Write the API responses to output files
with open('result.json', 'w') as json_file:
json.dump([response['choices'][0]['message'] for response in responses], json_file, indent=4, ensure_ascii=False)
with open('result.txt', 'w') as txt_file:
txt_file.write("\n".join([response['choices'][0]['message']['content'] for response in responses]))
    return formatted_responses  # return the list of responses
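# Illustrative entry point (not in the original script); 'prompt.txt' is an assumed
# example path to a text file containing the prompt to summarize.
if __name__ == '__main__':
    main('prompt.txt')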
| [
"Please summarize the following text: PLACEHOLDER",
"You are a helpful assistant."
] |
2024-01-10 | apecloud/llama_index | llama_index~readers~obsidian.py | """Obsidian reader class.
Pass in the path to an Obsidian vault and it will parse all markdown
files into a List of Documents,
with each Document containing text from under an Obsidian header.
"""
import os
from pathlib import Path
from typing import Any, List
from langchain.docstore.document import Document as LCDocument
from llama_index.readers.base import BaseReader
from llama_index.readers.file.markdown_reader import MarkdownReader
from llama_index.readers.schema.base import Document
class ObsidianReader(BaseReader):
"""Utilities for loading data from an Obsidian Vault.
Args:
input_dir (str): Path to the vault.
"""
def __init__(self, input_dir: str):
"""Init params."""
self.input_dir = Path(input_dir)
def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]:
"""Load data from the input directory."""
docs: List[Document] = []
for dirpath, dirnames, filenames in os.walk(self.input_dir):
dirnames[:] = [d for d in dirnames if not d.startswith(".")]
for filename in filenames:
if filename.endswith(".md"):
filepath = os.path.join(dirpath, filename)
content = MarkdownReader().load_data(Path(filepath))
docs.extend(content)
return docs
def load_langchain_documents(self, **load_kwargs: Any) -> List[LCDocument]:
"""Load data in LangChain document format."""
docs = self.load_data(**load_kwargs)
return [d.to_langchain_format() for d in docs]
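# Illustrative sketch (not part of the original reader): load every markdown note from a
# vault directory; the path below is an assumed example.
def _example_obsidian_usage() -> List[Document]:
    reader = ObsidianReader("/path/to/obsidian/vault")
    return reader.load_data()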
| [] |
2024-01-10 | apecloud/llama_index | apecloud-chat~prompt_query.py | import sys
import os
import torch
import openai
import argparse
import logging
from langchain import OpenAI
from langchain.llms.base import LLM
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from gpt_index import GPTSimpleVectorIndex, SimpleDirectoryReader, QuestionAnswerPrompt, LLMPredictor, PromptHelper, ServiceContext
from gpt_index import GPTListIndex, SimpleDirectoryReader
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.embeddings.langchain import LangchainEmbedding
from transformers import pipeline
from IPython.display import Markdown, display
from gpt_index.prompts.base import Prompt
from gpt_index.prompts.prompt_type import PromptType
class TextToClusterDefinitionPrompt(Prompt):
prompt_type: PromptType = PromptType.CLUSTER_DEFINITION
input_variables: list[str] = ["query_str", "schema"]
QDRANT_CLUSTERDEFINITION_SCHEMA = '''apiVersion: apps.kubeblocks.io/v1alpha1
kind: ClusterDefinition
metadata:
name: qdrant-standalone
labels:
{{- include "qdrant.labels" . | nindent 4 }}
spec:
type: qdrant
connectionCredential:
username: root
password: "$(RANDOM_PASSWD)"
endpoint: "$(SVC_FQDN):$(SVC_PORT_tcp-qdrant)"
host: "$(SVC_FQDN)"
port: "$(SVC_PORT_tcp-qdrant)"
componentDefs:
- name: qdrant
workloadType: Stateful
characterType: qdrant
probes:
monitor:
builtIn: false
exporterConfig:
scrapePath: /metrics
scrapePort: 9187
logConfigs:
configSpecs:
- name: qdrant-standalone-config-template
templateRef: qdrant-standalone-config-template
volumeName: qdrant-config
namespace: {{ .Release.Namespace }}
service:
ports:
- name: tcp-qdrant
port: 6333
targetPort: tcp-qdrant
- name: grpc-qdrant
port: 6334
targetPort: grpc-qdrant
volumeTypes:
- name: data
type: data
podSpec:
securityContext:
fsGroup: 1001
containers:
- name: qdrant
imagePullPolicy: {{default .Values.images.pullPolicy "IfNotPresent"}}
securityContext:
runAsUser: 0
livenessProbe:
failureThreshold: 3
httpGet:
path: /
port: 6333
scheme: HTTP
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 10
readinessProbe:
failureThreshold: 2
httpGet:
path: /
port: 6333
scheme: HTTP
initialDelaySeconds: 5
periodSeconds: 15
successThreshold: 1
timeoutSeconds: 3
startupProbe:
failureThreshold: 18
httpGet:
path: /
port: 6333
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 3
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /qdrant/config/
name: qdrant-config
- mountPath: /qdrant/storage
name: data
dnsPolicy: ClusterFirst
enableServiceLinks: true
ports:
- name: tcp-qdrant
containerPort: 6333
- name: grpc-qdrant
containerPort: 6334
- name: tcp-metrics
containerPort: 9091
command:
- ./qdrant
env:'''
DEFAULT_TEXT_TO_CD_TMPL = (
"Given an input question, generate the answer in YAML format."
"The cluster definition YAML is a dialect in YAML format. "
"A cluster definition YAML has fixed schema, or specification."
"Cluster defintion declares the components of a database cluster instance."
"Each component has a defintion in componentDef spec. "
"The major part of a component defintion is k8s podSpec. \n"
"Here we give an example of generating a cluster definition YAML: "
"Question: Generate a qdrant cluster defintion for me, "
"the fields in the spec can be the default values of qdrant."
"Answer: the qdrant clusterdefintion YAML is {schema}\n"
"Use the following format:\n"
"Question: Question here\n"
"Answer: Final answer here\n"
"Question: {query_str}\n"
)
DEFAULT_TEXT_TO_CD_PROMPT = TextToClusterDefinitionPrompt(
#DEFAULT_TEXT_TO_CD_TMPL, stop_token="\nResult:"
DEFAULT_TEXT_TO_CD_TMPL
)
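# At query time the {schema} placeholder is filled with the qdrant example above and
# {query_str} carries the user's question (see main below).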
def parse_arguments():
parser = argparse.ArgumentParser(description="Query Engine for KubeBlocks.")
parser.add_argument("query_str", type=str, help="Query string for ask.")
return parser.parse_args()
def main():
args = parse_arguments()
query_str = args.query_str
print("query:", query_str)
# set env for OpenAI api key
# os.environ['OPENAI_API_KEY'] = ""
# set log level
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
    # define LLM
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=3000))
# define prompt helper
# set maximum input size
max_input_size = 32768
# set number of output tokens
num_output = 32768
# set maximum chunk overlap
max_chunk_overlap = 200
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
response, s = service_context.llm_predictor.predict (
DEFAULT_TEXT_TO_CD_PROMPT,
query_str=query_str,
        schema=QDRANT_CLUSTERDEFINITION_SCHEMA,
)
# print(s)
print(response)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | apecloud/llama_index | llama_index~indices~tree~select_leaf_retriever.py | """Leaf query mechanism."""
import logging
from typing import Any, Dict, List, Optional, cast
from langchain.input import print_text
from llama_index.data_structs.node import Node, NodeWithScore
from llama_index.indices.base_retriever import BaseRetriever
from llama_index.indices.query.schema import QueryBundle
from llama_index.indices.response import get_response_builder
from llama_index.indices.tree.base import TreeIndex
from llama_index.indices.tree.utils import get_numbered_text_from_nodes
from llama_index.indices.utils import (
extract_numbers_given_response,
get_sorted_node_list,
)
from llama_index.prompts.default_prompt_selectors import DEFAULT_REFINE_PROMPT_SEL
from llama_index.prompts.default_prompts import (
DEFAULT_QUERY_PROMPT,
DEFAULT_QUERY_PROMPT_MULTIPLE,
DEFAULT_TEXT_QA_PROMPT,
)
from llama_index.prompts.prompts import (
QuestionAnswerPrompt,
RefinePrompt,
TreeSelectMultiplePrompt,
TreeSelectPrompt,
)
from llama_index.response.schema import Response
from llama_index.token_counter.token_counter import llm_token_counter
from llama_index.utils import truncate_text
logger = logging.getLogger(__name__)
def get_text_from_node(
node: Node,
level: Optional[int] = None,
verbose: bool = False,
) -> str:
"""Get text from node."""
level_str = "" if level is None else f"[Level {level}]"
fmt_text_chunk = truncate_text(node.get_text(), 50)
logger.debug(f">{level_str} Searching in chunk: {fmt_text_chunk}")
response_txt = node.get_text()
fmt_response = truncate_text(response_txt, 200)
if verbose:
print_text(f">{level_str} Got node text: {fmt_response}\n", color="blue")
return response_txt
class TreeSelectLeafRetriever(BaseRetriever):
"""Tree select leaf retriever.
This class traverses the index graph and searches for a leaf node that can best
answer the query.
Args:
query_template (Optional[TreeSelectPrompt]): Tree Select Query Prompt
(see :ref:`Prompt-Templates`).
query_template_multiple (Optional[TreeSelectMultiplePrompt]): Tree Select
Query Prompt (Multiple)
(see :ref:`Prompt-Templates`).
child_branch_factor (int): Number of child nodes to consider at each level.
If child_branch_factor is 1, then the query will only choose one child node
to traverse for any given parent node.
If child_branch_factor is 2, then the query will choose two child nodes.
"""
def __init__(
self,
index: TreeIndex,
query_template: Optional[TreeSelectPrompt] = None,
text_qa_template: Optional[QuestionAnswerPrompt] = None,
refine_template: Optional[RefinePrompt] = None,
query_template_multiple: Optional[TreeSelectMultiplePrompt] = None,
child_branch_factor: int = 1,
verbose: bool = False,
**kwargs: Any,
):
self._index = index
self._index_struct = index.index_struct
self._docstore = index.docstore
self._service_context = index.service_context
self._text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT
self._refine_template = refine_template or DEFAULT_REFINE_PROMPT_SEL
self.query_template = query_template or DEFAULT_QUERY_PROMPT
self.query_template_multiple = (
query_template_multiple or DEFAULT_QUERY_PROMPT_MULTIPLE
)
self.child_branch_factor = child_branch_factor
self._verbose = verbose
def _query_with_selected_node(
self,
selected_node: Node,
query_bundle: QueryBundle,
prev_response: Optional[str] = None,
level: int = 0,
) -> str:
"""Get response for selected node.
If not leaf node, it will recursively call _query on the child nodes.
If prev_response is provided, we will update prev_response with the answer.
"""
query_str = query_bundle.query_str
if len(self._index_struct.get_children(selected_node)) == 0:
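            # Leaf node: synthesize an answer directly from this node's text.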
response_builder = get_response_builder(
self._service_context,
self._text_qa_template,
self._refine_template,
)
# use response builder to get answer from node
node_text = get_text_from_node(selected_node, level=level)
cur_response = response_builder.get_response(
query_str, [node_text], prev_response=prev_response
)
cur_response = cast(str, cur_response)
logger.debug(f">[Level {level}] Current answer response: {cur_response} ")
else:
cur_response = self._query_level(
self._index_struct.get_children(selected_node),
query_bundle,
level=level + 1,
)
if prev_response is None:
return cur_response
else:
context_msg = selected_node.get_text()
(
cur_response,
formatted_refine_prompt,
) = self._service_context.llm_predictor.predict(
self._refine_template,
query_str=query_str,
existing_answer=prev_response,
context_msg=context_msg,
)
logger.debug(f">[Level {level}] Refine prompt: {formatted_refine_prompt}")
logger.debug(f">[Level {level}] Current refined response: {cur_response} ")
return cur_response
def _query_level(
self,
cur_node_ids: Dict[int, str],
query_bundle: QueryBundle,
level: int = 0,
) -> str:
"""Answer a query recursively."""
query_str = query_bundle.query_str
cur_nodes = {
index: self._docstore.get_node(node_id)
for index, node_id in cur_node_ids.items()
}
cur_node_list = get_sorted_node_list(cur_nodes)
if len(cur_node_list) == 1:
logger.debug(f">[Level {level}] Only one node left. Querying node.")
return self._query_with_selected_node(
cur_node_list[0], query_bundle, level=level
)
elif self.child_branch_factor == 1:
query_template = self.query_template.partial_format(
num_chunks=len(cur_node_list), query_str=query_str
)
text_splitter = (
self._service_context.prompt_helper.get_text_splitter_given_prompt(
prompt=query_template,
num_chunks=len(cur_node_list),
)
)
numbered_node_text = get_numbered_text_from_nodes(
cur_node_list, text_splitter=text_splitter
)
(
response,
formatted_query_prompt,
) = self._service_context.llm_predictor.predict(
query_template,
context_list=numbered_node_text,
)
else:
query_template_multiple = self.query_template_multiple.partial_format(
num_chunks=len(cur_node_list),
query_str=query_str,
branching_factor=self.child_branch_factor,
)
text_splitter = (
self._service_context.prompt_helper.get_text_splitter_given_prompt(
prompt=query_template_multiple,
num_chunks=len(cur_node_list),
)
)
numbered_node_text = get_numbered_text_from_nodes(
cur_node_list, text_splitter=text_splitter
)
(
response,
formatted_query_prompt,
) = self._service_context.llm_predictor.predict(
query_template_multiple,
context_list=numbered_node_text,
)
logger.debug(
f">[Level {level}] current prompt template: {formatted_query_prompt}"
)
self._service_context.llama_logger.add_log(
{"formatted_prompt_template": formatted_query_prompt, "level": level}
)
debug_str = f">[Level {level}] Current response: {response}"
logger.debug(debug_str)
if self._verbose:
print_text(debug_str, end="\n")
numbers = extract_numbers_given_response(response, n=self.child_branch_factor)
if numbers is None:
debug_str = (
f">[Level {level}] Could not retrieve response - no numbers present"
)
logger.debug(debug_str)
if self._verbose:
print_text(debug_str, end="\n")
# just join text from current nodes as response
return response
result_response = None
for number_str in numbers:
number = int(number_str)
if number > len(cur_node_list):
logger.debug(
f">[Level {level}] Invalid response: {response} - "
f"number {number} out of range"
)
return response
# number is 1-indexed, so subtract 1
selected_node = cur_node_list[number - 1]
info_str = (
f">[Level {level}] Selected node: "
f"[{number}]/[{','.join([str(int(n)) for n in numbers])}]"
)
logger.info(info_str)
if self._verbose:
print_text(info_str, end="\n")
debug_str = " ".join(selected_node.get_text().splitlines())
full_debug_str = (
f">[Level {level}] Node "
f"[{number}] Summary text: "
f"{ selected_node.get_text() }"
)
logger.debug(full_debug_str)
if self._verbose:
print_text(full_debug_str, end="\n")
result_response = self._query_with_selected_node(
selected_node,
query_bundle,
prev_response=result_response,
level=level,
)
# result_response should not be None
return cast(str, result_response)
def _query(self, query_bundle: QueryBundle) -> Response:
"""Answer a query."""
# NOTE: this overrides the _query method in the base class
info_str = f"> Starting query: {query_bundle.query_str}"
logger.info(info_str)
if self._verbose:
print_text(info_str, end="\n")
response_str = self._query_level(
self._index_struct.root_nodes,
query_bundle,
level=0,
).strip()
# TODO: fix source nodes
return Response(response_str, source_nodes=[])
def _select_nodes(
self,
cur_node_list: List[Node],
query_bundle: QueryBundle,
level: int = 0,
) -> List[Node]:
query_str = query_bundle.query_str
if self.child_branch_factor == 1:
query_template = self.query_template.partial_format(
num_chunks=len(cur_node_list), query_str=query_str
)
text_splitter = (
self._service_context.prompt_helper.get_text_splitter_given_prompt(
prompt=query_template,
num_chunks=len(cur_node_list),
)
)
numbered_node_text = get_numbered_text_from_nodes(
cur_node_list, text_splitter=text_splitter
)
(
response,
formatted_query_prompt,
) = self._service_context.llm_predictor.predict(
query_template,
context_list=numbered_node_text,
)
else:
query_template_multiple = self.query_template_multiple.partial_format(
num_chunks=len(cur_node_list),
query_str=query_str,
branching_factor=self.child_branch_factor,
)
text_splitter = (
self._service_context.prompt_helper.get_text_splitter_given_prompt(
prompt=query_template_multiple,
num_chunks=len(cur_node_list),
)
)
numbered_node_text = get_numbered_text_from_nodes(
cur_node_list, text_splitter=text_splitter
)
(
response,
formatted_query_prompt,
) = self._service_context.llm_predictor.predict(
query_template_multiple,
context_list=numbered_node_text,
)
logger.debug(
f">[Level {level}] current prompt template: {formatted_query_prompt}"
)
self._service_context.llama_logger.add_log(
{"formatted_prompt_template": formatted_query_prompt, "level": level}
)
debug_str = f">[Level {level}] Current response: {response}"
logger.debug(debug_str)
if self._verbose:
print_text(debug_str, end="\n")
numbers = extract_numbers_given_response(response, n=self.child_branch_factor)
if numbers is None:
debug_str = (
f">[Level {level}] Could not retrieve response - no numbers present"
)
logger.debug(debug_str)
if self._verbose:
print_text(debug_str, end="\n")
# just join text from current nodes as response
return []
selected_nodes = []
for number_str in numbers:
number = int(number_str)
if number > len(cur_node_list):
logger.debug(
f">[Level {level}] Invalid response: {response} - "
f"number {number} out of range"
)
continue
# number is 1-indexed, so subtract 1
selected_node = cur_node_list[number - 1]
info_str = (
f">[Level {level}] Selected node: "
f"[{number}]/[{','.join([str(int(n)) for n in numbers])}]"
)
logger.info(info_str)
if self._verbose:
print_text(info_str, end="\n")
debug_str = " ".join(selected_node.get_text().splitlines())
full_debug_str = (
f">[Level {level}] Node "
f"[{number}] Summary text: "
f"{ selected_node.get_text() }"
)
logger.debug(full_debug_str)
if self._verbose:
print_text(full_debug_str, end="\n")
selected_nodes.append(selected_node)
return selected_nodes
def _retrieve_level(
self,
cur_node_ids: Dict[int, str],
query_bundle: QueryBundle,
level: int = 0,
) -> List[Node]:
"""Answer a query recursively."""
cur_nodes = {
index: self._docstore.get_node(node_id)
for index, node_id in cur_node_ids.items()
}
cur_node_list = get_sorted_node_list(cur_nodes)
if len(cur_node_list) > self.child_branch_factor:
selected_nodes = self._select_nodes(
cur_node_list,
query_bundle,
level=level,
)
else:
selected_nodes = cur_node_list
children_nodes = {}
for node in selected_nodes:
node_dict = self._index_struct.get_children(node)
children_nodes.update(node_dict)
if len(children_nodes) == 0:
# NOTE: leaf level
return selected_nodes
else:
return self._retrieve_level(children_nodes, query_bundle, level + 1)
@llm_token_counter("retrieve")
def _retrieve(
self,
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
"""Get nodes for response."""
nodes = self._retrieve_level(
self._index_struct.root_nodes,
query_bundle,
level=0,
)
return [NodeWithScore(node) for node in nodes]
| [] |
2024-01-10 | apecloud/llama_index | apecloud-chat~chatbox.py | import sys
import os
import torch
import openai
import argparse
import logging
from langchain import OpenAI
from langchain.llms.base import LLM
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.agents import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent
from langchain.agents.agent_types import AgentType
from gpt_index.langchain_helpers.agents import LlamaToolkit, create_llama_agent, create_llama_chat_agent, IndexToolConfig, GraphToolConfig
from gpt_index.indices.query.query_transform.base import DecomposeQueryTransform
from gpt_index import GPTSimpleVectorIndex, SimpleDirectoryReader, QuestionAnswerPrompt
from gpt_index import LLMPredictor, PromptHelper, ServiceContext
from gpt_index.indices.composability import ComposableGraph
from gpt_index import GPTListIndex, SimpleDirectoryReader
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.embeddings.langchain import LangchainEmbedding
from transformers import pipeline
from read_key import read_key_from_file
def parse_arguments():
parser = argparse.ArgumentParser(description="Query Engine for KubeBlocks.")
parser.add_argument("key_file", type=str, help="Key file for OpenAI_API_KEY.")
return parser.parse_args()
def main():
args = parse_arguments()
key_file = args.key_file
openai_api_key = read_key_from_file(key_file)
# set env for OpenAI api key
os.environ['OPENAI_API_KEY'] = openai_api_key
print(f"OPENAI_API_KEY:{openai_api_key}")
# set log level
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
    logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# initialize the index set with codes and documents
index_set = {}
index_set["Docs"] = GPTSimpleVectorIndex.load_from_disk('doc.json')
index_set["Code"] = GPTSimpleVectorIndex.load_from_disk('code.json')
index_set["Config"] = GPTSimpleVectorIndex.load_from_disk('config.json')
# initialize summary for each index
index_summaries = ["design and user documents for kubeblocks", "codes of implementations of kubeblocks", "config for kubeblocks"]
# define a LLMPredictor
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003"))
# define prompt helper
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_output = 4096
# set maximum chunk overlap
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
# define a list index over the vector indices
# allow us to synthesize information across each index
graph = ComposableGraph.from_indices(
root_index_cls = GPTListIndex,
children_indices = [index_set["Docs"], index_set["Code"], index_set["Config"]],
index_summaries = index_summaries,
service_context = service_context,
)
decompose_transform = DecomposeQueryTransform(
llm_predictor, verbose=True
)
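    # The decompose transform rewrites the user question into simpler sub-questions
    # before querying the vector indices under the composable graph.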
query_configs = [
{
"index_struct_type": "simple_dict",
"query_mode": "default",
"query_kwargs":{
"similarity_top_k": 3,
},
"query_transform": decompose_transform
},
{
"index_struct_type": "list",
"query_mode": "default",
"query_kwargs": {
"response_mode": "tree_summarize",
"verbose": True
}
},
]
# graph config
graph_config = GraphToolConfig(
graph = graph,
name = f"Graph Index",
description = "useful when you want to answer queries that about how to use and develop with kubeblocks",
query_configs = query_configs,
tool_kwargs = {"return_direct": True, "return_sources": True},
return_sources = True
)
# define toolkit
index_configs = []
tool_config = IndexToolConfig(
index = index_set["Docs"],
name = f"Vector Index Docs",
description = '''answer questions about how to install, deploy, maintain kubeblocks; \
questions about clusterdefinition, clusterversion, cluster; \
    questions about lifecycle, monitoring, backup, safety for all kinds of databases''',
index_query_kwargs = {"similarity_top_k": 3},
tool_kwargs = {"return_direct": True, "return_sources": True}
)
index_configs.append(tool_config)
tool_config = IndexToolConfig(
index = index_set["Code"],
name = f"Vector Index Code",
description = '''answer questions about the code implementations of kubeblocks;
questions about the code of clusterdefinition, clusterversion, cluster;
    questions about lifecycle, monitoring, backup, safety for all kinds of databases''',
index_query_kwargs = {"similarity_top_k": 3},
tool_kwargs = {"return_direct": True, "return_sources": True}
)
index_configs.append(tool_config)
tool_config = IndexToolConfig(
index = index_set["Config"],
name = f"Vector Index Config",
description = '''answer questions about the generation of configs in kubeblocks;
questions about the configs of clusterdefinition, clusterversion, cluster;
backuppolicy, backup, RBAC, OpsRequest, podSpec, containers, volumeClaimTemplates, volumes''',
index_query_kwargs = {"similarity_top_k": 3},
tool_kwargs = {"return_direct": True, "return_sources": True}
)
index_configs.append(tool_config)
    toolkit = LlamaToolkit(
index_configs = index_configs,
graph_configs = [graph_config]
)
# create the llama agent
memory = ConversationBufferMemory(memory_key="chat_history")
llm = OpenAI(temperature=0)
agent_chain = create_llama_agent(
        toolkit,
llm,
#agent = AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
memory = memory,
verbose = True
)
while True:
text_input = input("User:")
response = agent_chain.run(input=text_input)
print(f"Agent: {response}")
if __name__ == "__main__":
main()
| [] |
2024-01-10 | apecloud/llama_index | llama_index~program~__init__.py | from llama_index.program.guidance_program import GuidancePydanticProgram
__all__ = ["GuidancePydanticProgram"]
| [] |
2024-01-10 | apecloud/llama_index | llama_index~output_parsers~guardrails.py | """Guardrails output parser.
See https://github.com/ShreyaR/guardrails.
"""
try:
from guardrails import Guard
except ImportError:
Guard = None
PromptCallable = None
from copy import deepcopy
from typing import Any, Callable, Optional
from langchain.llms.base import BaseLLM
from llama_index.output_parsers.base import BaseOutputParser
def get_callable(llm: Optional[BaseLLM]) -> Optional[Callable]:
"""Get callable."""
if llm is None:
return None
return llm.__call__
class GuardrailsOutputParser(BaseOutputParser):
"""Guardrails output parser."""
def __init__(
self,
guard: Guard,
llm: Optional[BaseLLM] = None,
format_key: Optional[str] = None,
):
"""Initialize a Guardrails output parser."""
self.guard: Guard = guard
self.llm = llm
self.format_key = format_key
@classmethod
def from_rail(
cls, rail: str, llm: Optional[BaseLLM] = None
) -> "GuardrailsOutputParser":
"""From rail."""
if Guard is None:
raise ImportError(
"Guardrails is not installed. Run `pip install guardrails-ai`. "
)
return cls(Guard.from_rail(rail), llm=llm)
@classmethod
def from_rail_string(
cls, rail_string: str, llm: Optional[BaseLLM] = None
) -> "GuardrailsOutputParser":
"""From rail string."""
if Guard is None:
raise ImportError(
"Guardrails is not installed. Run `pip install guardrails-ai`. "
)
return cls(Guard.from_rail_string(rail_string), llm=llm)
def parse(
self,
output: str,
llm: Optional[BaseLLM] = None,
num_reasks: Optional[int] = 1,
*args: Any,
**kwargs: Any
) -> Any:
"""Parse, validate, and correct errors programmatically."""
llm = llm or self.llm
llm_fn = get_callable(llm)
return self.guard.parse(
output, llm_api=llm_fn, num_reasks=num_reasks, *args, **kwargs
)
def format(self, query: str) -> str:
"""Format a query with structured output formatting instructions."""
output_schema_text = deepcopy(self.guard.rail.prompt)
# Add format instructions here.
format_instructions_tmpl = self.guard.raw_prompt.format_instructions
# NOTE: output_schema is fixed
format_instructions = format_instructions_tmpl.format(
output_schema=output_schema_text
)
if self.format_key is not None:
fmt_query = query.format(**{self.format_key: format_instructions})
else:
fmt_query = query + "\n\n" + format_instructions
return fmt_query
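# Usage sketch (illustrative, not part of the original module; assumes
# `from langchain.llms import OpenAI` and that "structured_answer.rail" exists):
#   parser = GuardrailsOutputParser.from_rail("structured_answer.rail", llm=OpenAI(temperature=0))
#   prompt = parser.format("Summarize the document below.")   # appends format instructions
#   structured_output = parser.parse(raw_llm_output)          # validates/corrects via guardrails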
| [
"None"
] |
2024-01-10 | apecloud/llama_index | llama_index~indices~service_context.py | import dataclasses
import logging
from dataclasses import dataclass
from typing import Optional
from langchain.base_language import BaseLanguageModel
import llama_index
from llama_index.callbacks.base import CallbackManager
from llama_index.embeddings.base import BaseEmbedding
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.indices.prompt_helper import PromptHelper
from llama_index.langchain_helpers.chain_wrapper import LLMPredictor
from llama_index.llm_predictor.base import BaseLLMPredictor, LLMMetadata
from llama_index.logger import LlamaLogger
from llama_index.node_parser.interface import NodeParser
from llama_index.node_parser.simple import SimpleNodeParser
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SimpleNodeParser.from_defaults(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata = dataclasses.replace(llm_metadata, context_window=context_window)
if num_output is not None:
llm_metadata = dataclasses.replace(llm_metadata, num_output=num_output)
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
node_parser: NodeParser
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[BaseLanguageModel] = None,
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[BaseEmbedding] = None,
node_parser: Optional[NodeParser] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
You can change the base defaults by setting llama_index.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.global_service_context is not None:
return cls.from_service_context(
llama_index.global_service_context,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
llama_logger=llama_logger,
callback_manager=callback_manager,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
)
callback_manager = callback_manager or CallbackManager([])
if llm is not None:
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or LLMPredictor()
llm_predictor.callback_manager = callback_manager
# NOTE: the embed_model isn't used in all indices
embed_model = embed_model or OpenAIEmbedding()
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.get_llm_metadata(),
context_window=context_window,
num_output=num_output,
)
node_parser = node_parser or _get_default_node_parser(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=node_parser,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[BaseLanguageModel] = None,
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[BaseEmbedding] = None,
node_parser: Optional[NodeParser] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
if chunk_size_limit is not None and chunk_size is None:
            logger.warning(
                "chunk_size_limit is deprecated, please specify chunk_size instead"
            )
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm is not None:
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
llm_predictor.callback_manager = callback_manager
# NOTE: the embed_model isn't used in all indices
embed_model = embed_model or service_context.embed_model
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.get_llm_metadata(),
context_window=context_window,
num_output=num_output,
)
node_parser = node_parser or service_context.node_parser
if chunk_size is not None or chunk_overlap is not None:
node_parser = _get_default_node_parser(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager,
)
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=node_parser,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.global_service_context = service_context
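# Usage sketch (illustrative, not part of the original module):
#   from llama_index.indices.service_context import ServiceContext, set_global_service_context
#   ctx = ServiceContext.from_defaults(chunk_size=512)
#   set_global_service_context(ctx)  # subsequent indices fall back to these defaults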
| [] |
2024-01-10 | apecloud/llama_index | llama_index~prompts~default_prompt_selectors.py | """Prompt selectors."""
from langchain.chains.prompt_selector import ConditionalPromptSelector, is_chat_model
from llama_index.prompts.chat_prompts import (
CHAT_REFINE_PROMPT,
CHAT_REFINE_TABLE_CONTEXT_PROMPT,
)
from llama_index.prompts.default_prompts import (
DEFAULT_REFINE_PROMPT,
DEFAULT_REFINE_TABLE_CONTEXT_PROMPT,
)
from llama_index.prompts.prompt_type import PromptType
from llama_index.prompts.prompts import RefinePrompt, RefineTableContextPrompt
DEFAULT_REFINE_PROMPT_SEL_LC = ConditionalPromptSelector(
default_prompt=DEFAULT_REFINE_PROMPT.get_langchain_prompt(),
conditionals=[(is_chat_model, CHAT_REFINE_PROMPT.get_langchain_prompt())],
)
DEFAULT_REFINE_PROMPT_SEL = RefinePrompt(
langchain_prompt_selector=DEFAULT_REFINE_PROMPT_SEL_LC,
prompt_type=PromptType.REFINE,
)
DEFAULT_REFINE_TABLE_CONTEXT_PROMPT_SEL_LC = ConditionalPromptSelector(
default_prompt=DEFAULT_REFINE_TABLE_CONTEXT_PROMPT.get_langchain_prompt(),
conditionals=[
(is_chat_model, CHAT_REFINE_TABLE_CONTEXT_PROMPT.get_langchain_prompt())
],
)
DEFAULT_REFINE_TABLE_CONTEXT_PROMPT_SEL = RefineTableContextPrompt(
langchain_prompt_selector=DEFAULT_REFINE_TABLE_CONTEXT_PROMPT_SEL_LC,
prompt_type=PromptType.TABLE_CONTEXT,
)
| [] |
2024-01-10 | apecloud/llama_index | apecloud-chat~chatbox_qdrant.py | import sys
import os
import argparse
import logging
from langchain import OpenAI
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent
from langchain.agents.agent_types import AgentType
from gpt_index.langchain_helpers.agents import LlamaToolkit, create_llama_agent, create_llama_chat_agent, IndexToolConfig, GraphToolConfig
from gpt_index.indices.query.query_transform.base import DecomposeQueryTransform
from gpt_index import GPTSimpleVectorIndex, SimpleDirectoryReader, QuestionAnswerPrompt
from gpt_index import LLMPredictor, PromptHelper, ServiceContext
from gpt_index.indices.composability import ComposableGraph
from gpt_index.indices.base import IS
from gpt_index import GPTListIndex, SimpleDirectoryReader
import qdrant_client
from gpt_index.vector_stores.qdrant import QdrantVectorStore
from gpt_index.data_structs.data_structs_v2 import V2IndexStruct,IndexDict
from gpt_index import GPTQdrantIndex
from read_key import read_key_from_file
def parse_arguments():
parser = argparse.ArgumentParser(description="Query Engine for KubeBlocks.")
parser.add_argument("key_file", type=str, help="Key file for OpenAI_API_KEY.")
return parser.parse_args()
def main():
args = parse_arguments()
key_file = args.key_file
openai_api_key = read_key_from_file(key_file)
# set env for OpenAI api key
os.environ['OPENAI_API_KEY'] = openai_api_key
print(f"OPENAI_API_KEY:{openai_api_key}")
# set log level
logging.basicConfig(stream=sys.stdout, level=logging.ERROR)
    logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# initialize the qdrant client
client = qdrant_client.QdrantClient(
url="http://localhost",
port=6333,
grpc_port=6334,
prefer_grpc=False,
https=False,
timeout=300
)
# initialize the index set with codes and documents
index_set = {}
index_struct = IndexDict(summary=None)
index_set["Docs"] = GPTQdrantIndex(client=client, collection_name="kubeblocks_doc", index_struct=index_struct)
index_set["Code"] = GPTQdrantIndex(client=client, collection_name="kubeblocks_code", index_struct=index_struct)
index_set["Config"] = GPTQdrantIndex(client=client, collection_name="kubeblocks_config", index_struct=index_struct)
# initialize summary for each index
index_summaries = ["design and user documents for kubeblocks", "codes of implementations of kubeblocks", "config for kubeblocks"]
# define a LLMPredictor
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003"))
# define prompt helper
# set maximum input size
max_input_size = 4096
# set number of output tokens
num_output = 4096
# set maximum chunk overlap
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
# define a list index over the vector indices
# allow us to synthesize information across each index
graph = ComposableGraph.from_indices(
root_index_cls = GPTListIndex,
children_indices = [index_set["Docs"], index_set["Code"], index_set["Config"]],
index_summaries = index_summaries,
service_context = service_context,
)
decompose_transform = DecomposeQueryTransform(
llm_predictor, verbose=True
)
query_configs = [
{
"index_struct_type": "simple_dict",
"query_mode": "default",
"query_kwargs":{
"similarity_top_k": 3,
},
"query_transform": decompose_transform
},
{
"index_struct_type": "list",
"query_mode": "default",
"query_kwargs": {
"response_mode": "tree_summarize",
"verbose": True
}
},
]
# graph config
graph_config = GraphToolConfig(
graph = graph,
name = f"Graph Index",
description = "useful when you want to answer queries that about how to use and develop with kubeblocks",
query_configs = query_configs,
tool_kwargs = {"return_direct": True, "return_sources": True},
return_sources=True
)
# define toolkit
index_configs = []
tool_config = IndexToolConfig(
index = index_set["Docs"],
name = f"Vector Index Docs",
description = '''answer questions about how to install, deploy, maintain kubeblocks; \
questions about clusterdefinition, clusterversion, cluster; \
    questions about lifecycle, monitoring, backup, safety for all kinds of databases''',
index_query_kwargs = {"similarity_top_k": 3},
tool_kwargs = {"return_direct": True, "return_sources": True}
)
index_configs.append(tool_config)
tool_config = IndexToolConfig(
index = index_set["Code"],
name = f"Vector Index Code",
description = '''answer questions about the code implementations of kubeblocks;
questions about the code of clusterdefinition, clusterversion, cluster;
    questions about lifecycle, monitoring, backup, safety for all kinds of databases''',
index_query_kwargs = {"similarity_top_k": 3},
tool_kwargs = {"return_direct": True, "return_sources": True}
)
index_configs.append(tool_config)
tool_config = IndexToolConfig(
index = index_set["Config"],
name = f"Vector Index Config",
description = '''answer questions about the generation of configs in kubeblocks;
questions about the configs of clusterdefinition, clusterversion, cluster;
backuppolicy, backup, RBAC, OpsRequest, podSpec, containers, volumeClaimTemplates, volumes''',
index_query_kwargs = {"similarity_top_k": 3},
tool_kwargs = {"return_direct": True, "return_sources": True}
)
index_configs.append(tool_config)
    toolkit = LlamaToolkit(
index_configs = index_configs,
graph_configs = [graph_config]
)
# create the llama agent
memory = ConversationBufferMemory(memory_key="chat_history")
llm = OpenAI(temperature=0)
agent_chain = create_llama_agent(
        toolkit,
llm,
#agent = AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
memory = memory,
verbose = True
)
while True:
text_input = input("User:")
response = agent_chain.run(input=text_input)
print(f"Agent: {response}")
if __name__ == "__main__":
main()
| [] |
2024-01-10 | apecloud/llama_index | llama_index~prompts~guidance_utils.py | from typing import TYPE_CHECKING, Optional, Type, TypeVar
from llama_index.output_parsers.base import OutputParserException
from llama_index.output_parsers.utils import parse_json_markdown
if TYPE_CHECKING:
from guidance import Program
from pydantic import BaseModel
def convert_to_handlebars(text: str) -> str:
"""Convert a python format string to handlebars-style template.
In python format string, single braces {} are used for variable substitution,
and double braces {{}} are used for escaping actual braces (e.g. for JSON dict)
In handlebars template, double braces {{}} are used for variable substitution,
and single braces are actual braces (e.g. for JSON dict)
This is currently only used to convert a python format string based prompt template
to a guidance program template.
"""
# Replace double braces with a temporary placeholder
var_left = "TEMP_BRACE_LEFT"
var_right = "TEMP_BRACE_RIGHT"
text = text.replace("{{", var_left)
text = text.replace("}}", var_right)
# Replace single braces with double braces
text = text.replace("{", "{{")
text = text.replace("}", "}}")
# Replace the temporary placeholder with single braces
text = text.replace(var_left, "{")
text = text.replace(var_right, "}")
return text
def wrap_json_markdown(text: str) -> str:
"""Wrap text in json markdown formatting block."""
return "```json\n" + text + "\n```"
def pydantic_to_guidance_output_template(cls: Type[BaseModel]) -> str:
"""Convert a pydantic model to guidance output template."""
return json_schema_to_guidance_output_template(cls.schema(), root=cls.schema())
def pydantic_to_guidance_output_template_markdown(cls: Type[BaseModel]) -> str:
"""Convert a pydantic model to guidance output template wrapped in json markdown."""
output = json_schema_to_guidance_output_template(cls.schema(), root=cls.schema())
return wrap_json_markdown(output)
def json_schema_to_guidance_output_template(
schema: dict,
key: Optional[str] = None,
indent: int = 0,
root: Optional[dict] = None,
use_pattern_control: bool = False,
) -> str:
"""Convert a json schema to guidance output template.
Implementation based on https://github.com/microsoft/guidance/\
blob/main/notebooks/applications/jsonformer.ipynb
Modified to support nested pydantic models.
"""
out = ""
if "type" not in schema and "$ref" in schema:
if root is None:
raise ValueError("Must specify root schema for nested object")
ref = schema["$ref"]
model = ref.split("/")[-1]
return json_schema_to_guidance_output_template(
root["definitions"][model], key, indent, root
)
if schema["type"] == "object":
out += " " * indent + "{\n"
for k, v in schema["properties"].items():
out += (
" " * (indent + 1)
+ f'"{k}"'
+ ": "
+ json_schema_to_guidance_output_template(v, k, indent + 1, root)
+ ",\n"
)
out += " " * indent + "}"
return out
elif schema["type"] == "array":
if key is None:
raise ValueError("Key should not be None")
if "max_items" in schema:
extra_args = f" max_iterations={schema['max_items']}"
else:
extra_args = ""
return (
"[{{#geneach '"
+ key
+ "' stop=']'"
+ extra_args
+ "}}{{#unless @first}}, {{/unless}}"
+ json_schema_to_guidance_output_template(schema["items"], "this", 0, root)
+ "{{/geneach}}]"
)
elif schema["type"] == "string":
if key is None:
raise ValueError("key should not be None")
return "\"{{gen '" + key + "' stop='\"'}}\""
elif schema["type"] in ["integer", "number"]:
if key is None:
raise ValueError("key should not be None")
if use_pattern_control:
return "{{gen '" + key + "' pattern='[0-9\\.]' stop=','}}"
else:
return "\"{{gen '" + key + "' stop='\"'}}\""
elif schema["type"] == "boolean":
if key is None:
raise ValueError("key should not be None")
return "{{#select '" + key + "'}}True{{or}}False{{/select}}"
else:
schema_type = schema["type"]
raise ValueError(f"Unknown schema type {schema_type}")
Model = TypeVar("Model", bound=BaseModel)
def parse_pydantic_from_guidance_program(
program: "Program", cls: Type[Model], verbose: bool = False
) -> Model:
"""Parse output from guidance program.
This is a temporary solution for parsing a pydantic object out of an executed
guidance program.
NOTE: right now we assume the output is the last markdown formatted json block
NOTE: a better way is to extract via Program.variables, but guidance does not
support extracting nested objects right now.
So we call back to manually parsing the final text after program execution
"""
try:
output = program.text.split("```json")[-1]
output = "```json" + output
if verbose:
print("Raw output:")
print(output)
json_dict = parse_json_markdown(output)
sub_questions = cls.parse_obj(json_dict)
except Exception as e:
raise OutputParserException(
"Failed to parse pydantic object from guidance program"
) from e
return sub_questions
| [] |
2024-01-10 | apecloud/llama_index | llama_index~question_gen~guidance_generator.py | from typing import TYPE_CHECKING, List, Optional, Sequence
from pydantic import BaseModel
from llama_index.indices.query.schema import QueryBundle
from llama_index.program.guidance_program import GuidancePydanticProgram
from llama_index.prompts.guidance_utils import convert_to_handlebars
from llama_index.question_gen.prompts import (
DEFAULT_SUB_QUESTION_PROMPT_TMPL,
build_tools_text,
)
from llama_index.question_gen.types import BaseQuestionGenerator, SubQuestion
from llama_index.tools.types import ToolMetadata
if TYPE_CHECKING:
from guidance.llms import LLM as GuidanceLLM
DEFAULT_GUIDANCE_SUB_QUESTION_PROMPT_TMPL = convert_to_handlebars(
DEFAULT_SUB_QUESTION_PROMPT_TMPL
)
class SubQuestionList(BaseModel):
"""A pydantic object wrapping a list of sub-questions.
This is mostly used to make getting a json schema easier.
"""
items: List[SubQuestion]
class GuidanceQuestionGenerator(BaseQuestionGenerator):
def __init__(
self,
program: GuidancePydanticProgram,
verbose: bool = False,
) -> None:
self._program = program
self._verbose = verbose
@classmethod
def from_defaults(
cls,
prompt_template_str: str = DEFAULT_GUIDANCE_SUB_QUESTION_PROMPT_TMPL,
guidance_llm: Optional["GuidanceLLM"] = None,
verbose: bool = False,
) -> "GuidanceQuestionGenerator":
program = GuidancePydanticProgram[SubQuestionList](
output_cls=SubQuestionList,
guidance_llm=guidance_llm,
prompt_template_str=prompt_template_str,
verbose=verbose,
)
return cls(program, verbose)
def generate(
self, tools: Sequence[ToolMetadata], query: QueryBundle
) -> List[SubQuestion]:
tools_str = build_tools_text(tools)
query_str = query.query_str
question_list = self._program(
tools_str=tools_str,
query_str=query_str,
)
return question_list.items
async def agenerate(
self, tools: Sequence[ToolMetadata], query: QueryBundle
) -> List[SubQuestion]:
# TODO: currently guidance does not support async calls
return self.generate(tools=tools, query=query)
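# Usage sketch (illustrative; the tool metadata, query, and guidance LLM are assumptions,
# assuming `from guidance.llms import OpenAI as GuidanceOpenAI`):
#   question_gen = GuidanceQuestionGenerator.from_defaults(
#       guidance_llm=GuidanceOpenAI("text-davinci-003"),
#   )
#   sub_questions = question_gen.generate(
#       tools=[ToolMetadata(name="docs", description="KubeBlocks documentation")],
#       query=QueryBundle(query_str="How do I back up a cluster?"),
#   )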
| [] |
2024-01-10 | apecloud/llama_index | llama_index~program~guidance_program.py | from typing import TYPE_CHECKING, Any, Generic, Optional, Type
from llama_index.program.base_program import BasePydanticProgram, Model
from llama_index.prompts.guidance_utils import (
parse_pydantic_from_guidance_program,
pydantic_to_guidance_output_template_markdown,
)
if TYPE_CHECKING:
from guidance.llms import LLM as GuidanceLLM
class GuidancePydanticProgram(BasePydanticProgram, Generic[Model]):
"""
A guidance-based function that returns a pydantic model.
Note: this interface is not yet stable.
"""
def __init__(
self,
output_cls: Type[Model],
prompt_template_str: str,
guidance_llm: Optional["GuidanceLLM"] = None,
verbose: bool = False,
):
try:
from guidance import Program
from guidance.llms import OpenAI
except ImportError as e:
raise ImportError(
"guidance package not found." "please run `pip install guidance`"
) from e
llm = guidance_llm or OpenAI("text-davinci-003")
output_str = pydantic_to_guidance_output_template_markdown(output_cls)
full_str = prompt_template_str + "\n" + output_str
self._guidance_program = Program(full_str, llm=llm, silent=not verbose)
self._output_cls = output_cls
self._verbose = verbose
@property
def output_cls(self) -> Type[Model]:
return self._output_cls
def __call__(
self,
*args: Any,
**kwargs: Any,
) -> Model:
executed_program = self._guidance_program(**kwargs)
pydantic_obj = parse_pydantic_from_guidance_program(
program=executed_program, cls=self._output_cls
)
return pydantic_obj
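# Usage sketch (illustrative; the pydantic model and prompt below are assumptions,
# and require `from pydantic import BaseModel`):
#   class Song(BaseModel):
#       title: str
#       length_seconds: int
#   program = GuidancePydanticProgram[Song](
#       output_cls=Song,
#       prompt_template_str="Write a song about {{topic}}.",
#   )
#   song = program(topic="llamas")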
| [] |
2024-01-10 | apecloud/llama_index | llama_index~prompts~chat_prompts.py | """Prompts for ChatGPT."""
from langchain.prompts.chat import (
AIMessagePromptTemplate,
ChatPromptTemplate,
HumanMessagePromptTemplate,
)
from llama_index.prompts.prompts import RefinePrompt, RefineTableContextPrompt
# Refine Prompt
CHAT_REFINE_PROMPT_TMPL_MSGS = [
HumanMessagePromptTemplate.from_template("{query_str}"),
AIMessagePromptTemplate.from_template("{existing_answer}"),
HumanMessagePromptTemplate.from_template(
"We have the opportunity to refine the above answer "
"(only if needed) with some more context below.\n"
"------------\n"
"{context_msg}\n"
"------------\n"
"Given the new context, refine the original answer to better "
"answer the question. "
"If the context isn't useful, output the original answer again.",
),
]
CHAT_REFINE_PROMPT_LC = ChatPromptTemplate.from_messages(CHAT_REFINE_PROMPT_TMPL_MSGS)
CHAT_REFINE_PROMPT = RefinePrompt.from_langchain_prompt(CHAT_REFINE_PROMPT_LC)
# Table Context Refine Prompt
CHAT_REFINE_TABLE_CONTEXT_TMPL_MSGS = [
HumanMessagePromptTemplate.from_template("{query_str}"),
AIMessagePromptTemplate.from_template("{existing_answer}"),
HumanMessagePromptTemplate.from_template(
"We have provided a table schema below. "
"---------------------\n"
"{schema}\n"
"---------------------\n"
"We have also provided some context information below. "
"{context_msg}\n"
"---------------------\n"
"Given the context information and the table schema, "
"refine the original answer to better "
"answer the question. "
"If the context isn't useful, return the original answer."
),
]
CHAT_REFINE_TABLE_CONTEXT_PROMPT_LC = ChatPromptTemplate.from_messages(
CHAT_REFINE_TABLE_CONTEXT_TMPL_MSGS
)
CHAT_REFINE_TABLE_CONTEXT_PROMPT = RefineTableContextPrompt.from_langchain_prompt(
CHAT_REFINE_TABLE_CONTEXT_PROMPT_LC
)
| [
"Given the new context, refine the original answer to better ",
"{existing_answer}",
"{context_msg}\n",
"We have the opportunity to refine the above answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nGiven the new context, refine the original answer to better answer the question. If the context isn't useful, output the original answer again.",
"{query_str}",
"(only if needed) with some more context below.\n",
"answer the question. ",
"If the context isn't useful, output the original answer again.",
"We have the opportunity to refine the above answer ",
"We have provided a table schema below. ---------------------\n{schema}\n---------------------\nWe have also provided some context information below. {context_msg}\n---------------------\nGiven the context information and the table schema, refine the original answer to better answer the question. If the context isn't useful, return the original answer.",
"------------\n"
] |
2024-01-10 | apecloud/llama_index | benchmarks~struct_indices~spider~spider_utils.py | """Utilities for Spider module."""
import json
import os
from typing import Dict, Tuple, Union
from langchain import OpenAI
from langchain.chat_models import ChatOpenAI
from sqlalchemy import create_engine, text
from llama_index import SQLStructStoreIndex, LLMPredictor, SQLDatabase
def load_examples(spider_dir: str) -> Tuple[list, list]:
"""Load examples."""
with open(os.path.join(spider_dir, "train_spider.json"), "r") as f:
train_spider = json.load(f)
with open(os.path.join(spider_dir, "train_others.json"), "r") as f:
train_others = json.load(f)
with open(os.path.join(spider_dir, "dev.json"), "r") as f:
dev = json.load(f)
return train_spider + train_others, dev
def create_indexes(
spider_dir: str, llm: Union[ChatOpenAI, OpenAI]
) -> Dict[str, SQLStructStoreIndex]:
"""Create indexes for all databases."""
# Create all necessary SQL database objects.
databases = {}
for db_name in os.listdir(os.path.join(spider_dir, "database")):
db_path = os.path.join(spider_dir, "database", db_name, db_name + ".sqlite")
if not os.path.exists(db_path):
continue
engine = create_engine("sqlite:///" + db_path)
databases[db_name] = SQLDatabase(engine=engine)
# Test connection.
with engine.connect() as connection:
connection.execute(
text("select name from sqlite_master where type = 'table'")
).fetchone()
llm_predictor = LLMPredictor(llm=llm)
llm_indexes = {}
for db_name, db in databases.items():
llm_indexes[db_name] = SQLStructStoreIndex(
llm_predictor=llm_predictor,
sql_database=db,
)
return llm_indexes
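# Usage sketch (illustrative; the Spider directory path and model are assumptions):
#   train_examples, dev_examples = load_examples("./spider")
#   indexes = create_indexes("./spider", llm=OpenAI(temperature=0))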
| [] |
2024-01-10 | apecloud/llama_index | llama_index~langchain_helpers~sql_wrapper.py | """SQL wrapper around SQLDatabase in langchain."""
from typing import Any, Dict, List, Tuple, Optional
from langchain.sql_database import SQLDatabase as LangchainSQLDatabase
from sqlalchemy import MetaData, create_engine, insert, text
from sqlalchemy.engine import Engine
class SQLDatabase(LangchainSQLDatabase):
"""SQL Database.
Wrapper around SQLDatabase object from langchain. Offers
some helper utilities for insertion and querying.
    See `langchain documentation <https://tinyurl.com/4we5ku8j>`_ for more details.
Args:
*args: Arguments to pass to langchain SQLDatabase.
**kwargs: Keyword arguments to pass to langchain SQLDatabase.
"""
@property
def engine(self) -> Engine:
"""Return SQL Alchemy engine."""
return self._engine
@property
def metadata_obj(self) -> MetaData:
"""Return SQL Alchemy metadata."""
return self._metadata
@classmethod
def from_uri(
cls, database_uri: str, engine_args: Optional[dict] = None, **kwargs: Any
) -> "SQLDatabase":
"""Construct a SQLAlchemy engine from URI."""
_engine_args = engine_args or {}
return cls(create_engine(database_uri, **_engine_args), **kwargs)
def get_table_columns(self, table_name: str) -> List[Any]:
"""Get table columns."""
return self._inspector.get_columns(table_name)
def get_single_table_info(self, table_name: str) -> str:
"""Get table info for a single table."""
# same logic as table_info, but with specific table names
template = (
"Table '{table_name}' has columns: {columns} "
"and foreign keys: {foreign_keys}."
)
columns = []
for column in self._inspector.get_columns(table_name):
columns.append(f"{column['name']} ({str(column['type'])})")
column_str = ", ".join(columns)
foreign_keys = []
for foreign_key in self._inspector.get_foreign_keys(table_name):
foreign_keys.append(
f"{foreign_key['constrained_columns']} -> "
f"{foreign_key['referred_table']}.{foreign_key['referred_columns']}"
)
foreign_key_str = ", ".join(foreign_keys)
table_str = template.format(
table_name=table_name, columns=column_str, foreign_keys=foreign_key_str
)
return table_str
def insert_into_table(self, table_name: str, data: dict) -> None:
"""Insert data into a table."""
table = self._metadata.tables[table_name]
stmt = insert(table).values(**data)
with self._engine.connect() as connection:
connection.execute(stmt)
connection.commit()
def run_sql(self, command: str) -> Tuple[str, Dict]:
"""Execute a SQL statement and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
with self._engine.connect() as connection:
cursor = connection.execute(text(command))
if cursor.returns_rows:
result = cursor.fetchall()
return str(result), {"result": result}
return "", {}
| [
"Table '{table_name}' has columns: {columns} and foreign keys: {foreign_keys}."
] |
2024-01-10 | apecloud/llama_index | apecloud-chat~trans_index.py | from langchain import OpenAI
from gpt_index import GPTSimpleVectorIndex, SimpleDirectoryReader, QuestionAnswerPrompt, LLMPredictor, PromptHelper, ServiceContext, Document
import logging
import sys
import os
import argparse
from typing import Any, Dict, List, Optional, cast
from gpt_index import GPTListIndex, SimpleDirectoryReader
from traverse_code_files import list_go_files_recursive
from gpt_index.langchain_helpers.text_splitter import TokenTextSplitter
from gpt_index import GPTQdrantIndex
from gpt_index.vector_stores.qdrant import QdrantVectorStore
from gpt_index.vector_stores.simple import SimpleVectorStore
from gpt_index.utils import iter_batch
from qdrant_client.http import models as rest
from gpt_index.vector_stores.types import (
NodeEmbeddingResult,
VectorStore,
VectorStoreQueryResult,
VectorStoreQuery,
)
import qdrant_client
from read_key import read_key_from_file
def parse_arguments():
parser = argparse.ArgumentParser(description="Query Engine for KubeBlocks.")
parser.add_argument("key_file", type=str, help="Key file for OpenAI_API_KEY.")
return parser.parse_args()
def collection_exists(client: qdrant_client.QdrantClient, collection_name: str) -> bool:
"""Check if a collection exists."""
from grpc import RpcError
from qdrant_client.http.exceptions import UnexpectedResponse
try:
client.get_collection(collection_name)
except (RpcError, UnexpectedResponse, ValueError):
return False
return True
def main():
args = parse_arguments()
key_file = args.key_file
openai_api_key = read_key_from_file(key_file)
# set env for OpenAI api key
os.environ['OPENAI_API_KEY'] = openai_api_key
# set log level
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
client = qdrant_client.QdrantClient(
url="http://localhost",
port=6333,
grpc_port=6334,
prefer_grpc=False,
https=False,
timeout=300
)
collection_name = "kubeblocks_config"
vector_index = GPTSimpleVectorIndex.load_from_disk('config.json')
docs = vector_index.docstore.docs
embedding_dict = vector_index.get_vector_store().get_embedding_dict()
embedding_len = 0
for doc_id, vector in embedding_dict.items():
embedding_len = len(vector)
print(f"embedding_len:{embedding_len}")
break
if len(docs.items()) > 0 and not collection_exists(client, collection_name):
client.create_collection(
collection_name=collection_name,
vectors_config=rest.VectorParams(
size=embedding_len,
distance=rest.Distance.COSINE,
),
)
qdrant_store = QdrantVectorStore(collection_name=collection_name, client=client)
    new_ids = []
    vectors = []
    payloads = []
    def flush_batch():
        # upsert the accumulated points into the qdrant collection and clear the buffers
        client.upsert(
            collection_name=collection_name,
            points=rest.Batch.construct(
                ids=new_ids,
                vectors=vectors,
                payloads=payloads,
            ),
        )
        new_ids.clear()
        vectors.clear()
        payloads.clear()
    for doc_id, doc in docs.items():
        new_ids.append(doc_id)
        vectors.append(embedding_dict[doc_id])
        payloads.append(
            {
                "doc_id": doc.doc_id,
                "text": doc.text,
                "extra_info": doc.extra_info,
            }
        )
        # flush once a full batch of 100 points has accumulated
        if len(new_ids) >= 100:
            flush_batch()
    # flush any remaining points that did not fill a full batch
    if new_ids:
        flush_batch()
if __name__ == "__main__":
main()
| [] |
2024-01-10 | apecloud/llama_index | llama_index~evaluation~guideline_eval.py | import logging
from typing import Optional
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from llama_index.evaluation.base import BaseEvaluator, Evaluation
from llama_index.indices.base import ServiceContext
from llama_index.prompts.base import Prompt
from llama_index.response.schema import Response
logger = logging.getLogger(__name__)
class GuidelineEvaluator(BaseEvaluator):
"""An evaluator which uses guidelines to evaluate a response.
Args:
service_context(ServiceContext): The service context to use for evaluation.
guidelines(str): User-added guidelines to use for evaluation.
eval_template(str): The template to use for evaluation.
"""
def __init__(
self,
service_context: Optional[ServiceContext] = None,
guidelines: Optional[str] = None,
eval_template: Optional[str] = None,
) -> None:
self.service_context = service_context or ServiceContext.from_defaults()
self.guidelines = guidelines or DEFAULT_GUIDELINES
self.eval_template = eval_template or DEFAULT_EVAL_TEMPLATE
def evaluate_response(self, query: str, response: Response) -> Evaluation:
"""Evaluate the response for a query and an Evaluation."""
parser: PydanticOutputParser[EvaluationData] = PydanticOutputParser(
pydantic_object=EvaluationData
)
format_instructions = parser.get_format_instructions()
response_str = response.response
prompt = Prompt(self.eval_template)
logger.debug("prompt: %s", prompt)
logger.debug("query: %s", query)
logger.debug("response: %s", response_str)
logger.debug("guidelines: %s", self.guidelines)
logger.debug("format_instructions: %s", format_instructions)
(eval_response, _) = self.service_context.llm_predictor.predict(
prompt,
query=query,
response=response_str,
guidelines=self.guidelines,
format_instructions=format_instructions,
)
eval_data = parser.parse(eval_response)
return Evaluation(query, response, eval_data.passing, eval_data.feedback)
DEFAULT_GUIDELINES = (
"The response should fully answer the query.\n"
"The response should avoid being vague or ambiguous.\n"
"The response should be specific and use statistics or numbers when possible.\n"
)
DEFAULT_EVAL_TEMPLATE = (
"Here is the original query:\n"
"Query: {query}\n"
"Critique the following response based on the guidelines below:\n"
"Response: {response}\n"
"Guidelines: {guidelines}\n"
"Now please provide constructive criticism in the following format:\n"
"{format_instructions}"
)
class EvaluationData(BaseModel):
passing: bool = Field(description="Whether the response passes the guidelines.")
feedback: str = Field(
description="The feedback for the response based on the guidelines."
)
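# Usage sketch (illustrative; the guideline text and query are assumptions, and
# `response` is a Response object obtained from a query engine):
#   evaluator = GuidelineEvaluator(
#       guidelines="Responses must mention at least one concrete number.",
#   )
#   evaluation = evaluator.evaluate_response("How much warmer was 2023?", response)
#   print(evaluation.passing, evaluation.feedback)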
| [
"Here is the original query:\nQuery: {query}\nCritique the following response based on the guidelines below:\nResponse: {response}\nGuidelines: {guidelines}\nNow please provide constructive criticism in the following format:\n{format_instructions}"
] |
2024-01-10 | apecloud/llama_index | llama_index~tools~types.py | from abc import abstractmethod
from dataclasses import dataclass
from typing import Any, Optional, Type, Dict
from pydantic import BaseModel
from langchain.tools import Tool, StructuredTool
@dataclass
class ToolMetadata:
description: str
name: Optional[str] = None
fn_schema: Optional[Type[BaseModel]] = None
class BaseTool:
@property
@abstractmethod
def metadata(self) -> ToolMetadata:
pass
@abstractmethod
def __call__(self, input: Any) -> None:
pass
def _process_langchain_tool_kwargs(
self,
langchain_tool_kwargs: Any,
) -> Dict[str, Any]:
"""Process langchain tool kwargs."""
if "name" not in langchain_tool_kwargs:
langchain_tool_kwargs["name"] = self.metadata.name or ""
if "description" not in langchain_tool_kwargs:
langchain_tool_kwargs["description"] = self.metadata.description
if "fn_schema" not in langchain_tool_kwargs:
langchain_tool_kwargs["args_schema"] = self.metadata.fn_schema
return langchain_tool_kwargs
def to_langchain_tool(
self,
**langchain_tool_kwargs: Any,
) -> Tool:
"""To langchain tool."""
langchain_tool_kwargs = self._process_langchain_tool_kwargs(
langchain_tool_kwargs
)
return Tool.from_function(
func=self.__call__,
**langchain_tool_kwargs,
)
def to_langchain_structured_tool(
self,
**langchain_tool_kwargs: Any,
) -> StructuredTool:
"""To langchain structured tool."""
langchain_tool_kwargs = self._process_langchain_tool_kwargs(
langchain_tool_kwargs
)
return StructuredTool.from_function(
func=self.__call__,
**langchain_tool_kwargs,
)
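# Usage sketch (illustrative; EchoTool is a hypothetical subclass, not part of this module):
#   class EchoTool(BaseTool):
#       @property
#       def metadata(self) -> ToolMetadata:
#           return ToolMetadata(description="Echo the input back.", name="echo")
#       def __call__(self, input: Any) -> str:
#           return str(input)
#   lc_tool = EchoTool().to_langchain_tool()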
| [] |
2024-01-10 | apecloud/llama_index | apecloud-chat~private_gpt~local_llm.py | import sys
sys.path.insert(0, "/Users/slc/code/apecloud-gpt/llama_index/")
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.llms import GPT4All
from llama_index.indices.loading import load_index_from_storage
from llama_index import LLMPredictor
from llama_index.node_parser.simple import SimpleNodeParser
from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
from llama_index import (
GPTVectorStoreIndex,
LangchainEmbedding,
ServiceContext,
StorageContext,
download_loader,
PromptHelper
)
PyMuPDFReader = download_loader("PyMuPDFReader")
documents = PyMuPDFReader().load(file_path='./IPCC_AR6_WGII_Chapter03.pdf', metadata=True)
# ensure document texts are not bytes objects
for doc in documents:
doc.text = doc.text.decode()
local_llm_path = './ggml-gpt4all-j-v1.3-groovy.bin'
llm = GPT4All(model=local_llm_path, backend='gptj', streaming=True, n_ctx=512)
llm_predictor = LLMPredictor(llm=llm)
embed_model = LangchainEmbedding(HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2"))
prompt_helper = PromptHelper(max_input_size=512, num_output=256, max_chunk_overlap=-1000)
service_context = ServiceContext.from_defaults(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
node_parser=SimpleNodeParser(text_splitter=TokenTextSplitter(chunk_size=300, chunk_overlap=20))
)
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
index.storage_context.persist(persist_dir="./storage")
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context, service_context=service_context)
query_engine = index.as_query_engine(streaming=True, similarity_top_k=1, service_context=service_context)
response_stream = query_engine.query("What are the main climate risks to our Oceans?")
response_stream.print_response_stream()
| [] |