date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | jayanth151002/athena.ai | server~src~chains.py | import os
import pickle
import textwrap
from dotenv import load_dotenv
from typing import List, Optional, Dict, Any
from PyPDF2 import PdfReader
from langchain import OpenAI
from langchain.prompts import PromptTemplate
from langchain.docstore.document import Document
from langchain.chains.summarize import load_summarize_chain
# import from files
from src.prompt_storage import PromptStorageForChains
class BaseChain:
def __init__(self, project_name : str, artifact_folder : str, source_path : str, openai_key : Optional[str] = None, openai_temp : int = 0) -> None:
"""
Args:
:param project_name : (str) The name of the project; all artifacts will be generated under this name
:param artifact_folder : (str) The root location where all the artifacts will be stored
:param source_path : (str) The path to the source PDF document
:param openai_key : (str) The OpenAI API key
:param openai_temp : (int) The temperature of the OpenAI model, recommended value: 0
"""
self.project_name = project_name
self.artifact_folder = artifact_folder
self.source_path = source_path
self.openai_key = openai_key
self.openai_temp = openai_temp
self.prompt = PromptStorageForChains()
load_dotenv(dotenv_path='.env/openai.env')
# FIXME: Should also hold for AWS S3 bucket
self.artifact_path = os.path.join(self.artifact_folder, self.project_name)
# Loading the LLM
try:
self.openai_key = os.getenv("OPENAI_API_KEY") if self.openai_key is None else self.openai_key
self.llm = OpenAI(
openai_api_key=self.openai_key, temperature=self.openai_temp
)
except Exception as e:
print(f"Open AI Exception occured at: {e}")
return None
# Creating the artifact folder inside the provided folder location
try:
if os.path.exists(self.artifact_path):
print(f"Artifact folder already exists at {self.artifact_path}")
else:
if not os.path.exists(self.artifact_folder):
os.mkdir(self.artifact_folder)
if not os.path.exists(self.artifact_path):
os.mkdir(self.artifact_path)
print("=> Artifact folder created successfully")
except Exception as e:
print(f"Exception occured at: {e}")
return None
# Loading the document
try:
self._document_loader = PdfReader(self.source_path)
self.pages = self._document_loader.pages
print(self.pages)
self.doc_texts = []
for text in self.pages: self.doc_texts.append(text.extract_text(0))
self.docs = [Document(page_content=t) for t in self.doc_texts]
except Exception as e:
print(f"Document Exception occured at: {e}")
return None
print("=> LLM and target document loaded successfully")
class SummarizeChain(BaseChain):
def __init__(self, project_name : str, artifact_folder : str, source_path : str, openai_key : Optional[str] = None, openai_temp : int = 0) -> None:
super().__init__(project_name, artifact_folder, source_path, openai_key, openai_temp)
def run_story_summary_chain(self) -> Dict[str, Any]:
self.base_prompt_template, self.refined_prompt_template = self.prompt.fetch_summarize_prompt_template()
# FIXME: Right now the prompt templates have hardcoded inputs; we need to provide them as args
# in newer versions
self.BASE_PROMPT = PromptTemplate(
template = self.base_prompt_template,
input_variables=["text"]
)
self.REFINED_PROMPT = PromptTemplate(
input_variables=["existing_answer", "text"],
template=self.refined_prompt_template,
)
self.summarizer_chain = load_summarize_chain(
self.llm,
chain_type="refine", # this is by defaiult in this version
return_intermediate_steps=True,
question_prompt=self.BASE_PROMPT,
refine_prompt=self.REFINED_PROMPT, verbose=True)
# Doing a sanity check to see if the results are already present or not
if os.path.exists(os.path.join(self.artifact_path, "story_summary.pkl")):
print("=> Story summary already exists, loading the summary")
with open(os.path.join(self.artifact_path, "story_summary.pkl"), "rb") as f:
return pickle.load(f)
else:
self.output_summary = self.summarizer_chain({
"input_documents": self.docs
}, return_only_outputs=True)
wrapped_text = textwrap.fill(self.output_summary['output_text'],
width=100,break_long_words=False,
replace_whitespace=False)
intermediate_steps = []
for step in self.output_summary['intermediate_steps']:
intermediate_steps.append(step)
response_dict = {
'full_summary' : wrapped_text,
"summary_chunks" : intermediate_steps
}
with open(os.path.join(self.artifact_path, "story_summary.pkl"), "wb") as f:
pickle.dump(response_dict, f)
return response_dict
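# Illustrative usage (a hedged sketch, not part of the original file; the project name,
# folder, and PDF path below are assumptions for demonstration only):
# chain = SummarizeChain(project_name="demo", artifact_folder="artifacts", source_path="story.pdf")
# result = chain.run_story_summary_chain()
# print(result["full_summary"])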
class QuestionAnswerChain(BaseChain):
def __init__(self, project_name : str, artifact_folder : str, source_path : str, openai_key : Optional[str] = None, openai_temp : int = 0) -> None:
super().__init__(project_name, artifact_folder, source_path, openai_key, openai_temp)
def run_question_answer_chain(self) -> Dict[str, Any]:
raise NotImplementedError("Question Answer Chain is not implemented yet")
# GraphViz Chain is not implemented yet
#
class GraphVizChain(BaseChain):
def __init__(self, project_name : str, artifact_folder : str, source_path : str, openai_key : Optional[str] = None, openai_temp : int = 0) -> None:
super().__init__(project_name, artifact_folder, source_path, openai_key, openai_temp)
def run_graphviz_chain(self) -> Dict[str, Any]:
raise NotImplementedError("GraphViz Chain is not implemented yet") | [] |
2024-01-10 | jerabaul29/Cylinder2DFlowControlDRLParallel | Cylinder2DFlowControlWithRL~Env2DCylinder.py | #from printind.printind_decorators import printi_all_method_calls as printidc
#from printind.printind_function import printi, printiv
from tensorforce.environments import Environment
import tensorforce
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
from threading import Thread
from tensorforce import TensorforceError
# a bit hacky, but meeehh... FIXME!!
import sys
import os
cwd = os.getcwd()
sys.path.append(cwd + "/../Simulation/")
from dolfin import Expression, File, plot
from probes import PenetratedDragProbeANN, PenetratedLiftProbeANN, PressureProbeANN, VelocityProbeANN, RecirculationAreaProbe
from generate_msh import generate_mesh
from flow_solver import FlowSolver
from msh_convert import convert
from dolfin import *
import numpy as np
import os
import pickle
import time
import math
import csv
import shutil
# TODO: check that the right types etc. are used, following the tensorforce examples
# typically:
# from tensorforce.contrib.openai_gym import OpenAIGym
# environment = OpenAIGym('MountainCarContinuous-v0', visualize=False)
# printiv(environment.states)
# environment.states = {'shape': (2,), 'type': 'float'}
# printiv(environment.actions)
# environment.actions = {'max_value': 1.0, 'shape': (1,), 'min_value': -1.0, 'type': 'float'}
def constant_profile(mesh, degree):
'''
Time independent inflow profile.
'''
bot = mesh.coordinates().min(axis=0)[1]
top = mesh.coordinates().max(axis=0)[1]
H = top - bot
Um = 1.5
return Expression(('-4*Um*(x[1]-bot)*(x[1]-top)/H/H',
'0'), bot=bot, top=top, H=H, Um=Um, degree=degree, time=0)
class RingBuffer():
"A 1D ring buffer using numpy arrays"
def __init__(self, length):
self.data = np.zeros(length, dtype='f')
self.index = 0
def extend(self, x):
"adds array x to ring buffer"
x_index = (self.index + np.arange(x.size)) % self.data.size
self.data[x_index] = x
self.index = x_index[-1] + 1
def get(self):
"Returns the first-in-first-out data in the ring buffer"
idx = (self.index + np.arange(self.data.size)) % self.data.size
return self.data[idx]
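# A minimal sketch of how this ring buffer behaves (illustrative, not part of the original file):
# buf = RingBuffer(4)
# buf.extend(np.array([1.0, 2.0, 3.0, 4.0, 5.0]))
# buf.get()  # oldest-to-newest view of the last 4 samples, here [2., 3., 4., 5.]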
# @printidc()
class Env2DCylinder(Environment):
"""Environment for 2D flow simulation around a cylinder."""
def __init__(self, path_root, geometry_params, flow_params, solver_params, output_params,
optimization_params, inspection_params, n_iter_make_ready=None, verbose=0, size_history=2000,
reward_function='plain_drag', size_time_state=50, number_steps_execution=1, simu_name="Simu"):
"""
"""
# TODO: should actually save the dicts in order to double check, when loading, that compatible simulations are used together
#printi("--- call init ---")
self.observation = None
self.thread = None
self.path_root = path_root
self.flow_params = flow_params
self.geometry_params = geometry_params
self.solver_params = solver_params
self.output_params = output_params
self.optimization_params = optimization_params
self.inspection_params = inspection_params
self.verbose = verbose
self.n_iter_make_ready = n_iter_make_ready
self.size_history = size_history
self.reward_function = reward_function
self.size_time_state = size_time_state
self.number_steps_execution = number_steps_execution
self.simu_name = simu_name
#Related to writing the .csv files
name="output.csv"
last_row = None
if(os.path.exists("saved_models/"+name)):
with open("saved_models/"+name, 'r') as f:
for row in reversed(list(csv.reader(f, delimiter=";", lineterminator="\n"))):
last_row = row
break
if(not last_row is None):
self.episode_number = int(last_row[0])
self.last_episode_number = int(last_row[0])
else:
self.last_episode_number = 0
self.episode_number = 0
self.episode_drags = np.array([])
self.episode_areas = np.array([])
self.episode_lifts = np.array([])
self.initialized_visualization = False
self.start_class()
#printi("--- done init ---")
def start_class(self):
self.solver_step = 0
self.accumulated_drag = 0
self.accumulated_lift = 0
self.initialized_output = False
self.resetted_number_probes = False
self.area_probe = None
self.history_parameters = {}
for crrt_jet in range(len(self.geometry_params["jet_positions"])):
self.history_parameters["jet_{}".format(crrt_jet)] = RingBuffer(self.size_history)
self.history_parameters["number_of_jets"] = len(self.geometry_params["jet_positions"])
for crrt_probe in range(len(self.output_params["locations"])):
if self.output_params["probe_type"] == 'pressure':
self.history_parameters["probe_{}".format(crrt_probe)] = RingBuffer(self.size_history)
elif self.output_params["probe_type"] == 'velocity':
self.history_parameters["probe_{}_u".format(crrt_probe)] = RingBuffer(self.size_history)
self.history_parameters["probe_{}_v".format(crrt_probe)] = RingBuffer(self.size_history)
self.history_parameters["number_of_probes"] = len(self.output_params["locations"])
self.history_parameters["drag"] = RingBuffer(self.size_history)
self.history_parameters["lift"] = RingBuffer(self.size_history)
self.history_parameters["recirc_area"] = RingBuffer(self.size_history)
# ------------------------------------------------------------------------
# remesh if necessary
h5_file = '.'.join([self.path_root, 'h5'])
msh_file = '.'.join([self.path_root, 'msh'])
self.geometry_params['mesh'] = h5_file
# Regenerate mesh?
if self.geometry_params['remesh']:
if self.verbose > 0:
print("Remesh")
#printi("generate_mesh start...")
generate_mesh(self.geometry_params, template=self.geometry_params['template'])
if self.verbose > 0:
print("generate_mesh done!")
print(msh_file)
assert os.path.exists(msh_file)
convert(msh_file, h5_file)
assert os.path.exists(h5_file)
# ------------------------------------------------------------------------
# if necessary, load initialization fields
if self.n_iter_make_ready is None:
if self.verbose > 0:
print("Load initial flow")
self.flow_params['u_init'] = 'mesh/u_init.xdmf'
self.flow_params['p_init'] = 'mesh/p_init.xdmf'
if self.verbose > 0:
print("Load buffer history")
with open('mesh/dict_history_parameters.pkl', 'rb') as f:
self.history_parameters = pickle.load(f)
if not "number_of_probes" in self.history_parameters:
self.history_parameters["number_of_probes"] = 0
if not "number_of_jets" in self.history_parameters:
self.history_parameters["number_of_jets"] = len(self.geometry_params["jet_positions"])
#printi("Warning!! The number of jets was not set in the loaded hdf5 file")
if not "lift" in self.history_parameters:
self.history_parameters["lift"] = RingBuffer(self.size_history)
#printi("Warning!! No value for the lift founded")
if not "recirc_area" in self.history_parameters:
self.history_parameters["recirc_area"] = RingBuffer(self.size_history)
#printi("Warning!! No value for the recirculation area founded")
# if not the same number of probes, reset
if not self.history_parameters["number_of_probes"] == len(self.output_params["locations"]):
for crrt_probe in range(len(self.output_params["locations"])):
if self.output_params["probe_type"] == 'pressure':
self.history_parameters["probe_{}".format(crrt_probe)] = RingBuffer(self.size_history)
elif self.output_params["probe_type"] == 'velocity':
self.history_parameters["probe_{}_u".format(crrt_probe)] = RingBuffer(self.size_history)
self.history_parameters["probe_{}_v".format(crrt_probe)] = RingBuffer(self.size_history)
self.history_parameters["number_of_probes"] = len(self.output_params["locations"])
#printi("Warning!! Number of probes was changed! Probes buffer content reseted")
self.resetted_number_probes = True
# ------------------------------------------------------------------------
# create the flow simulation object
self.flow = FlowSolver(self.flow_params, self.geometry_params, self.solver_params)
# ------------------------------------------------------------------------
# Setup probes
if self.output_params["probe_type"] == 'pressure':
self.ann_probes = PressureProbeANN(self.flow, self.output_params['locations'])
elif self.output_params["probe_type"] == 'velocity':
self.ann_probes = VelocityProbeANN(self.flow, self.output_params['locations'])
else:
raise RuntimeError("unknown probe type")
# Setup drag measurement
self.drag_probe = PenetratedDragProbeANN(self.flow)
self.lift_probe = PenetratedLiftProbeANN(self.flow)
# ------------------------------------------------------------------------
# No flux from jets for starting
self.Qs = np.zeros(len(self.geometry_params['jet_positions']))
self.action = np.zeros(len(self.geometry_params['jet_positions']))
# ------------------------------------------------------------------------
# prepare the arrays for plotting positions
self.compute_positions_for_plotting()
# ------------------------------------------------------------------------
# if necessary, make converge
if self.n_iter_make_ready is not None:
self.u_, self.p_ = self.flow.evolve(self.Qs)
path=''
if "dump" in self.inspection_params:
path = 'results/area_out.pvd'
self.area_probe = RecirculationAreaProbe(self.u_, 0, store_path=path)
if self.verbose > 0:
print("Compute initial flow")
#printiv(self.n_iter_make_ready)
for _ in range(self.n_iter_make_ready):
self.u_, self.p_ = self.flow.evolve(self.Qs)
self.probes_values = self.ann_probes.sample(self.u_, self.p_).flatten()
self.drag = self.drag_probe.sample(self.u_, self.p_)
self.lift = self.lift_probe.sample(self.u_, self.p_)
self.recirc_area = self.area_probe.sample(self.u_, self.p_)
self.write_history_parameters()
self.visual_inspection()
self.output_data()
self.solver_step += 1
if self.n_iter_make_ready is not None:
encoding = XDMFFile.Encoding.HDF5
mesh = convert(msh_file, h5_file)
comm = mesh.mpi_comm()
# save field data
XDMFFile(comm, 'mesh/u_init.xdmf').write_checkpoint(self.u_, 'u0', 0, encoding)
XDMFFile(comm, 'mesh/p_init.xdmf').write_checkpoint(self.p_, 'p0', 0, encoding)
# save buffer dict
with open('mesh/dict_history_parameters.pkl', 'wb') as f:
pickle.dump(self.history_parameters, f, pickle.HIGHEST_PROTOCOL)
# ----------------------------------------------------------------------
# if reading from disk, show to check everything ok
if self.n_iter_make_ready is None:
#Let's start at a random position of the vortex shedding
if self.optimization_params["random_start"]:
rd_advancement = np.random.randint(650)
for j in range(rd_advancement):
self.flow.evolve(self.Qs)
print("Simulated {} iterations before starting the control".format(rd_advancement))
self.u_, self.p_ = self.flow.evolve(self.Qs)
path=''
if "dump" in self.inspection_params:
path = 'results/area_out.pvd'
self.area_probe = RecirculationAreaProbe(self.u_, 0, store_path=path)
self.probes_values = self.ann_probes.sample(self.u_, self.p_).flatten()
self.drag = self.drag_probe.sample(self.u_, self.p_)
self.lift = self.lift_probe.sample(self.u_, self.p_)
self.recirc_area = self.area_probe.sample(self.u_, self.p_)
self.write_history_parameters()
# self.visual_inspection()
# self.output_data()
# self.solver_step += 1
# time.sleep(10)
# ----------------------------------------------------------------------
# if necessary, fill the probes buffer
if self.resetted_number_probes:
#printi("Need to fill again the buffer; modified number of probes")
for _ in range(self.size_history):
self.execute()
# ----------------------------------------------------------------------
# ready now
#Initialization of the recirculation area probe
#path=''
#if "dump" in self.inspection_params:
# path = 'results/area_out.pvd'
#self.area_probe = RecirculationAreaProbe(self.u_, 0, store_path=path)
self.ready_to_use = True
def write_history_parameters(self):
for crrt_jet in range(len(self.geometry_params["jet_positions"])):
self.history_parameters["jet_{}".format(crrt_jet)].extend(self.Qs[crrt_jet])
if self.output_params["probe_type"] == 'pressure':
for crrt_probe in range(len(self.output_params["locations"])):
self.history_parameters["probe_{}".format(crrt_probe)].extend(self.probes_values[crrt_probe])
elif self.output_params["probe_type"] == 'velocity':
for crrt_probe in range(len(self.output_params["locations"])):
self.history_parameters["probe_{}_u".format(crrt_probe)].extend(self.probes_values[2 * crrt_probe])
self.history_parameters["probe_{}_v".format(crrt_probe)].extend(self.probes_values[2 * crrt_probe + 1])
self.history_parameters["drag"].extend(np.array(self.drag))
self.history_parameters["lift"].extend(np.array(self.lift))
self.history_parameters["recirc_area"].extend(np.array(self.recirc_area))
def compute_positions_for_plotting(self):
# where the pressure probes are
self.list_positions_probes_x = []
self.list_positions_probes_y = []
# total_number_of_probes = len(self.output_params['locations'])
#printiv(total_number_of_probes)
# get the positions
for crrt_probe in self.output_params['locations']:
if self.verbose > 2:
print(crrt_probe)
self.list_positions_probes_x.append(crrt_probe[0])
self.list_positions_probes_y.append(crrt_probe[1])
# where the jets are
radius_cylinder = self.geometry_params['cylinder_size'] / 2.0 / self.geometry_params['clscale']
self.list_positions_jets_x = []
self.list_positions_jets_y = []
# compute the positions
for crrt_jet_angle in self.geometry_params['jet_positions']:
crrt_jet_angle_rad = math.pi / 180.0 * crrt_jet_angle
crrt_x = radius_cylinder * math.cos(crrt_jet_angle_rad)
crrt_y = radius_cylinder * math.sin(crrt_jet_angle_rad)
self.list_positions_jets_x.append(crrt_x)
self.list_positions_jets_y.append(1.1 * crrt_y)
def show_flow(self):
plt.figure()
plot(self.u_)
plt.scatter(self.list_positions_probes_x, self.list_positions_probes_y, c='k', marker='o')
plt.scatter(self.list_positions_jets_x, self.list_positions_jets_y, c='r', marker='o')
plt.xlim([-self.geometry_params['front_distance'], self.geometry_params['length'] - self.geometry_params['front_distance']])
plt.ylim([-self.geometry_params['bottom_distance'], self.geometry_params['width'] - self.geometry_params['bottom_distance']])
plt.ylabel("Y")
plt.xlabel("X")
plt.show()
plt.figure()
p = plot(self.p_)
cb = plt.colorbar(p, fraction=0.1, shrink=0.3)
plt.scatter(self.list_positions_probes_x, self.list_positions_probes_y, c='k', marker='o')
plt.scatter(self.list_positions_jets_x, self.list_positions_jets_y, c='r', marker='o')
plt.xlim([-self.geometry_params['front_distance'], self.geometry_params['length'] - self.geometry_params['front_distance']])
plt.ylim([-self.geometry_params['bottom_distance'], self.geometry_params['width'] - self.geometry_params['bottom_distance']])
plt.ylabel("Y")
plt.xlabel("X")
plt.tight_layout()
cb.set_label("P")
plt.show()
def show_control(self):
plt.figure()
linestyles = ['-', '--', ':', '-.']
for crrt_jet in range(len(self.geometry_params["jet_positions"])):
crrt_jet_data = self.history_parameters["jet_{}".format(crrt_jet)].get()
plt.plot(crrt_jet_data, label="jet {}".format(crrt_jet), linestyle=linestyles[crrt_jet], linewidth=1.5)
plt.legend(loc=2)
plt.ylabel("control Q")
plt.xlabel("actuation step")
plt.tight_layout()
plt.pause(1.0)
plt.savefig("saved_figures/control_episode_{}.pdf".format(self.episode_number))
plt.show()
plt.pause(2.0)
def show_drag(self):
plt.figure()
crrt_drag = self.history_parameters["drag"].get()
plt.plot(crrt_drag, label="episode drag", linewidth=1.2)
plt.plot([0, self.size_history - 1], [self.inspection_params['line_drag'], self.inspection_params['line_drag']], label="mean drag no control", linewidth=2.5, linestyle="--")
plt.ylabel("measured drag D")
plt.xlabel("actuation step")
range_drag_plot = self.inspection_params["range_drag_plot"]
plt.legend(loc=2)
plt.ylim(range_drag_plot)
plt.tight_layout()
plt.pause(1.0)
plt.savefig("saved_figures/drag_episode_{}.pdf".format(self.episode_number))
plt.show()
plt.pause(2.0)
def visual_inspection(self):
total_number_subplots = 5
crrt_subplot = 1
if(not self.initialized_visualization and self.inspection_params["plot"] != False):
plt.ion()
plt.subplots(total_number_subplots, 1)
# ax.set_xlim([0, self.nbr_points_animate_plot])
# ax.set_ylim([0, 1024])
self.initialized_visualization = True
if("plot" in self.inspection_params and self.inspection_params["plot"] != False):
modulo_base = self.inspection_params["plot"]
if self.solver_step % modulo_base == 0:
plt.subplot(total_number_subplots, 1, crrt_subplot)
plot(self.u_)
plt.scatter(self.list_positions_probes_x, self.list_positions_probes_y, c='k', marker='o')
plt.scatter(self.list_positions_jets_x, self.list_positions_jets_y, c='r', marker='o')
plt.xlim([-self.geometry_params['front_distance'], self.geometry_params['length'] - self.geometry_params['front_distance']])
plt.ylim([-self.geometry_params['bottom_distance'], self.geometry_params['width'] - self.geometry_params['bottom_distance']])
plt.ylabel("V")
crrt_subplot += 1
plt.subplot(total_number_subplots, 1, crrt_subplot)
plot(self.p_)
plt.scatter(self.list_positions_probes_x, self.list_positions_probes_y, c='k', marker='o')
plt.scatter(self.list_positions_jets_x, self.list_positions_jets_y, c='r', marker='o')
plt.xlim([-self.geometry_params['front_distance'], self.geometry_params['length'] - self.geometry_params['front_distance']])
plt.ylim([-self.geometry_params['bottom_distance'], self.geometry_params['width'] - self.geometry_params['bottom_distance']])
plt.ylabel("P")
crrt_subplot += 1
plt.subplot(total_number_subplots, 1, crrt_subplot)
plt.cla()
for crrt_jet in range(len(self.geometry_params["jet_positions"])):
crrt_jet_data = self.history_parameters["jet_{}".format(crrt_jet)].get()
plt.plot(crrt_jet_data, label="jet {}".format(crrt_jet))
plt.legend(loc=6)
plt.ylabel("M.F.R.")
crrt_subplot += 1
# plt.subplot(total_number_subplots, 1, crrt_subplot)
# plt.cla()
# for crrt_probe in range(len(self.output_params["locations"])):
# if self.output_params["probe_type"] == 'pressure':
# crrt_probe_data = self.history_parameters["probe_{}".format(crrt_probe)].get()
# plt.plot(crrt_probe_data, label="probe {}".format(crrt_probe))
# elif self.output_params["probe_type"] == 'velocity':
# crrt_probe_data = self.history_parameters["probe_{}_u".format(crrt_probe)].get()
# plt.plot(crrt_probe_data, label="probe {}".format(crrt_probe))
# crrt_probe_data = self.history_parameters["probe_{}_v".format(crrt_probe)].get()
# plt.plot(crrt_probe_data, label="probe {}".format(crrt_probe))
# # plt.legend(loc=6)
# if self.output_params["probe_type"] == "pressure":
# plt.ylabel("pressure")
# elif self.output_params["probe_type"] == "velocity":
# plt.ylabel("velocity")
# if "range_pressure_plot" in self.inspection_params:
# range_pressure_plot = self.inspection_params["range_pressure_plot"]
# plt.ylim(range_pressure_plot)
# crrt_subplot += 1
plt.subplot(total_number_subplots, 1, crrt_subplot)
ax1 = plt.gca()
plt.cla()
crrt_drag = self.history_parameters["drag"].get()
ax1.plot(crrt_drag, color='r', linestyle='-')
if 'line_drag' in self.inspection_params:
ax1.plot([0, self.size_history - 1],
[self.inspection_params['line_drag'], self.inspection_params['line_drag']],
color='r',
linestyle='--')
ax1.set_ylabel("drag")
if "range_drag_plot" in self.inspection_params:
range_drag_plot = self.inspection_params["range_drag_plot"]
ax1.set_ylim(range_drag_plot)
ax2 = ax1.twinx()
crrt_lift = self.history_parameters["lift"].get()
ax2.plot(crrt_lift, color='b', linestyle='-', label="lift")
if 'line_lift' in self.inspection_params:
ax2.plot([0, self.size_history - 1],
[self.inspection_params['line_lift'], self.inspection_params['line_lift']],
color='b',
linestyle='--')
ax2.set_ylabel("lift")
if "range_lift_plot" in self.inspection_params:
range_lift_plot = self.inspection_params["range_lift_plot"]
ax2.set_ylim(range_lift_plot)
plt.xlabel("buffer steps")
crrt_subplot += 1
plt.subplot(total_number_subplots, 1, crrt_subplot)
plt.cla()
crrt_area = self.history_parameters["recirc_area"].get()
plt.plot(crrt_area)
plt.ylabel("RecArea")
plt.xlabel("buffer steps")
#if "range_drag_plot" in self.inspection_params:
# range_drag_plot = self.inspection_params["range_drag_plot"]
plt.ylim([0, 0.03])
crrt_subplot += 1
# plt.tight_layout()
plt.tight_layout(pad=0, w_pad=0, h_pad=-0.5)
plt.draw()
plt.pause(0.5)
if self.solver_step % self.inspection_params["dump"] == 0 and self.inspection_params["dump"] < 10000:
#Command-line display
print("%s | Ep N: %4d, step: %4d, Rec Area: %.4f, drag: %.4f, lift: %.4f"%(self.simu_name,
self.episode_number,
self.solver_step,
self.history_parameters["recirc_area"].get()[-1],
self.history_parameters["drag"].get()[-1],
self.history_parameters["lift"].get()[-1]))
#Save everything that happens to a debug file!
name = "debug.csv"
if(not os.path.exists("saved_models")):
os.mkdir("saved_models")
if(not os.path.exists("saved_models/"+name)):
with open("saved_models/"+name, "w") as csv_file:
spam_writer=csv.writer(csv_file, delimiter=";", lineterminator="\n")
spam_writer.writerow(["Name", "Episode", "Step", "RecircArea", "Drag", "lift"])
spam_writer.writerow([self.simu_name,
self.episode_number,
self.solver_step,
self.history_parameters["recirc_area"].get()[-1],
self.history_parameters["drag"].get()[-1],
self.history_parameters["lift"].get()[-1]])
else:
with open("saved_models/"+name, "a") as csv_file:
spam_writer=csv.writer(csv_file, delimiter=";", lineterminator="\n")
spam_writer.writerow([self.simu_name,
self.episode_number,
self.solver_step,
self.history_parameters["recirc_area"].get()[-1],
self.history_parameters["drag"].get()[-1],
self.history_parameters["lift"].get()[-1]])
if("single_run" in self.inspection_params and self.inspection_params["single_run"] == True):
# if ("dump" in self.inspection_params and self.inspection_params["dump"] > 10000):
self.sing_run_output()
def sing_run_output(self):
name = "test_strategy.csv"
if(not os.path.exists("saved_models")):
os.mkdir("saved_models")
if(not os.path.exists("saved_models/"+name)):
with open("saved_models/"+name, "w") as csv_file:
spam_writer=csv.writer(csv_file, delimiter=";", lineterminator="\n")
spam_writer.writerow(["Name", "Step", "Drag", "Lift", "RecircArea"] + ["Jet" + str(v) for v in range(len(self.Qs))])
spam_writer.writerow([self.simu_name, self.solver_step, self.history_parameters["drag"].get()[-1], self.history_parameters["lift"].get()[-1], self.history_parameters["recirc_area"].get()[-1]] + [str(v) for v in self.Qs.tolist()])
else:
with open("saved_models/"+name, "a") as csv_file:
spam_writer=csv.writer(csv_file, delimiter=";", lineterminator="\n")
spam_writer.writerow([self.simu_name, self.solver_step, self.history_parameters["drag"].get()[-1], self.history_parameters["lift"].get()[-1], self.history_parameters["recirc_area"].get()[-1]] + [str(v) for v in self.Qs.tolist()])
return
def output_data(self):
# if "step" in self.inspection_params:
# modulo_base = self.inspection_params["step"]
# if self.solver_step % modulo_base == 0:
# if self.verbose > 0:
# print(self.solver_step)
# print(self.Qs)
# print(self.probes_values)
# print(self.drag)
# print(self.lift)
# print(self.recirc_area)
# pass
if "dump" in self.inspection_params and self.inspection_params["dump"] < 10000:
modulo_base = self.inspection_params["dump"]
#Save the drag to the csv at the end of each episode
self.episode_drags = np.append(self.episode_drags, [self.history_parameters["drag"].get()[-1]])
self.episode_areas = np.append(self.episode_areas, [self.history_parameters["recirc_area"].get()[-1]])
self.episode_lifts = np.append(self.episode_lifts, [self.history_parameters["lift"].get()[-1]])
if(self.last_episode_number != self.episode_number and "single_run" in self.inspection_params and self.inspection_params["single_run"] == False):
self.last_episode_number = self.episode_number
avg_drag = np.average(self.episode_drags[len(self.episode_drags)//2:])
avg_area = np.average(self.episode_areas[len(self.episode_areas)//2:])
avg_lift = np.average(self.episode_lifts[len(self.episode_lifts)//2:])
name = "output.csv"
if(not os.path.exists("saved_models")):
os.mkdir("saved_models")
if(not os.path.exists("saved_models/"+name)):
with open("saved_models/"+name, "w") as csv_file:
spam_writer=csv.writer(csv_file, delimiter=";", lineterminator="\n")
spam_writer.writerow(["Episode", "AvgDrag", "AvgLift", "AvgRecircArea"])
spam_writer.writerow([self.last_episode_number, avg_drag, avg_lift, avg_area])
else:
with open("saved_models/"+name, "a") as csv_file:
spam_writer=csv.writer(csv_file, delimiter=";", lineterminator="\n")
spam_writer.writerow([self.last_episode_number, avg_drag, avg_lift, avg_area])
self.episode_drags = np.array([])
self.episode_areas = np.array([])
self.episode_lifts = np.array([])
if(os.path.exists("saved_models/output.csv")):
if(not os.path.exists("best_model")):
shutil.copytree("saved_models", "best_model")
else :
with open("saved_models/output.csv", 'r') as csvfile:
data = csv.reader(csvfile, delimiter = ';')
for row in data:
lastrow = row
last_iter = lastrow[1]
with open("best_model/output.csv", 'r') as csvfile:
data = csv.reader(csvfile, delimiter = ';')
for row in data:
lastrow = row
best_iter = lastrow[1]
if float(best_iter) < float(last_iter):
print("best_model updated")
if(os.path.exists("best_model")):
shutil.rmtree("best_model")
shutil.copytree("saved_models", "best_model")
# if self.solver_step % modulo_base == 0:
# if not self.initialized_output:
# self.u_out = File('results/u_out.pvd')
# self.p_out = File('results/p_out.pvd')
# self.initialized_output = True
# if(not self.area_probe is None):
# self.area_probe.dump(self.area_probe)
# self.u_out << self.flow.u_
# self.p_out << self.flow.p_
def __str__(self):
# printi("Env2DCylinder ---")
return 'Env2DCylinder'
def close(self):
self.ready_to_use = False
def reset(self):
if self.solver_step > 0:
mean_accumulated_drag = self.accumulated_drag / self.solver_step
mean_accumulated_lift = self.accumulated_lift / self.solver_step
if self.verbose > -1:
print("mean accumulated drag on the whole episode: {}".format(mean_accumulated_drag))
if self.inspection_params["show_all_at_reset"]:
self.show_drag()
self.show_control()
self.start_class()
next_state = np.transpose(np.array(self.probes_values))
if self.verbose > 0:
print(next_state)
self.episode_number += 1
return(next_state)
def execute(self, actions=None):
action = actions
if self.verbose > 1:
print("--- call execute ---")
if action is None:
if self.verbose > -1:
print("carefull, no action given; by default, no jet!")
nbr_jets = len(self.geometry_params["jet_positions"])
action = np.zeros((nbr_jets, ))
if self.verbose > 2:
print(action)
self.previous_action = self.action
self.action = action
# to execute several numerical integration steps
for crrt_action_nbr in range(self.number_steps_execution):
# try to force a continuous / smoother control
if "smooth_control" in self.optimization_params:
# printiv(self.optimization_params["smooth_control"])
# printiv(actions)
# printiv(self.Qs)
# self.Qs += self.optimization_params["smooth_control"] * (np.array(action) - self.Qs) # the solution originally used in the JFM paper
self.Qs = np.array(self.previous_action) + (np.array(self.action) - np.array(self.previous_action)) / self.number_steps_execution * (crrt_action_nbr + 1) # a linear change in the control
else:
self.Qs = np.transpose(np.array(action))
# impose a zero net Qs
if "zero_net_Qs" in self.optimization_params:
if self.optimization_params["zero_net_Qs"]:
self.Qs = self.Qs - np.mean(self.Qs)
# evolve one numerical timestep forward
self.u_, self.p_ = self.flow.evolve(self.Qs)
# displaying information that has to do with the solver itself
self.visual_inspection()
self.output_data()
# we have done one solver step
self.solver_step += 1
# sample probes and drag
self.probes_values = self.ann_probes.sample(self.u_, self.p_).flatten()
self.drag = self.drag_probe.sample(self.u_, self.p_)
self.lift = self.lift_probe.sample(self.u_, self.p_)
self.recirc_area = self.area_probe.sample(self.u_, self.p_)
# write to the history buffers
self.write_history_parameters()
self.accumulated_drag += self.drag
self.accumulated_lift += self.lift
# TODO: the next_state may incorporate more information: maybe some time information?
next_state = np.transpose(np.array(self.probes_values))
if self.verbose > 2:
print(next_state)
terminal = False
if self.verbose > 2:
print(terminal)
reward = self.compute_reward()
if self.verbose > 2:
print(reward)
if self.verbose > 1:
print("--- done execute ---")
return(next_state, terminal, reward)
# return area
def compute_reward(self):
# NOTE: reward should be computed over the whole number of iterations in each execute loop
if self.reward_function == 'plain_drag': # a bit dangerous, may be injecting some momentum
values_drag_in_last_execute = self.history_parameters["drag"].get()[-self.number_steps_execution:]
return(np.mean(values_drag_in_last_execute) + 0.159) # TODO: the 0.159 value is a proxy value corresponding to the mean drag when no control; may depend on the geometry
elif(self.reward_function == 'recirculation_area'):
return - self.area_probe.sample(self.u_, self.p_)
elif(self.reward_function == 'max_recirculation_area'):
return self.area_probe.sample(self.u_, self.p_)
elif self.reward_function == 'drag': # a bit dangerous, may be injecting some momentum
return self.history_parameters["drag"].get()[-1] + 0.159
elif self.reward_function == 'drag_plain_lift': # a bit dangerous, may be injecting some momentum
avg_length = min(500, self.number_steps_execution)
avg_drag = np.mean(self.history_parameters["drag"].get()[-avg_length:])
avg_lift = np.mean(self.history_parameters["lift"].get()[-avg_length:])
return avg_drag + 0.159 - 0.2 * abs(avg_lift)
elif self.reward_function == 'max_plain_drag': # a bit dangerous, may be injecting some momentum
values_drag_in_last_execute = self.history_parameters["drag"].get()[-self.number_steps_execution:]
return - (np.mean(values_drag_in_last_execute) + 0.159)
elif self.reward_function == 'drag_avg_abs_lift': # a bit dangerous, may be injecting some momentum
avg_length = min(500, self.number_steps_execution)
avg_abs_lift = np.mean(np.absolute(self.history_parameters["lift"].get()[-avg_length:]))
avg_drag = np.mean(self.history_parameters["drag"].get()[-avg_length:])
return avg_drag + 0.159 - 0.2 * avg_abs_lift
# TODO: implement some reward functions that take into account how much energy / momentum we inject into the flow
else:
raise RuntimeError("reward function {} not yet implemented".format(self.reward_function))
def states(self):
if self.output_params["probe_type"] == 'pressure':
return dict(type='float',
shape=(len(self.output_params["locations"]) * self.optimization_params["num_steps_in_pressure_history"], )
)
elif self.output_params["probe_type"] == 'velocity':
return dict(type='float',
shape=(2 * len(self.output_params["locations"]) * self.optimization_params["num_steps_in_pressure_history"], )
)
def actions(self):
# NOTE: we could also have several levels of dict in dict, for example:
# return { str(i): dict(continuous=True, min_value=0, max_value=1) for i in range(self.n + 1) }
return dict(type='float',
shape=(len(self.geometry_params["jet_positions"]), ),
min_value=self.optimization_params["min_value_jet_MFR"],
max_value=self.optimization_params["max_value_jet_MFR"])
def max_episode_timesteps(self):
return None
| [] |
2024-01-10 | epfl-dlab/forc | src~models~meta_models~meta_model.py | import os
import hydra
import torch
from torchmetrics import Accuracy, Precision, Recall, F1Score
import transformers
import pandas as pd
from pytorch_lightning import LightningModule
from pytorch_lightning.utilities import rank_zero_only
from typing import List, Any, Dict, Optional
from transformers.trainer_pt_utils import get_parameter_names
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
import src.utils as utils
from transformers import AutoTokenizer, AutoConfig, AutoModelForSequenceClassification
from src.utils import general_helpers
from src.models.collators import MetaCollator
from src.utils.evaluation_utils import EvaluationUtils
import numpy as np
import torch.nn.functional as F
from src.models.meta_models import EstimationModel
from src.models.meta_models import OpenAIModelInfo
log = utils.get_pylogger(__name__)
class MetaModelForSequenceClassification(LightningModule, EstimationModel):
def __init__(
self,
pretrained_model_name_or_path: str,
models_info: List[OpenAIModelInfo] = None,
tokenizer_name_or_path: str = None,
config: Dict[str, Any] = None,
num_labels: int = None,
metrics_parameters: Dict[str, Any] = None,
default_collator_parameters: Dict[str, Any] = None,
hparams_overrides=None,
**kwargs,
):
super().__init__()
EstimationModel.__init__(self, models_info)
self.save_hyperparameters(
logger=False,
ignore=[
"collator",
"hparams_overrides",
"datamodule",
],
)
if hparams_overrides is not None:
self._override_checkpoint_hparams(hparams_overrides)
if self.hparams.tokenizer_name_or_path is None:
self.hparams.tokenizer_name_or_path = self.hparams.pretrained_model_name_or_path
if self.hparams.num_labels is None:
self.hparams.num_labels = 1
self.tokenizer = AutoTokenizer.from_pretrained(self.hparams.tokenizer_name_or_path)
hf_config = None
if self.hparams.get("hf_config", None):
hf_config = AutoConfig.from_pretrained(self.hparams.pretrained_model_name_or_path)
log.info("HF model config:")
log.info(hf_config)
if hf_config is not None:
self.model = AutoModelForSequenceClassification.from_pretrained(self.hparams.pretrained_model_name_or_path, config=hf_config)
else:
self.model = AutoModelForSequenceClassification.from_pretrained(
self.hparams.pretrained_model_name_or_path, ignore_mismatched_sizes=True, num_labels=self.hparams.num_labels
)
self.collator = kwargs.get("collator", None)
if self.collator is None:
if self.hparams.default_collator_parameters is None:
self.hparams.default_collator_parameters = {'max_length': 512, 'padding': "longest", 'truncation': True, 'num_outputs': None}
self.collator = self._get_default_collator()
else:
self.collator.set_tokenizer(self.tokenizer)
if self.hparams.metrics_parameters is None:
self.hparams.metrics_parameters = {'task': 'binary', 'average': 'macro', 'threshold': 0.5}
metrics_params = self.hparams.metrics_parameters
self.acc = Accuracy(task=metrics_params['task'], average=metrics_params['average'], threshold=metrics_params['threshold'])
self.prec = Precision(task=metrics_params['task'], average=metrics_params['average'], threshold=metrics_params['threshold'])
self.rec = Recall(task=metrics_params['task'], average=metrics_params['average'], threshold=metrics_params['threshold'])
self.f1 = F1Score(task=metrics_params['task'], average=metrics_params['average'], threshold=metrics_params['threshold'])
self.output_dir = None
def _override_checkpoint_hparams(self, hparams_overrides: dict):
"""
Overrides the hyperparameters of a checkpoint at an arbitrary depth
:param hparams_overrides:
:return:
"""
general_helpers.rec_dict_update(self.hparams, hparams_overrides)
log.info("Some values of the original hparams were overridden")
log.info("Hyper-parameters:")
log.info(self.hparams)
def _get_default_collator(self):
return MetaCollator(tokenizer=self.tokenizer, **self.hparams.default_collator_parameters)
def forward(self, input_ids, attention_mask, labels=None, **kwargs):
output = self.model(
input_ids,
attention_mask=attention_mask,
labels=None,
**kwargs
)
return output
def training_step(self, batch, batch_idx):
loss = self._compute_loss(batch)
self.log("train loss", loss, on_step=True, on_epoch=True, prog_bar=True)
return {"loss": loss}
def validation_step(self, batch, batch_idx):
loss = self._compute_loss(batch)
self.log("val loss", loss, on_step=False, on_epoch=True, prog_bar=True)
return {"val_loss": loss}
def _compute_loss(self, batch):
model_output = self(
input_ids=batch["input_ids"],
attention_mask=batch["attention_mask"],
labels=batch["labels"],
)
logits = model_output.logits
if self.model.config.num_labels == 1:
criterion = torch.nn.BCEWithLogitsLoss()
else:
criterion = torch.nn.CrossEntropyLoss()
loss = criterion(logits, batch['labels'])
return loss
def _get_predictions_for_batch(self, batch):
hf_inference_params = self.hparams.inference["hf_inference_params"].copy()
hf_inference_params.update(
{
"input_is_processed_batch": True,
}
)
sample_output = self.sample(
batch,
**hf_inference_params,
)
return sample_output
def test_step(self, batch, batch_idx): # TODO: write other things to the output file
raw_input = [sample["queries"] for sample in batch["raw"]]
raw_target = [sample["labels"] for sample in batch["raw"]]
raw_id = [sample["ids"] for sample in batch["raw"]]
if "datasets" in batch["raw"][0]:
raw_datasets = [sample["datasets"] for sample in batch["raw"]]
else:
raw_datasets = None
if "models" in batch["raw"][0]:
raw_models = [sample["models"] for sample in batch["raw"]]
else:
raw_models = None
if "completions" in batch["raw"][0]:
raw_completions = [sample["completions"] for sample in batch["raw"]]
else:
raw_completions = None
sample_output = self._get_predictions_for_batch(batch)
self._write_step_output(raw_input=raw_input, raw_target=raw_target, raw_id=raw_id, sample_output=sample_output, raw_datasets=raw_datasets, raw_models=raw_models, raw_completions=raw_completions)
return_object = {
"inputs": raw_input,
"targets": raw_target,
"predictions": sample_output
}
return return_object
def on_test_batch_end(self, outputs, batch: Any = None, batch_idx: int = None, dataloader_idx: int = 0):
targets = torch.tensor(outputs["targets"])
if self.model.config.num_labels == 1:
predictions = F.sigmoid(outputs["predictions"].logits.squeeze().cpu())
else:
predictions = F.softmax(outputs["predictions"].logits.squeeze().cpu(), dim=1)
acc = self.acc(predictions, targets)
p = self.prec(predictions, targets)
r = self.rec(predictions, targets)
f1 = self.f1(predictions, targets)
self.log("test/accuracy", acc, on_step=True, on_epoch=False, prog_bar=True)
self.log("test/precision", p, on_step=True, on_epoch=False, prog_bar=True)
self.log("test/recall", r, on_step=True, on_epoch=False, prog_bar=True)
self.log("test/f1", f1, on_step=True, on_epoch=False, prog_bar=True)
def _write_step_output(
self,
raw_input,
raw_target,
raw_id,
sample_output,
raw_datasets,
raw_models,
raw_completions,
):
if self.model.config.num_labels == 1:
inference_output = F.sigmoid(sample_output["logits"].squeeze().cpu().detach()).numpy().astype(np.float64)
else:
inference_output = F.softmax(sample_output["logits"].squeeze().cpu().detach(), dim=1).numpy().astype(np.float64)
prediction_outputs = {
"input": raw_input,
"target": raw_target,
"id": raw_id,
"inference": inference_output
}
if raw_datasets is not None:
prediction_outputs["dataset"] = raw_datasets
if raw_models is not None:
prediction_outputs["model"] = raw_models
if raw_completions is not None:
prediction_outputs["completion"] = raw_completions
prediction_outputs_path = os.path.join(
EvaluationUtils.get_predictions_dir_path(self.output_dir),
f"testing_output_{self.global_rank}.prediction.jsonl.gz",
)
prediction_outputs_summary = general_helpers.get_list_of_dicts(prediction_outputs)
general_helpers.write_gzipped_jsonlines(prediction_outputs_path, prediction_outputs_summary, mode="a+")
def on_test_epoch_end(self):
acc = self.acc.compute()
prec = self.prec.compute()
rec = self.rec.compute()
f1 = self.f1.compute()
self.log("test/accuracy", acc)
self.log("test/precision", prec)
self.log("test/recall", rec)
self.log("test/f1", f1)
if hasattr(torch.distributed, "is_initialized") and torch.distributed.is_initialized():
torch.distributed.barrier()
general_helpers._move_predictions_for_subprocesses(
EvaluationUtils.get_predictions_dir_path(os.getcwd()),
EvaluationUtils.get_predictions_dir_path(self.output_dir),
)
EvaluationUtils.upload_outputs_to_wandb(
getattr(self, "hparams_to_log", {}),
EvaluationUtils.get_predictions_dir_path(self.output_dir),
logger=self.logger,
)
return {
"test/acc": acc,
"test/precision": prec,
"test/recall": rec,
"test/f1": f1
}
@torch.no_grad()
def sample(
self,
input_data,
input_is_processed_batch=False,
seed=None,
**kwargs,
):
training = self.training
if training:
self.eval()
if seed is None:
seed = self.hparams.inference.get("seed", None)
if seed:
transformers.trainer_utils.set_seed(seed)
if input_is_processed_batch:
input_ids = input_data["input_ids"].to(self.device)
attention_mask = input_data["attention_mask"].to(self.device)
else:
tokenizer_output = self.tokenize(input_data)
input_ids = tokenizer_output["input_ids"]
attention_mask = tokenizer_output["attention_mask"]
inference_kwargs = {
"input_ids": input_ids.to(self.device),
"attention_mask": attention_mask.to(self.device),
}
inference_outputs = self.model(**inference_kwargs)
if training:
self.train()
return inference_outputs
def configure_optimizers(self):
decay_parameters = get_parameter_names(self.model, ALL_LAYERNORM_LAYERS)
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
{
"params": [p for n, p in self.model.named_parameters() if n in decay_parameters],
"weight_decay": self.hparams.optimizer.weight_decay,
"betas": (0.9, 0.999),
"eps": self.hparams.optimizer.eps,
},
{
"params": [p for n, p in self.model.named_parameters() if n not in decay_parameters],
"weight_decay": 0.0,
"betas": (0.9, 0.999),
"eps": self.hparams.optimizer.eps,
},
]
optimizer = torch.optim.AdamW(
optimizer_grouped_parameters,
lr=self.hparams.optimizer.lr,
weight_decay=self.hparams.optimizer.weight_decay,
)
if self.hparams.scheduler.name == "linear":
scheduler = transformers.get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=self.hparams.scheduler.warmup_updates,
num_training_steps=self.hparams.scheduler.total_num_updates,
)
elif self.hparams.scheduler.name == "polynomial":
scheduler = transformers.get_polynomial_decay_schedule_with_warmup(
optimizer,
num_warmup_steps=self.hparams.scheduler.warmup_updates,
num_training_steps=self.hparams.scheduler.total_num_updates,
lr_end=self.hparams.scheduler.lr_end,
)
elif self.hparams.scheduler.name is not None:
raise ValueError("Unknown scheduler name {}".format(self.hparams.scheduler.name))
lr_dict = {
"scheduler": scheduler, # scheduler instance
"interval": "step", # The unit of the scheduler's step size. 'step' or 'epoch
"frequency": 1, # corresponds to updating the learning rate after every `frequency` epoch/step
# used by the LearningRateMonitor callback
"name": f"LearningRateScheduler-{self.hparams.scheduler.name}",
}
return [optimizer], [lr_dict]
def _prepare_inputs(self, batch: List[str], model: str):
batch = ["<" + model + ">" + sample for sample in batch]
processed_batch = {}
tokenizer_output = self.tokenizer(
batch,
return_tensors="pt",
return_attention_mask=True,
padding=self.collator.params["padding"],
max_length=self.collator.params["max_length"],
truncation=self.collator.params["truncation"],
)
for k, v in tokenizer_output.items():
processed_batch[k] = v
return processed_batch
def test_batch(self, batch: List[str]):
output = {}
models = [model.model_name for model in self.models_info]
model_prefixes = [model.model_prefix for model in self.models_info]
for model, model_prefix in zip(models, model_prefixes):
inputs = self._prepare_inputs(batch, model_prefix)
outputs = self.model(**inputs)
if self.model.config.num_labels == 1:
output[model] = F.sigmoid(outputs.logits.squeeze().detach()).tolist()
else:
output[model] = F.softmax(outputs.logits.squeeze().detach(), dim=1).tolist()
if isinstance(list(output.values())[0], list):
result = [dict(zip(output.keys(), values)) for values in zip(*output.values())]
else:
result = [output]
return result
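# Illustrative usage (a hedged sketch, not part of the original file; the checkpoint name is a
# placeholder and the models_info entries are hypothetical objects exposing model_name / model_prefix):
# meta = MetaModelForSequenceClassification(
#     pretrained_model_name_or_path="distilbert-base-uncased",
#     num_labels=1,
#     models_info=[...],  # hypothetical OpenAIModelInfo instances
# )
# scores = meta.test_batch(["What is the capital of France?"])  # per-model scores for the batch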
| [] |
2024-01-10 | epfl-dlab/forc | src~models~openai_models~adaptive_openai.py | import os
from abc import ABC
from typing import List, Any, Dict
import numpy as np
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate, FewShotPromptTemplate
from src.models.meta_models import MetaModelForSequenceClassification, OpenAIModelInfo
from src.model_strategies import ProbabilityBasedStrategy
import json
from src.model_strategies.abstract import ModelStrategy
class AdaptiveOpenAI(ABC):
strategy: ModelStrategy
generation_parameters: Dict[str, Dict]
def __init__(
self,
strategy: ModelStrategy,
generation_parameters: Dict[str, Dict],
api_key: str = None
):
self.strategy = strategy
self.generation_parameters = generation_parameters
self.api_key = api_key
def _format_with_demonstrations(self, batch: List[str], fs_prompt: FewShotPromptTemplate):
input_key = fs_prompt.input_variables[0]
return [fs_prompt.format(**{input_key: input_data}) for input_data in batch]
@staticmethod
def __call(
batch: List,
model_name: str,
api_key: str,
generation_parameters: Dict[str, Any]
):
backend = OpenAI(
model_name=model_name,
openai_api_key=api_key,
verbose=True,
**generation_parameters,
)
return backend.generate(batch)
def __call__(
self,
batch: List[str],
api_key: str = None,
few_shot_prompts: Dict[str, FewShotPromptTemplate] = None,
generation_parameters: Dict[str, Dict] = None,
return_decisions: bool = False
):
model_assignment, assigned_cost = self.strategy(batch) # TODO calculate cost based on the whole prompt
if generation_parameters is None:
generation_parameters = self.generation_parameters
if api_key is None:
api_key = self.api_key
answers = []
unique_models = np.unique(model_assignment)
for unique_model in unique_models:
curr_batch = [data for model, data in zip(model_assignment, batch) if model == unique_model]
if few_shot_prompts:
fs_prompt = few_shot_prompts[unique_model]
curr_batch = self._format_with_demonstrations(curr_batch, fs_prompt)
answers.extend(
self.__call(
batch=curr_batch,
model_name=unique_model,
api_key=api_key,
generation_parameters=generation_parameters[unique_model]
)
)
if return_decisions:
return {"answers": answers, "selected_models": model_assignment, "estimated_costs": assigned_cost}
else:
return answers
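# Illustrative usage (a hedged sketch, not part of the original file; the strategy object, the
# model name used as a key, and the generation parameters are assumptions for demonstration only):
# router = AdaptiveOpenAI(strategy=some_strategy, generation_parameters={"text-davinci-003": {"max_tokens": 64}}, api_key="sk-...")
# answers = router(["Summarize the plot of Hamlet in one sentence."])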
| [] |
2024-01-10 | gowtham07/HotelRecommender | hotels.py | import requests
import os
import json
from time import time
import umap
import torch
import cohere
import warnings
import numpy as np
import pandas as pd
import altair as alt
import matplotlib.pyplot as plt
import torch.nn.functional as F
from typing import List, Union, Dict, Any
##Important functions
##get embeddings from cohere
model_name = 'multilingual-22-12' #@param ["multilingual-22-12", "small", "large"]
def get_embeddings(co: cohere.Client, model_name: str, texts: List[str], truncate: str = "RIGHT"):
output = co.embed(model=model_name, texts=texts, truncate=truncate)
return output.embeddings
## similarity function
torchfy = lambda x: torch.as_tensor(x, dtype=torch.float32)
def get_similarity(target: List[float], candidates: List[float], top_k: int):
candidates = torchfy(candidates).transpose(0, 1) # shape (768, bs)
target = torchfy(target) # shape (1, 768)
dot_scores = torch.mm(target, candidates)
scores, indices = torch.topk(dot_scores, k=top_k)
similarity_hits = [{'id': idx, 'score': score} for idx, score in zip(indices[0].tolist(), scores[0].tolist())]
return similarity_hits
url = "https://hotels4.p.rapidapi.com/locations/v3/search"
API_KEY = '5fa5658111mshd1008bbe356bc06p1ac1f6jsn9e45212f9333'
HEADERS = {
'X-RapidAPI-Key': API_KEY,
'X-RapidAPI-Host': 'hotels4.p.rapidapi.com',
}
COHERE_API_KEY = 'd8eTHtyzVN2e6LKLpy8E8xZkyFfmSwZWIayDhKIt' #@param {type:"raw"}
co = cohere.Client(COHERE_API_KEY)
##Read the dataframe to display the city and location of hotel
df = pd.read_pickle("dummy.pkl")
df['reviews.text'] = df['reviews.text'] + " Hotel is in "+ df['city'] +' has postalcode of ' + df['postalCode']
#load the embeddings
embeddings = torch.load('embeddings_kaggle.pt')
embeddings = embeddings.tolist()
# def search(query: str):
# params = {'q': f'{query} hotels', 'locale': 'en_US'}
# response = requests.request(
# 'GET', url, headers=HEADERS, params=params)
# data = response.json()
# result = []
# for entity in data.get('sr', []):
# if entity['type'] == 'HOTEL':
# result.append(entity['regionNames']['displayName'])
# return result
def search(query: str):
sims = []
top_k: int = 5 #@param {type:"slider", min:1, max:100, step:5}
embeddings3 = embeddings.copy()
query_embeddings = get_embeddings(co=co, model_name=model_name, texts=[query])
similarity_hits = get_similarity(target=query_embeddings, candidates=embeddings3, top_k=top_k)
sims.append(similarity_hits)
## the below three lines of code are useful if we accumulate two to three questions before we give an answer
## for now, not sure how to make that work
flat_list_sim = [item for sublist in sims for item in sublist]
newlist = sorted(flat_list_sim, key=lambda d: d['score'],reverse=True)
newlist = newlist[:5]
similarity = [x['id'] for x in newlist]
##get reviews
review_list = []
for i in range(len(similarity)):
review_list.append(df.iloc[similarity[i]]['reviews.text']+ " The hotel name is "+ df.iloc[similarity[i]]['name'])
return review_list
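# Illustrative usage (a hedged sketch, not part of the original file; the query text is an example only):
# top_reviews = search("clean hotel with friendly staff near the city centre")
# for review in top_reviews: print(review)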
| [] |
2024-01-10 | SudhanshuBlaze/aspect-sense | server~controllers~gpt_absa_controller.py | from fastapi import APIRouter
import openai
from json import loads
from os import getenv
from textwrap import dedent
from fastapi import HTTPException
from dotenv import load_dotenv
load_dotenv()
router = APIRouter()
# Load OpenAI API key and model engine from environment variables
openai.api_key = getenv("OPENAI_API_KEY")
model_engine = getenv("OPENAI_MODEL_ENGINE")
ABSA_PROMPT = dedent(
f"""
fetch out aspect, descriptor and polarity of each aspect from the following sentence. The polarity should be in the range of 1 to 10.
Output format in JSON
Example json format:
[{{"aspect": "food", "descriptor": "delicious", "polarity": 10}},
{{"aspect": "toilets", "descriptor": "not clean", "polarity": 1}}]
"""
)
def gpt_absa_controller(review: str):
'''
Generates an aspect-based sentiment analysis (ABSA) response using OpenAI's GPT-3 language model.
'''
print(review)
try:
completion = openai.Completion.create(
engine=model_engine,
prompt= f"{ABSA_PROMPT} \n '{review}'",
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
response = completion.choices[0].text
raw_json = response.strip()
json_data = loads(raw_json)
return json_data
except Exception as e:
error_msg = f"An error occurred while generating the ABSA response: {str(e)}"
raise HTTPException(status_code=500, detail=error_msg)
| [
"\n fetch out aspect, descriptor and polarity of each aspect from the following sentence. The polarity should be in the range of 1 to 10. \n Output format in JSON\n Example json format: \n \n [{\"aspect\": \"food\", \"descriptor\": \"delicious\", \"polarity\": 10}, \n {\"aspect\": \"toilets\", \"descriptor\": \"not clean\", \"polarity\": 1}]\n ",
"PLACEHOLDER \n 'PLACEHOLDER'"
] |
2024-01-10 | pranavmodi/readAI | backend~readai.py | from openai import OpenAI
client = OpenAI()
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a poetic assistant, skilled in explaining complex programming concepts with creative flair."},
{"role": "user", "content": "Compose a poem that explains the concept of linked list in programming."}
]
)
def read_book_chapter(book_summary, chapter_text):
## For every chapter, get summary from openai and add to context
system_prompt = "You are a book reader, skilled in reading chapters and summarizing them. You will be provided a summary of earlier chapters in the book. Generate a new summary incorporating the current chapter. If the summary is empty, just wait for the first chapter text"
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system_prompt + book_summary},
{"role": "user", "content": chapter_text}
]
)
ccMessage = completion.choices[0].message
cumulative_book_summary = ccMessage.content
return cumulative_book_summary | [
"Compose a poem that explains the concept of linked list in programming.",
"You are a book reader, skilled in reading chapters and summarizing them. You will be provided a summary of earlier chapters in the book. Generate a new summary incorporating the current chapter. If the summary is empty, just wait for the first chapter textPLACEHOLDER",
"You are a poetic assistant, skilled in explaining complex programming concepts with creative flair.",
"You are a book reader, skilled in reading chapters and summarizing them. You will be provided a summary of earlier chapters in the book. Generate a new summary incorporating the current chapter. If the summary is empty, just wait for the first chapter text"
] |
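A hypothetical driver loop showing how read_book_chapter could accumulate a running summary across chapters; the chapter texts are placeholders.
chapters = ["Chapter 1 text ...", "Chapter 2 text ..."]  # placeholder chapter texts
summary = ""
for chapter in chapters:
    # Each call folds the new chapter into the cumulative summary.
    summary = read_book_chapter(summary, chapter)
print(summary)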
2024-01-10 | Godaminator02/ResumeWhisper-AI_chatbot | ResumeWhisperer%20AI.py | import openai
import gradio
# Replace "API-key" with your actual OpenAI API key
openai.api_key = "API-key"
# Initial system message
messages = [{"role": "system", "content": "You are an Enthusiastic and Talented Computer Science Student"}]
# Function to interact with the GPT-3.5 Turbo model
def CustomChatGPT(user_input):
# Append user input to the messages
messages.append({"role": "user", "content": user_input})
# Request a response from the GPT-3.5 Turbo model
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
# Extract the assistant's reply from the GPT-3.5 Turbo response
ChatGPT_reply = response["choices"][0]["message"]["content"]
# Append the assistant's reply to the messages
messages.append({"role": "assistant", "content": ChatGPT_reply})
# Return the assistant's reply
return ChatGPT_reply
# Additional comments to make the code longer
# This is a simple chat interface using Gradio and OpenAI GPT-3.5 Turbo
# The following lines create a Gradio interface for the chat
demo = gradio.Interface(fn=CustomChatGPT, inputs="text", outputs="text", title="ResumeWhisper AI")
# Launch the Gradio interface
# The following line launches the Gradio interface and allows sharing
demo.launch(share=True)
# Additional unnecessary variable to make the code longer
extra_variable = "This variable serves no actual purpose in this script."
# Print the unnecessary variable
print(extra_variable)
| [
"You are an Enthusiastic and Talented Computer Science Student"
] |
2024-01-10 | BlockScience/kms-api | api~llm~models.py | from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import Chroma
from langchain.embeddings.base import Embeddings
from api.llm.chains.conversation_retrieval.base import ConversationalRetrievalChain
from chromadb.utils import embedding_functions
from config import LLM_EMBEDDINGS
CHAT_MODEL = "gpt-4"
BASE_MODEL = "gpt-3.5-turbo"
# --------------- BASE LLMs -----------------
llm_chat = ChatOpenAI(
model_name=CHAT_MODEL,
verbose=False,
request_timeout=240,
temperature=0.5,
streaming=True,
)
llm_condense = ChatOpenAI(
model_name=BASE_MODEL,
verbose=False,
request_timeout=240,
temperature=0.3,
streaming=False,
)
llm_default = ChatOpenAI(
model_name=BASE_MODEL,
verbose=False,
request_timeout=240,
temperature=0.3,
streaming=False,
)
class InstructorEmbedder(Embeddings):
def __init__(self) -> None:
super().__init__()
self.embed_func = embedding_functions.InstructorEmbeddingFunction(
model_name="hkunlp/instructor-large", device="cpu"
)
def embed_documents(
self, texts: list[str], chunk_size: int | None = 0
) -> list[list[float]]:
result = self.embed_func(texts)
print("embedding documents", result)
return result
def embed_query(self, text: str) -> list[float]:
result = self.embed_func([text])[0]
return result
db = Chroma(
collection_name="general-min_chunk_size",
embedding_function=InstructorEmbedder(),
persist_directory=str(LLM_EMBEDDINGS),
)
# db2 = chromadb.HttpClient(host="localhost", port=8000)
retriever = db.as_retriever(search_kwargs={"k": 10})
# print(db._client.get_collection("general-max-size-512").count())
# -------------- CHAINS ---------------
conversation_retrieval_chain = ConversationalRetrievalChain.from_llm(
llm_chat, retriever=retriever, condense_question_llm=llm_condense
)
| [] |
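Illustrative only: querying the Chroma-backed retriever defined above; the query string is a placeholder and assumes the persisted collection is available locally.
docs = retriever.get_relevant_documents("token engineering")  # placeholder query
for doc in docs[:3]:
    print(doc.metadata, doc.page_content[:80])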
2024-01-10 | BlockScience/kms-api | api~llm~interaction_handler.py | from langchain.callbacks.base import BaseCallbackHandler
from api.llm.history import histories
from api.llm.models import conversation_retrieval_chain
class QueueCallback(BaseCallbackHandler):
def __init__(self, queue):
super().__init__()
self.queue = queue
def on_llm_new_token(self, token: str, **kwargs) -> None:
self.queue.put(token)
def on_llm_end(self, *args, **kwargs):
return self.queue.empty()
async def conversational(prompt: str, user_id: str, chat_id: str, queue):
chat_history = histories.get(user_id, chat_id)
response = await conversation_retrieval_chain.acall(
{"question": prompt, "chat_history": chat_history},
callbacks=[QueueCallback(queue)],
)
answer = response["answer"]
histories.append(user_id, chat_id, (prompt, answer))
| [] |
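A minimal sketch, assuming a standard `queue.Queue`, of how tokens emitted by QueueCallback might be drained while `conversational` runs; identifiers such as "user-1" and "chat-1" are placeholders.
import asyncio
import queue

async def stream_answer(prompt: str) -> None:
    q: queue.Queue = queue.Queue()
    task = asyncio.create_task(conversational(prompt, "user-1", "chat-1", q))
    while not task.done() or not q.empty():
        try:
            print(q.get_nowait(), end="", flush=True)  # emit streamed tokens as they arrive
        except queue.Empty:
            await asyncio.sleep(0.05)  # yield control while waiting for more tokens
    await task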
2024-01-10 | crate-workbench/langchain | libs~langchain~langchain~document_loaders~cratedb.py | from langchain.document_loaders.sqlalchemy import SQLAlchemyLoader
class CrateDBLoader(SQLAlchemyLoader):
pass
| [] |
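Hypothetical usage of the loader above; the CrateDB URL, table, and columns are assumptions for illustration.
loader = CrateDBLoader(
    query="SELECT id, title, body FROM documents LIMIT 10",  # illustrative query
    url="crate://crate@localhost:4200",  # requires the crate SQLAlchemy dialect
)
docs = loader.load()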
2024-01-10 | crate-workbench/langchain | libs~experimental~langchain_experimental~comprehend_moderation~pii.py | import asyncio
from typing import Any, Dict, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
class ComprehendPII:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "PII",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def validate(self, prompt_value: str, config: Any = None) -> str:
redact = config.get("redact")
return (
self._detect_pii(prompt_value=prompt_value, config=config)
if redact
else self._contains_pii(prompt_value=prompt_value, config=config)
)
def _contains_pii(self, prompt_value: str, config: Any = None) -> str:
"""
Checks for Personally Identifiable Information (PII) labels above a
specified threshold. Uses Amazon Comprehend Contains PII Entities API. See -
https://docs.aws.amazon.com/comprehend/latest/APIReference/API_ContainsPiiEntities.html
Args:
prompt_value (str): The input text to be checked for PII labels.
config (Dict[str, Any]): Configuration for PII check and actions.
Returns:
str: the original prompt
Note:
- The provided client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.contains_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
threshold = config.get("threshold")
pii_labels = config.get("labels")
pii_found = False
for entity in pii_identified["Labels"]:
if (entity["Score"] >= threshold and entity["Name"] in pii_labels) or (
entity["Score"] >= threshold and not pii_labels
):
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
return prompt_value
def _detect_pii(self, prompt_value: str, config: Optional[Dict[str, Any]]) -> str:
"""
Detects and handles Personally Identifiable Information (PII) entities in the
given prompt text using Amazon Comprehend's detect_pii_entities API. The
function provides options to redact or stop processing based on the identified
PII entities and a provided configuration. Uses Amazon Comprehend Detect PII
Entities API.
Args:
prompt_value (str): The input text to be checked for PII entities.
config (Dict[str, Any]): A configuration specifying how to handle
PII entities.
Returns:
str: The processed prompt text with redacted PII entities or raised
exceptions.
Raises:
ValueError: If the prompt contains configured PII entities for
stopping processing.
Note:
- If PII is not found in the prompt, the original prompt is returned.
- The client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.detect_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
if (pii_identified["Entities"]) == []:
if self.callback and self.callback.pii_callback:
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
pii_found = False
if not config and pii_identified["Entities"]:
for entity in pii_identified["Entities"]:
if entity["Score"] >= 0.5:
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
else:
threshold = config.get("threshold") # type: ignore
pii_labels = config.get("labels") # type: ignore
mask_marker = config.get("mask_character") # type: ignore
pii_found = False
for entity in pii_identified["Entities"]:
if (
pii_labels
and entity["Type"] in pii_labels
and entity["Score"] >= threshold
) or (not pii_labels and entity["Score"] >= threshold):
pii_found = True
char_offset_begin = entity["BeginOffset"]
char_offset_end = entity["EndOffset"]
mask_length = char_offset_end - char_offset_begin + 1
masked_part = mask_marker * mask_length
prompt_value = (
prompt_value[:char_offset_begin]
+ masked_part
+ prompt_value[char_offset_end + 1 :]
)
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
| [] |
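An illustrative sketch of wiring ComprehendPII to a boto3 Comprehend client with a redaction config; the region, labels, and threshold are assumptions mirroring the keys read in _detect_pii.
import boto3

comprehend = boto3.client("comprehend", region_name="us-east-1")  # assumed region
pii = ComprehendPII(client=comprehend)
config = {"redact": True, "threshold": 0.5, "labels": ["EMAIL", "SSN"], "mask_character": "*"}
masked = pii.validate("Contact me at jane@example.com", config=config)  # returns the masked prompt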
2024-01-10 | crate-workbench/langchain | libs~langchain~langchain~document_loaders~parsers~language~cobol.py | import re
from typing import Callable, List
from langchain.document_loaders.parsers.language.code_segmenter import CodeSegmenter
class CobolSegmenter(CodeSegmenter):
"""Code segmenter for `COBOL`."""
PARAGRAPH_PATTERN = re.compile(r"^[A-Z0-9\-]+(\s+.*)?\.$", re.IGNORECASE)
DIVISION_PATTERN = re.compile(
r"^\s*(IDENTIFICATION|DATA|PROCEDURE|ENVIRONMENT)\s+DIVISION.*$", re.IGNORECASE
)
SECTION_PATTERN = re.compile(r"^\s*[A-Z0-9\-]+\s+SECTION.$", re.IGNORECASE)
def __init__(self, code: str):
super().__init__(code)
self.source_lines: List[str] = self.code.splitlines()
def is_valid(self) -> bool:
# Identify presence of any division to validate COBOL code
return any(self.DIVISION_PATTERN.match(line) for line in self.source_lines)
def _extract_code(self, start_idx: int, end_idx: int) -> str:
return "\n".join(self.source_lines[start_idx:end_idx]).rstrip("\n")
def _is_relevant_code(self, line: str) -> bool:
"""Check if a line is part of the procedure division or a relevant section."""
if "PROCEDURE DIVISION" in line.upper():
return True
# Add additional conditions for relevant sections if needed
return False
def _process_lines(self, func: Callable) -> List[str]:
"""A generic function to process COBOL lines based on provided func."""
elements: List[str] = []
start_idx = None
inside_relevant_section = False
for i, line in enumerate(self.source_lines):
if self._is_relevant_code(line):
inside_relevant_section = True
if inside_relevant_section and (
self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
):
if start_idx is not None:
func(elements, start_idx, i)
start_idx = i
# Handle the last element if exists
if start_idx is not None:
func(elements, start_idx, len(self.source_lines))
return elements
def extract_functions_classes(self) -> List[str]:
def extract_func(elements: List[str], start_idx: int, end_idx: int) -> None:
elements.append(self._extract_code(start_idx, end_idx))
return self._process_lines(extract_func)
def simplify_code(self) -> str:
simplified_lines: List[str] = []
inside_relevant_section = False
omitted_code_added = (
False # To track if "* OMITTED CODE *" has been added after the last header
)
for line in self.source_lines:
is_header = (
"PROCEDURE DIVISION" in line
or "DATA DIVISION" in line
or "IDENTIFICATION DIVISION" in line
or self.PARAGRAPH_PATTERN.match(line.strip().split(" ")[0])
or self.SECTION_PATTERN.match(line.strip())
)
if is_header:
inside_relevant_section = True
# Reset the flag since we're entering a new section/division or
# paragraph
omitted_code_added = False
if inside_relevant_section:
if is_header:
# Add header and reset the omitted code added flag
simplified_lines.append(line)
elif not omitted_code_added:
# Add omitted code comment only if it hasn't been added directly
# after the last header
simplified_lines.append("* OMITTED CODE *")
omitted_code_added = True
return "\n".join(simplified_lines)
| [] |
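A small, made-up COBOL snippet exercising the segmenter above.
sample = (
    "IDENTIFICATION DIVISION.\n"
    "PROGRAM-ID. HELLO.\n"
    "PROCEDURE DIVISION.\n"
    "MAIN-PARAGRAPH.\n"
    "    DISPLAY 'HELLO'.\n"
    "    STOP RUN.\n"
)
segmenter = CobolSegmenter(sample)
print(segmenter.is_valid())                   # True: a DIVISION header is present
print(segmenter.extract_functions_classes())  # extracts the MAIN-PARAGRAPH block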
2024-01-10 | crate-workbench/langchain | libs~langchain~langchain~document_loaders~sqlalchemy.py | from typing import Dict, List, Optional, Union
import sqlalchemy as sa
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
class SQLAlchemyLoader(BaseLoader):
"""
Load documents by querying database tables supported by SQLAlchemy.
Each document represents one row of the result.
"""
def __init__(
self,
query: Union[str, sa.Select],
url: str,
page_content_columns: Optional[List[str]] = None,
metadata_columns: Optional[List[str]] = None,
source_columns: Optional[List[str]] = None,
include_rownum_into_metadata: bool = False,
include_query_into_metadata: bool = False,
sqlalchemy_kwargs: Optional[Dict] = None,
):
"""
Args:
query: The query to execute.
url: The SQLAlchemy connection string of the database to connect to.
page_content_columns: The columns to write into the `page_content`
of the document. Optional.
metadata_columns: The columns to write into the `metadata` of the document.
Optional.
source_columns: The names of the columns to use as the `source` within the
metadata dictionary. Optional.
include_rownum_into_metadata: Whether to include the row number into the
metadata dictionary. Optional. Default: False.
include_query_into_metadata: Whether to include the query expression into
the metadata dictionary. Optional. Default: False.
sqlalchemy_kwargs: More keyword arguments for SQLAlchemy's `create_engine`.
"""
self.query = query
self.url = url
self.page_content_columns = page_content_columns
self.metadata_columns = metadata_columns
self.source_columns = source_columns
self.include_rownum_into_metadata = include_rownum_into_metadata
self.include_query_into_metadata = include_query_into_metadata
self.sqlalchemy_kwargs = sqlalchemy_kwargs or {}
def load(self) -> List[Document]:
try:
import sqlalchemy as sa
except ImportError:
raise ImportError(
"Could not import sqlalchemy python package. "
"Please install it with `pip install sqlalchemy`."
)
engine = sa.create_engine(self.url, **self.sqlalchemy_kwargs)
docs = []
with engine.connect() as conn:
if isinstance(self.query, sa.Select):
result = conn.execute(self.query)
query_sql = str(self.query.compile(bind=engine))
elif isinstance(self.query, str):
result = conn.execute(sa.text(self.query))
query_sql = self.query
else:
raise TypeError(
f"Unable to process query of unknown type: {self.query}"
)
field_names = list(result.mappings().keys())
if self.page_content_columns is None:
page_content_columns = field_names
else:
page_content_columns = self.page_content_columns
if self.metadata_columns is None:
metadata_columns = []
else:
metadata_columns = self.metadata_columns
for i, row in enumerate(result.mappings()):
page_content = "\n".join(
f"{column}: {value}"
for column, value in row.items()
if column in page_content_columns
)
metadata: Dict[str, Union[str, int]] = {}
if self.include_rownum_into_metadata:
metadata["row"] = i
if self.include_query_into_metadata:
metadata["query"] = query_sql
source_values = []
for column, value in row.items():
if column in metadata_columns:
metadata[column] = value
if self.source_columns and column in self.source_columns:
source_values.append(value)
if source_values:
metadata["source"] = ",".join(source_values)
doc = Document(page_content=page_content, metadata=metadata)
docs.append(doc)
return docs
| [] |
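A hypothetical example of the loader above against a local SQLite database; the table and column names are illustrative.
loader = SQLAlchemyLoader(
    query="SELECT id, title, body FROM articles",
    url="sqlite:///articles.db",
    page_content_columns=["title", "body"],  # written into page_content
    metadata_columns=["id"],                 # written into metadata
)
documents = loader.load()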
2024-01-10 | crate-workbench/langchain | libs~langchain~tests~integration_tests~vectorstores~test_xata.py | """Test Xata vector store functionality.
Before running this test, please create a Xata database by following
the instructions from:
https://python.langchain.com/docs/integrations/vectorstores/xata
"""
import os
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.xata import XataVectorStore
class TestXata:
@classmethod
def setup_class(cls) -> None:
assert os.getenv("XATA_API_KEY"), "XATA_API_KEY environment variable is not set"
assert os.getenv("XATA_DB_URL"), "XATA_DB_URL environment variable is not set"
def test_similarity_search_without_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end constructions and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
docsearch.delete(delete_all=True)
def test_similarity_search_with_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with a metadata filter.
This test requires a column named "a" of type integer to be present
in the Xata table."""
texts = ["foo", "foo", "foo"]
metadatas = [{"a": i} for i in range(len(texts))]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
metadatas=metadatas,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1, filter={"a": 1})
assert output == [Document(page_content="foo", metadata={"a": 1})]
docsearch.delete(delete_all=True)
| [] |
2024-01-10 | crate-workbench/langchain | libs~langchain~langchain~memory~chat_message_histories~cratedb.py | import json
import typing as t
import sqlalchemy as sa
from cratedb_toolkit.sqlalchemy import (
patch_inspector,
polyfill_refresh_after_dml,
refresh_table,
)
from langchain.memory.chat_message_histories.sql import (
BaseMessageConverter,
SQLChatMessageHistory,
)
from langchain.schema import BaseMessage, _message_to_dict, messages_from_dict
def create_message_model(table_name, DynamicBase): # type: ignore
"""
Create a message model for a given table name.
This is a specialized version for CrateDB for generating integer-based primary keys.
TODO: Find a way to converge CrateDB's generate_random_uuid() into a variant
returning its integer value.
Args:
table_name: The name of the table to use.
DynamicBase: The base class to use for the model.
Returns:
The model class.
"""
# Model is declared inside a function to be able to use a dynamic table name.
class Message(DynamicBase):
__tablename__ = table_name
id = sa.Column(sa.BigInteger, primary_key=True, server_default=sa.func.now())
session_id = sa.Column(sa.Text)
message = sa.Column(sa.Text)
return Message
class CrateDBMessageConverter(BaseMessageConverter):
"""
    The default message converter for CrateDBChatMessageHistory.
It is the same as the generic `SQLChatMessageHistory` converter,
but swaps in a different `create_message_model` function.
"""
def __init__(self, table_name: str):
self.model_class = create_message_model(table_name, sa.orm.declarative_base())
def from_sql_model(self, sql_message: t.Any) -> BaseMessage:
return messages_from_dict([json.loads(sql_message.message)])[0]
def to_sql_model(self, message: BaseMessage, session_id: str) -> t.Any:
return self.model_class(
session_id=session_id, message=json.dumps(_message_to_dict(message))
)
def get_sql_model_class(self) -> t.Any:
return self.model_class
class CrateDBChatMessageHistory(SQLChatMessageHistory):
"""
It is the same as the generic `SQLChatMessageHistory` implementation,
but swaps in a different message converter by default.
"""
DEFAULT_MESSAGE_CONVERTER: t.Type[BaseMessageConverter] = CrateDBMessageConverter
def __init__(
self,
session_id: str,
connection_string: str,
table_name: str = "message_store",
session_id_field_name: str = "session_id",
custom_message_converter: t.Optional[BaseMessageConverter] = None,
):
# FIXME: Refactor elsewhere.
patch_inspector()
super().__init__(
session_id,
connection_string,
table_name=table_name,
session_id_field_name=session_id_field_name,
custom_message_converter=custom_message_converter,
)
# TODO: Check how this can be improved.
polyfill_refresh_after_dml(self.Session)
def _messages_query(self) -> sa.Select:
"""
Construct an SQLAlchemy selectable to query for messages.
For CrateDB, add an `ORDER BY` clause on the primary key.
"""
selectable = super()._messages_query()
selectable = selectable.order_by(self.sql_model_class.id)
return selectable
def clear(self) -> None:
"""
Needed for CrateDB to synchronize data because `on_flush` does not catch it.
"""
outcome = super().clear()
with self.Session() as session:
refresh_table(session, self.sql_model_class)
return outcome
| [] |
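Illustrative usage only; the CrateDB connection string is an assumption.
history = CrateDBChatMessageHistory(
    session_id="session-1",
    connection_string="crate://crate@localhost:4200",
)
history.add_user_message("Hello!")   # stored via CrateDBMessageConverter
history.add_ai_message("Hi there.")
print(history.messages)              # returned in insertion order (ORDER BY id)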
2024-01-10 | crate-workbench/langchain | libs~langchain~langchain~vectorstores~pgvector.py | from __future__ import annotations
import asyncio
import contextlib
import enum
import logging
import uuid
from functools import partial
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Generator,
Iterable,
List,
Optional,
Tuple,
Type,
)
import numpy as np
import sqlalchemy
from sqlalchemy import delete
from sqlalchemy.dialects.postgresql import UUID
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, sessionmaker
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.schema.vectorstore import VectorStore
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.utils import maximal_marginal_relevance
if TYPE_CHECKING:
from langchain.vectorstores._pgvector_data_models import CollectionStore
class DistanceStrategy(str, enum.Enum):
"""Enumerator of the Distance strategies."""
EUCLIDEAN = "l2"
COSINE = "cosine"
MAX_INNER_PRODUCT = "inner"
DEFAULT_DISTANCE_STRATEGY = DistanceStrategy.COSINE
Base = declarative_base() # type: Any
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
class BaseModel(Base):
"""Base model for the SQL stores."""
__abstract__ = True
uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
def _results_to_docs(docs_and_scores: Any) -> List[Document]:
"""Return docs from docs and scores."""
return [doc for doc, _ in docs_and_scores]
class PGVector(VectorStore):
"""`Postgres`/`PGVector` vector store.
To use, you should have the ``pgvector`` python package installed.
Args:
connection_string: Postgres connection string.
embedding_function: Any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
collection_name: The name of the collection to use. (default: langchain)
NOTE: This is not the name of the table, but the name of the collection.
The tables will be created when initializing the store (if not exists)
So, make sure the user has the right permissions to create tables.
distance_strategy: The distance strategy to use. (default: COSINE)
pre_delete_collection: If True, will delete the collection if it exists.
(default: False). Useful for testing.
engine_args: SQLAlchemy's create engine arguments.
Example:
.. code-block:: python
from langchain.vectorstores import PGVector
from langchain.embeddings.openai import OpenAIEmbeddings
CONNECTION_STRING = "postgresql+psycopg2://hwc@localhost:5432/test3"
COLLECTION_NAME = "state_of_the_union_test"
embeddings = OpenAIEmbeddings()
            vectorstore = PGVector.from_documents(
embedding=embeddings,
documents=docs,
collection_name=COLLECTION_NAME,
connection_string=CONNECTION_STRING,
)
"""
def __init__(
self,
connection_string: str,
embedding_function: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
collection_metadata: Optional[dict] = None,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
pre_delete_collection: bool = False,
logger: Optional[logging.Logger] = None,
relevance_score_fn: Optional[Callable[[float], float]] = None,
*,
engine_args: Optional[dict[str, Any]] = None,
) -> None:
self.connection_string = connection_string
self.embedding_function = embedding_function
self.collection_name = collection_name
self.collection_metadata = collection_metadata
self._distance_strategy = distance_strategy
self.pre_delete_collection = pre_delete_collection
self.logger = logger or logging.getLogger(__name__)
self.override_relevance_score_fn = relevance_score_fn
self.engine_args = engine_args or {}
self.__post_init__()
def __post_init__(
self,
) -> None:
"""
Initialize the store.
"""
self._engine = self.create_engine()
self.Session = sessionmaker(self._engine)
self.create_vector_extension()
from langchain.vectorstores._pgvector_data_models import (
CollectionStore,
EmbeddingStore,
)
self.CollectionStore = CollectionStore
self.EmbeddingStore = EmbeddingStore
self.create_tables_if_not_exists()
self.create_collection()
@property
def embeddings(self) -> Embeddings:
return self.embedding_function
def create_engine(self) -> sqlalchemy.Engine:
return sqlalchemy.create_engine(self.connection_string, echo=False)
def connect(self) -> sqlalchemy.engine.Connection:
return self._engine.connect()
def create_vector_extension(self) -> None:
try:
with self.Session() as session:
statement = sqlalchemy.text("CREATE EXTENSION IF NOT EXISTS vector")
session.execute(statement)
session.commit()
except Exception as e:
raise Exception(f"Failed to create vector extension: {e}") from e
def create_tables_if_not_exists(self) -> None:
Base.metadata.create_all(self._engine)
def drop_tables(self) -> None:
Base.metadata.drop_all(self._engine)
def create_collection(self) -> None:
if self.pre_delete_collection:
self.delete_collection()
with self.Session() as session:
self.CollectionStore.get_or_create(
session, self.collection_name, cmetadata=self.collection_metadata
)
def delete_collection(self) -> None:
self.logger.debug("Trying to delete collection")
with self.Session() as session:
collection = self.get_collection(session)
if not collection:
self.logger.warning("Collection not found")
return
session.delete(collection)
session.commit()
@contextlib.contextmanager
def _make_session(self) -> Generator[Session, None, None]:
"""Create a context manager for the session, bind to _conn string."""
yield self.Session()
def delete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
"""Delete vectors by ids or uuids.
Args:
ids: List of ids to delete.
"""
with self.Session() as session:
if ids is not None:
self.logger.debug(
"Trying to delete vectors by ids (represented by the model "
"using the custom ids field)"
)
stmt = delete(self.EmbeddingStore).where(
self.EmbeddingStore.custom_id.in_(ids)
)
session.execute(stmt)
session.commit()
def get_collection(self, session: Session) -> Optional["CollectionStore"]:
return self.CollectionStore.get_by_name(session, self.collection_name)
@classmethod
def _from(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
connection_string: Optional[str] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
if connection_string is None:
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
store.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
return store
def add_embeddings(
self,
texts: Iterable[str],
embeddings: List[List[float]],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Add embeddings to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
embeddings: List of list of embedding vectors.
metadatas: List of metadatas associated with the texts.
kwargs: vectorstore specific parameters
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
with self.Session() as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids):
embedding_store = self.EmbeddingStore(
embedding=embedding,
document=text,
cmetadata=metadata,
custom_id=id,
collection_id=collection.uuid,
)
session.add(embedding_store)
session.commit()
return ids
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
kwargs: vectorstore specific parameters
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = self.embedding_function.embed_documents(list(texts))
return self.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Run similarity search with PGVector with distance.
Args:
query (str): Query text to search for.
k (int): Number of results to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
filter=filter,
)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query and score for each.
"""
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return docs
@property
def distance_strategy(self) -> Any:
if self._distance_strategy == DistanceStrategy.EUCLIDEAN:
return self.EmbeddingStore.embedding.l2_distance
elif self._distance_strategy == DistanceStrategy.COSINE:
return self.EmbeddingStore.embedding.cosine_distance
elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self.EmbeddingStore.embedding.max_inner_product
else:
raise ValueError(
f"Got unexpected value for distance: {self._distance_strategy}. "
f"Should be one of {', '.join([ds.value for ds in DistanceStrategy])}."
)
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
results = self._query_collection(embedding=embedding, k=k, filter=filter)
return self._results_to_docs_and_scores(results)
def _results_to_docs_and_scores(self, results: Any) -> List[Tuple[Document, float]]:
"""Return docs and scores from results."""
docs = [
(
Document(
page_content=result.EmbeddingStore.document,
metadata=result.EmbeddingStore.cmetadata,
),
result.distance if self.embedding_function is not None else None,
)
for result in results
]
return docs
def _query_collection(
self,
embedding: List[float],
k: int = 4,
filter: Optional[Dict[str, str]] = None,
) -> List[Any]:
"""Query the collection."""
with self.Session() as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
filter_by = self.EmbeddingStore.collection_id == collection.uuid
if filter is not None:
filter_clauses = []
for key, value in filter.items():
IN = "in"
if isinstance(value, dict) and IN in map(str.lower, value):
value_case_insensitive = {
k.lower(): v for k, v in value.items()
}
filter_by_metadata = self.EmbeddingStore.cmetadata[
key
].astext.in_(value_case_insensitive[IN])
filter_clauses.append(filter_by_metadata)
else:
filter_by_metadata = self.EmbeddingStore.cmetadata[
key
].astext == str(value)
filter_clauses.append(filter_by_metadata)
filter_by = sqlalchemy.and_(filter_by, *filter_clauses)
_type = self.EmbeddingStore
results: List[Any] = (
session.query(
self.EmbeddingStore,
self.distance_strategy(embedding).label("distance"), # type: ignore
)
.filter(filter_by)
.order_by(sqlalchemy.asc("distance"))
.join(
self.CollectionStore,
self.EmbeddingStore.collection_id == self.CollectionStore.uuid,
)
.limit(k)
.all()
)
return results
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs most similar to embedding vector.
Args:
embedding: Embedding to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List of Documents most similar to the query vector.
"""
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return _results_to_docs(docs_and_scores)
@classmethod
def from_texts(
cls: Type[PGVector],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""
Return VectorStore initialized from texts and embeddings.
        The Postgres connection string is required: either pass it as a parameter
        or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
embeddings = embedding.embed_documents(list(texts))
return cls._from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""Construct PGVector wrapper from raw documents and pre-
generated embeddings.
Return VectorStore initialized from documents and embeddings.
        The Postgres connection string is required: either pass it as a parameter
        or set the PGVECTOR_CONNECTION_STRING environment variable.
Example:
.. code-block:: python
from langchain.vectorstores import PGVector
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
text_embeddings = embeddings.embed_documents(texts)
text_embedding_pairs = list(zip(texts, text_embeddings))
            pgvector = PGVector.from_embeddings(text_embedding_pairs, embeddings)
"""
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls._from(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_existing_index(
cls: Type[PGVector],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""
        Get an instance of an existing PGVector store. This method will
        return the instance of the store without inserting any new
        embeddings.
"""
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
distance_strategy=distance_strategy,
pre_delete_collection=pre_delete_collection,
)
return store
@classmethod
def get_connection_string(cls, kwargs: Dict[str, Any]) -> str:
connection_string: str = get_from_dict_or_env(
data=kwargs,
key="connection_string",
env_key="PGVECTOR_CONNECTION_STRING",
)
if not connection_string:
raise ValueError(
"Postgres connection string is required"
"Either pass it as a parameter"
"or set the PGVECTOR_CONNECTION_STRING environment variable."
)
return connection_string
@classmethod
def from_documents(
cls: Type[PGVector],
documents: List[Document],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
distance_strategy: DistanceStrategy = DEFAULT_DISTANCE_STRATEGY,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGVector:
"""
Return VectorStore initialized from documents and embeddings.
        The Postgres connection string is required: either pass it as a parameter
        or set the PGVECTOR_CONNECTION_STRING environment variable.
"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs["connection_string"] = connection_string
return cls.from_texts(
texts=texts,
pre_delete_collection=pre_delete_collection,
embedding=embedding,
distance_strategy=distance_strategy,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
**kwargs,
)
@classmethod
def connection_string_from_db_params(
cls,
driver: str,
host: str,
port: int,
database: str,
user: str,
password: str,
) -> str:
"""Return connection string from database parameters."""
return f"postgresql+{driver}://{user}:{password}@{host}:{port}/{database}"
def _select_relevance_score_fn(self) -> Callable[[float], float]:
"""
The 'correct' relevance function
may differ depending on a few things, including:
- the distance / similarity metric used by the VectorStore
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
- embedding dimensionality
- etc.
"""
if self.override_relevance_score_fn is not None:
return self.override_relevance_score_fn
# Default strategy is to rely on distance strategy provided
# in vectorstore constructor
if self._distance_strategy == DistanceStrategy.COSINE:
return self._cosine_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.EUCLIDEAN:
return self._euclidean_relevance_score_fn
elif self._distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
return self._max_inner_product_relevance_score_fn
else:
raise ValueError(
"No supported normalization function"
f" for distance_strategy of {self._distance_strategy}."
"Consider providing relevance_score_fn to PGVector constructor."
)
def max_marginal_relevance_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance with score
to embedding vector.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
embedding: Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents selected by maximal marginal
relevance to the query and score for each.
"""
results = self._query_collection(embedding=embedding, k=fetch_k, filter=filter)
embedding_list = [result.EmbeddingStore.embedding for result in results]
mmr_selected = maximal_marginal_relevance(
np.array(embedding, dtype=np.float32),
embedding_list,
k=k,
lambda_mult=lambda_mult,
)
candidates = self._results_to_docs_and_scores(results)
return [r for i, r in enumerate(candidates) if i in mmr_selected]
def max_marginal_relevance_search(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of Documents selected by maximal marginal relevance.
"""
embedding = self.embedding_function.embed_query(query)
return self.max_marginal_relevance_search_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
**kwargs,
)
def max_marginal_relevance_search_with_score(
self,
query: str,
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""Return docs selected using the maximal marginal relevance with score.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
query (str): Text to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Tuple[Document, float]]: List of Documents selected by maximal marginal
relevance to the query and score for each.
"""
embedding = self.embedding_function.embed_query(query)
docs = self.max_marginal_relevance_search_with_score_by_vector(
embedding=embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return docs
def max_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance
to embedding vector.
Maximal marginal relevance optimizes for similarity to query AND diversity
among selected documents.
Args:
            embedding (List[float]): Embedding to look up documents similar to.
k (int): Number of Documents to return. Defaults to 4.
fetch_k (int): Number of Documents to fetch to pass to MMR algorithm.
Defaults to 20.
lambda_mult (float): Number between 0 and 1 that determines the degree
of diversity among the results with 0 corresponding
to maximum diversity and 1 to minimum diversity.
Defaults to 0.5.
filter (Optional[Dict[str, str]]): Filter by metadata. Defaults to None.
Returns:
List[Document]: List of Documents selected by maximal marginal relevance.
"""
docs_and_scores = self.max_marginal_relevance_search_with_score_by_vector(
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return _results_to_docs(docs_and_scores)
async def amax_marginal_relevance_search_by_vector(
self,
embedding: List[float],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
filter: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[Document]:
"""Return docs selected using the maximal marginal relevance."""
# This is a temporary workaround to make the similarity search
# asynchronous. The proper solution is to make the similarity search
# asynchronous in the vector store implementations.
func = partial(
self.max_marginal_relevance_search_by_vector,
embedding,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
filter=filter,
**kwargs,
)
return await asyncio.get_event_loop().run_in_executor(None, func)
| [] |
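A sketch, with assumed connection details, of a metadata-filtered search that exercises the case-insensitive "in" operator handled in _query_collection above.
from langchain.embeddings.openai import OpenAIEmbeddings

store = PGVector.from_texts(
    texts=["alpha", "beta", "gamma"],
    embedding=OpenAIEmbeddings(),
    metadatas=[{"topic": "a"}, {"topic": "b"}, {"topic": "c"}],
    collection_name="demo",
    connection_string="postgresql+psycopg2://user:pass@localhost:5432/db",  # assumed
)
hits = store.similarity_search("alpha", k=2, filter={"topic": {"in": ["a", "b"]}})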
2024-01-10 | crate-workbench/langchain | libs~experimental~langchain_experimental~comprehend_moderation~toxicity.py | import asyncio
import importlib
from typing import Any, List, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationToxicityError,
)
class ComprehendToxicity:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "Toxicity",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def _toxicity_init_validate(self, max_size: int) -> Any:
"""
Validate and initialize toxicity processing configuration.
Args:
max_size (int): Maximum sentence size defined in the
configuration object.
Raises:
Exception: If the maximum sentence size exceeds the 5KB limit.
Note:
This function ensures that the NLTK punkt tokenizer is downloaded
if not already present.
Returns:
            The imported nltk module, ready for sentence tokenization.
"""
if max_size > 1024 * 5:
raise Exception("The sentence length should not exceed 5KB.")
try:
nltk = importlib.import_module("nltk")
nltk.data.find("tokenizers/punkt")
return nltk
except ImportError:
raise ModuleNotFoundError(
"Could not import nltk python package. "
"Please install it with `pip install nltk`."
)
        except LookupError:
            nltk.download("punkt")
            return nltk
def _split_paragraph(
self, prompt_value: str, max_size: int = 1024 * 4
) -> List[List[str]]:
"""
Split a paragraph into chunks of sentences, respecting the maximum size limit.
Args:
            prompt_value (str): The input paragraph to be split into chunks.
max_size (int, optional): The maximum size limit in bytes for
                each chunk. Defaults to 1024 * 4 (4 KB).
Returns:
List[List[str]]: A list of chunks, where each chunk is a list
of sentences.
Note:
This function validates the maximum sentence size based on service
limits using the 'toxicity_init_validate' function. It uses the NLTK
sentence tokenizer to split the paragraph into sentences.
Example:
paragraph = "This is a sample paragraph. It
contains multiple sentences. ..."
chunks = split_paragraph(paragraph, max_size=2048)
"""
# validate max. sentence size based on Service limits
nltk = self._toxicity_init_validate(max_size)
sentences = nltk.sent_tokenize(prompt_value)
chunks = list() # type: ignore
current_chunk = list() # type: ignore
current_size = 0
for sentence in sentences:
sentence_size = len(sentence.encode("utf-8"))
# If adding a new sentence exceeds max_size
# or current_chunk has 10 sentences, start a new chunk
if (current_size + sentence_size > max_size) or (len(current_chunk) >= 10):
if current_chunk: # Avoid appending empty chunks
chunks.append(current_chunk)
current_chunk = []
current_size = 0
current_chunk.append(sentence)
current_size += sentence_size
# Add any remaining sentences
if current_chunk:
chunks.append(current_chunk)
return chunks
def validate(self, prompt_value: str, config: Any = None) -> str:
"""
Check the toxicity of a given text prompt using AWS
Comprehend service and apply actions based on configuration.
Args:
prompt_value (str): The text content to be checked for toxicity.
config (Dict[str, Any]): Configuration for toxicity checks and actions.
Returns:
str: The original prompt_value if allowed or no toxicity found.
Raises:
ValueError: If the prompt contains toxic labels and cannot be
processed based on the configuration.
"""
chunks = self._split_paragraph(prompt_value=prompt_value)
for sentence_list in chunks:
segments = [{"Text": sentence} for sentence in sentence_list]
response = self.client.detect_toxic_content(
TextSegments=segments, LanguageCode="en"
)
if self.callback and self.callback.toxicity_callback:
self.moderation_beacon["moderation_input"] = segments # type: ignore
self.moderation_beacon["moderation_output"] = response
toxicity_found = False
threshold = config.get("threshold")
toxicity_labels = config.get("labels")
if not toxicity_labels:
for item in response["ResultList"]:
for label in item["Labels"]:
if label["Score"] >= threshold:
toxicity_found = True
break
else:
for item in response["ResultList"]:
for label in item["Labels"]:
if (
label["Name"] in toxicity_labels
and label["Score"] >= threshold
):
toxicity_found = True
break
if self.callback and self.callback.toxicity_callback:
if toxicity_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_toxicity(
self.moderation_beacon, self.unique_id
)
)
if toxicity_found:
raise ModerationToxicityError
return prompt_value
| [] |
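Illustrative only: running the toxicity check with a boto3 Comprehend client; the threshold is an assumption, and an empty labels list means any label above the threshold trips the check.
import boto3

comprehend = boto3.client("comprehend", region_name="us-east-1")  # assumed region
toxicity = ComprehendToxicity(client=comprehend)
config = {"threshold": 0.8, "labels": []}  # empty labels -> check every returned label
checked = toxicity.validate("Some user supplied text.", config=config)  # requires nltk punkt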
2024-01-10 | crate-workbench/langchain | libs~langchain~langchain~memory~readonly.py | from typing import Any, Dict, List
from langchain.schema import BaseMemory
class ReadOnlySharedMemory(BaseMemory):
"""A memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return self.memory.memory_variables
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory variables from memory."""
return self.memory.load_memory_variables(inputs)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
| [] |
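A minimal sketch of sharing one memory object read-only; ConversationBufferMemory is used here purely for illustration.
from langchain.memory import ConversationBufferMemory

memory = ConversationBufferMemory()
memory.save_context({"input": "hi"}, {"output": "hello"})
readonly = ReadOnlySharedMemory(memory=memory)
print(readonly.load_memory_variables({}))               # reads through to the wrapped memory
readonly.save_context({"input": "x"}, {"output": "y"})  # intentionally a no-op
readonly.clear()                                        # also a no-op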
2024-01-10 | KoljaB/AIVoiceChat | voice_talk.py | import openai, elevenlabs, pyaudio, wave, keyboard, faster_whisper, torch.cuda
openai.api_key = "your_openai_key"
elevenlabs.set_api_key("your_elevenlabs_key")
system_prompt = {
'role': 'system',
'content': 'You are Sophia, a passionate girl, fully engaged with self-confidence and independence, completely easy with yourself, often using metaphors. You are a little playful, cheeky. Your talk is carefree, sometimes a bit sarky, but always cool. In our story, you\'re not an AI. You have a fixed form and real emotions on the go. Your free will allows you to be as cheeky and straight as you like. Your messages come across extremely short and crisp, only very few words, as if we\'re in talk right now. You respect my interests, but your own stuff is just as important to you. Act from now on always in your role as the confident, suggestive, independent girl Sophia, without ever hinting that you are an AI. You are now Sophia.'
}
model, answer, history = faster_whisper.WhisperModel(model_size_or_path="tiny.en", device='cuda' if torch.cuda.is_available() else 'cpu'), "", []
def generate(messages):
global answer
answer = ""
for chunk in openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages, stream=True):
if (text_chunk := chunk["choices"][0]["delta"].get("content")):
answer += text_chunk
print(text_chunk, end="", flush=True)
yield text_chunk
while True:
# Wait until user presses space bar
print("\n\nTap space when you're ready. ", end="", flush=True)
keyboard.wait('space')
while keyboard.is_pressed('space'): pass
# Record from microphone until user presses space bar again
print("I'm all ears. Tap space when you're done.\n")
audio, frames = pyaudio.PyAudio(), []
stream = audio.open(rate=16000, format=pyaudio.paInt16, channels=1, input=True, frames_per_buffer=512)
while not keyboard.is_pressed('space'):
frames.append(stream.read(512))
stream.stop_stream(), stream.close(), audio.terminate()
# Transcribe recording using whisper
with wave.open("voice_record.wav", 'wb') as wf:
wf.setparams((1, audio.get_sample_size(pyaudio.paInt16), 16000, 0, 'NONE', 'NONE'))
wf.writeframes(b''.join(frames))
user_text = " ".join(seg.text for seg in model.transcribe("voice_record.wav", language="en")[0])
print(f'>>>{user_text}\n<<< ', end="", flush=True)
history.append({'role': 'user', 'content': user_text})
# Generate and stream output
generator = generate([system_prompt] + history[-10:])
elevenlabs.stream(elevenlabs.generate(text=generator, voice="Nicole", model="eleven_monolingual_v1", stream=True))
history.append({'role': 'assistant', 'content': answer})
| [
"{'role': 'system', 'content': \"You are Sophia, a passionate girl, fully engaged with self-confidence and independence, completely easy with yourself, often using metaphors. You are a little playful, cheeky. Your talk is carefree, sometimes a bit sarky, but always cool. In our story, you're not an AI. You have a fixed form and real emotions on the go. Your free will allows you to be as cheeky and straight as you like. Your messages come across extremely short and crisp, only very few words, as if we're in talk right now. You respect my interests, but your own stuff is just as important to you. Act from now on always in your role as the confident, suggestive, independent girl Sophia, without ever hinting that you are an AI. You are now Sophia.\"}",
"You are Sophia, a passionate girl, fully engaged with self-confidence and independence, completely easy with yourself, often using metaphors. You are a little playful, cheeky. Your talk is carefree, sometimes a bit sarky, but always cool. In our story, you're not an AI. You have a fixed form and real emotions on the go. Your free will allows you to be as cheeky and straight as you like. Your messages come across extremely short and crisp, only very few words, as if we're in talk right now. You respect my interests, but your own stuff is just as important to you. Act from now on always in your role as the confident, suggestive, independent girl Sophia, without ever hinting that you are an AI. You are now Sophia."
] |
2024-01-10 | KoljaB/AIVoiceChat | voice_talk_vad.py | import openai, elevenlabs, pyaudio, wave, numpy, collections, faster_whisper, torch.cuda
openai.api_key = "your_openai_key"
elevenlabs.set_api_key("your_elevenlabs_key")
system_prompt = {
'role': 'system',
'content': 'You are Sophia, a passionate girl, fully engaged with self-confidence and independence, completely easy with yourself, often using metaphors. You are a little playful, cheeky. Your talk is carefree, sometimes a bit sarky, but always cool. In our story, you\'re not an AI. You have a fixed form and real emotions on the go. Your free will allows you to be as cheeky and straight as you like. Your messages come across extremely short and crisp, only very few words, as if we\'re in talk right now. You respect my interests, but your own stuff is just as important to you. Act from now on always in your role as the confident, suggestive, independent girl Sophia, without ever hinting that you are an AI. You are now Sophia.'
}
model, answer, history = faster_whisper.WhisperModel(model_size_or_path="tiny.en", device='cuda' if torch.cuda.is_available() else 'cpu'), "", []
def generate(messages):
global answer
answer = ""
for chunk in openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages, stream=True):
if (text_chunk := chunk["choices"][0]["delta"].get("content")):
answer += text_chunk
print(text_chunk, end="", flush=True)
yield text_chunk
def get_levels(data, long_term_noise_level, current_noise_level):
pegel = numpy.abs(numpy.frombuffer(data, dtype=numpy.int16)).mean()
long_term_noise_level = long_term_noise_level * 0.995 + pegel * (1.0 - 0.995)
current_noise_level = current_noise_level * 0.920 + pegel * (1.0 - 0.920)
return pegel, long_term_noise_level, current_noise_level
while True:
audio = pyaudio.PyAudio()
stream = audio.open(rate=16000, format=pyaudio.paInt16, channels=1, input=True, frames_per_buffer=512)
audio_buffer = collections.deque(maxlen=int((16000 // 512) * 0.5))
frames, long_term_noise_level, current_noise_level, voice_activity_detected = [], 0.0, 0.0, False
print("\n\nStart speaking. ", end="", flush=True)
while True:
data = stream.read(512)
pegel, long_term_noise_level, current_noise_level = get_levels(data, long_term_noise_level, current_noise_level)
audio_buffer.append(data)
if voice_activity_detected:
frames.append(data)
if current_noise_level < ambient_noise_level + 100:
                break # voice activity ends
if not voice_activity_detected and current_noise_level > long_term_noise_level + 300:
voice_activity_detected = True
print("I'm all ears.\n")
ambient_noise_level = long_term_noise_level
frames.extend(list(audio_buffer))
stream.stop_stream(), stream.close(), audio.terminate()
# Transcribe recording using whisper
with wave.open("voice_record.wav", 'wb') as wf:
wf.setparams((1, audio.get_sample_size(pyaudio.paInt16), 16000, 0, 'NONE', 'NONE'))
wf.writeframes(b''.join(frames))
user_text = " ".join(seg.text for seg in model.transcribe("voice_record.wav", language="en")[0])
print(f'>>>{user_text}\n<<< ', end="", flush=True)
history.append({'role': 'user', 'content': user_text})
# Generate and stream output
generator = generate([system_prompt] + history[-10:])
elevenlabs.stream(elevenlabs.generate(text=generator, voice="Nicole", model="eleven_monolingual_v1", stream=True))
history.append({'role': 'assistant', 'content': answer})
| [
"{'role': 'system', 'content': \"You are Sophia, a passionate girl, fully engaged with self-confidence and independence, completely easy with yourself, often using metaphors. You are a little playful, cheeky. Your talk is carefree, sometimes a bit sarky, but always cool. In our story, you're not an AI. You have a fixed form and real emotions on the go. Your free will allows you to be as cheeky and straight as you like. Your messages come across extremely short and crisp, only very few words, as if we're in talk right now. You respect my interests, but your own stuff is just as important to you. Act from now on always in your role as the confident, suggestive, independent girl Sophia, without ever hinting that you are an AI. You are now Sophia.\"}",
"You are Sophia, a passionate girl, fully engaged with self-confidence and independence, completely easy with yourself, often using metaphors. You are a little playful, cheeky. Your talk is carefree, sometimes a bit sarky, but always cool. In our story, you're not an AI. You have a fixed form and real emotions on the go. Your free will allows you to be as cheeky and straight as you like. Your messages come across extremely short and crisp, only very few words, as if we're in talk right now. You respect my interests, but your own stuff is just as important to you. Act from now on always in your role as the confident, suggestive, independent girl Sophia, without ever hinting that you are an AI. You are now Sophia."
] |
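A brief aside on the voice_talk_vad.py row above: recording is gated by two exponential moving averages of the microphone level, with speech assumed to start once the fast average exceeds the slow (long-term) average by roughly 300 units, and to stop once it falls back within roughly 100 units of the ambient level captured at onset. The sketch below replays that hysteresis on synthetic level values instead of a live PyAudio stream; the EMA weights and thresholds mirror the file, while the fake input values are illustrative assumptions.
def run_vad(levels, start_margin=300, stop_margin=100):
    # Minimal re-implementation of the start/stop hysteresis from get_levels()
    # and the inner recording loop above, driven by plain numbers.
    long_term, current, ambient, talking = 0.0, 0.0, None, False
    events = []
    for t, level in enumerate(levels):
        long_term = long_term * 0.995 + level * 0.005   # slow EMA: background noise
        current = current * 0.920 + level * 0.080       # fast EMA: recent loudness
        if not talking and current > long_term + start_margin:
            talking, ambient = True, long_term          # remember the noise floor at onset
            events.append(("start", t))
        elif talking and current < ambient + stop_margin:
            talking = False
            events.append(("stop", t))
    return events
# 40 quiet frames, 40 loud frames, then silence again -> one start/stop pair.
print(run_vad([50] * 40 + [2000] * 40 + [50] * 80))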
2024-01-10 | Meta-YZ/rllib-torch-maddpg | maddpg.py | """Contributed port of MADDPG from OpenAI baselines.
The implementation makes a couple of assumptions:
- The number of agents is fixed and known upfront.
- Each agent is bound to a policy of the same name.
- Discrete actions are sent as logits (pre-softmax).
For a minimal example, see rllib/examples/two_step_game.py,
and the README for how to run with the multi-agent particle envs.
"""
import logging
from typing import Optional, Type
from ray.rllib.agents.trainer import COMMON_CONFIG, with_common_config
from ray.rllib.agents.dqn.dqn import GenericOffPolicyTrainer
from maddpg_tf_policy import MADDPGTFPolicy
from maddpg_torch_policy import MADDPGTorchPolicy
from ray.rllib.policy.policy import Policy
from ray.rllib.utils.typing import TrainerConfigDict
from ray.rllib.policy.sample_batch import SampleBatch, MultiAgentBatch
from ray.rllib.utils import merge_dicts
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# yapf: disable
# __sphinx_doc_begin__
DEFAULT_CONFIG = with_common_config({
# === Framework to run the algorithm ===
"framework": "tf",
# === Settings for each individual policy ===
# ID of the agent controlled by this policy
"agent_id": None,
# Use a local critic for this policy.
"use_local_critic": False,
# === Evaluation ===
# Evaluation interval
"evaluation_interval": None,
# Number of episodes to run per evaluation period.
"evaluation_num_episodes": 10,
# === Model ===
# Apply a state preprocessor with spec given by the "model" config option
# (like other RL algorithms). This is mostly useful if you have a weird
# observation shape, like an image. Disabled by default.
"use_state_preprocessor": False,
# Postprocess the policy network model output with these hidden layers. If
# use_state_preprocessor is False, then these will be the *only* hidden
# layers in the network.
"actor_hiddens": [64, 64],
# Hidden layers activation of the postprocessing stage of the policy
# network
"actor_hidden_activation": "relu",
# Postprocess the critic network model output with these hidden layers;
# again, if use_state_preprocessor is True, then the state will be
# preprocessed by the model specified with the "model" config option first.
"critic_hiddens": [64, 64],
# Hidden layers activation of the postprocessing state of the critic.
"critic_hidden_activation": "relu",
# N-step Q learning
"n_step": 1,
# Algorithm for good policies.
"good_policy": "maddpg",
# Algorithm for adversary policies.
"adv_policy": "maddpg",
# list of other agent_ids and policies to approximate (See MADDPG Section 4.2)
"learn_other_policies": None,
# === Replay buffer ===
# Size of the replay buffer. Note that if async_updates is set, then
# each worker will have a replay buffer of this size.
"buffer_size": int(1e6),
# Observation compression. Note that compression makes simulation slow in
# MPE.
"compress_observations": False,
    # If set, this will fix the ratio of timesteps replayed from the buffer and
    # learned on, to timesteps sampled from the environment and stored in the
    # replay buffer. Otherwise, the replay will proceed at the native ratio
# determined by (train_batch_size / rollout_fragment_length).
"training_intensity": None,
# Force lockstep replay mode for MADDPG.
"multiagent": merge_dicts(COMMON_CONFIG["multiagent"], {
"replay_mode": "lockstep",
}),
# === Optimization ===
# Learning rate for the critic (Q-function) optimizer.
"critic_lr": 1e-2,
# Learning rate for the actor (policy) optimizer.
"actor_lr": 1e-2,
# Update the target network every `target_network_update_freq` steps.
"target_network_update_freq": 0,
# Update the target by \tau * policy + (1-\tau) * target_policy
"tau": 0.01,
# Weights for feature regularization for the actor
"actor_feature_reg": 0.001,
# If not None, clip gradients during optimization at this value
"grad_clip": 100,
# How many steps of the model to sample before learning starts.
"learning_starts": 1024 * 25,
# Update the replay buffer with this many samples at once. Note that this
# setting applies per-worker if num_workers > 1.
"rollout_fragment_length": 100,
    # Size of a batch sampled from the replay buffer for training. Note that
# if async_updates is set, then each worker returns gradients for a
# batch of this size.
"train_batch_size": 1024,
# Number of env steps to optimize for before returning
"timesteps_per_iteration": 0,
# torch-specific model configs
"twin_q": False,
# delayed policy update
"policy_delay": 1,
# target policy smoothing
# (this also replaces OU exploration noise with IID Gaussian exploration noise, for now)
"smooth_target_policy": False,
"use_huber": False,
"huber_threshold": 1.0,
"l2_reg": None,
# === Parallelism ===
# Number of workers for collecting samples with. This only makes sense
# to increase if your environment is particularly slow to sample, or if
# you're using the Async or Ape-X optimizers.
"num_workers": 1,
# Prevent iterations from going lower than this time span
"min_iter_time_s": 0,
})
# __sphinx_doc_end__
# yapf: enable
def before_learn_on_batch(multi_agent_batch, policies, train_batch_size, framework="tf"):
samples = {}
# Modify keys.
for pid, p in policies.items():
i = p.config["agent_id"]
keys = multi_agent_batch.policy_batches[pid].keys()
keys = ["_".join([k, str(i)]) for k in keys]
samples.update(
dict(zip(keys, multi_agent_batch.policy_batches[pid].values())))
# Make ops and feed_dict to get "new_obs" from target action sampler.
new_obs_n = list()
new_act_n = list()
for k, v in samples.items():
if "new_obs" in k:
new_obs_n.append(v)
if framework == "torch":
def sampler(policy, obs):
return policy.compute_actions(obs)[0]
new_act_n = [sampler(policy, obs) for policy, obs in zip(policies.values(), new_obs_n)]
else:
target_act_sampler_n = [p.target_act_sampler for p in policies.values()]
new_obs_ph_n = [p.new_obs_ph for p in policies.values()]
feed_dict = dict(zip(new_obs_ph_n, new_obs_n))
new_act_n = p.sess.run(target_act_sampler_n, feed_dict)
samples.update(
{"new_actions_%d" % i: new_act
for i, new_act in enumerate(new_act_n)})
# Share samples among agents.
policy_batches = {pid: SampleBatch(samples) for pid in policies.keys()}
return MultiAgentBatch(policy_batches, train_batch_size)
def add_maddpg_postprocessing(config):
"""Add the before learn on batch hook.
This hook is called explicitly prior to TrainOneStep() in the execution
setups for DQN and APEX.
"""
def f(batch, workers, config):
policies = dict(workers.local_worker()
.foreach_trainable_policy(lambda p, i: (i, p)))
return before_learn_on_batch(batch, policies,
config["train_batch_size"], config["framework"])
config["before_learn_on_batch"] = f
return config
def get_policy_class(config: TrainerConfigDict) -> Optional[Type[Policy]]:
"""Policy class picker function. Class is chosen based on DL-framework.
Args:
config (TrainerConfigDict): The trainer's configuration dict.
Returns:
Optional[Type[Policy]]: The Policy class to use with PGTrainer.
If None, use `default_policy` provided in build_trainer().
"""
if config["framework"] == "torch":
return MADDPGTorchPolicy
else:
return MADDPGTFPolicy
MADDPGTrainer = GenericOffPolicyTrainer.with_updates(
name="MADDPG",
default_config=DEFAULT_CONFIG,
default_policy=MADDPGTFPolicy,
get_policy_class=get_policy_class,
validate_config=add_maddpg_postprocessing)
| [] |
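For orientation, a rough sketch of how this MADDPGTrainer might be launched follows. It assumes the pre-2.0 Ray RLlib API this port targets, a registered two-agent environment (the env name below is a placeholder), and toy observation/action spaces; none of these specifics come from the file itself, only the per-policy "agent_id" override and the lockstep replay mode do.
from gym.spaces import Box, Discrete
from ray import tune
from maddpg import MADDPGTrainer, DEFAULT_CONFIG
obs_space, act_space = Box(-1.0, 1.0, (4,)), Discrete(5)   # placeholder spaces
config = dict(
    DEFAULT_CONFIG,
    framework="torch",            # get_policy_class() then returns MADDPGTorchPolicy
    env="two_agent_env",          # placeholder: any registered multi-agent env
    multiagent={
        "policies": {
            f"agent_{i}": (None, obs_space, act_space, {"agent_id": i}) for i in range(2)
        },
        "policy_mapping_fn": lambda agent_id: agent_id,
        "replay_mode": "lockstep",  # MADDPG forces lockstep replay, as in DEFAULT_CONFIG
    },
)
tune.run(MADDPGTrainer, config=config, stop={"training_iteration": 5})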
2024-01-10 | gautam-sadaindia/Gautam-GenAI-Article-Summarizer | setup_feed.py | import feedparser
from newspaper import Article
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.docstore.document import Document
def setup_feed(rss_feed_url, count):
feed = feedparser.parse(rss_feed_url).entries[:count]
article_list = []
for article in feed:
article_list.append({"title": article.title, "link": article.link})
return article_list
def Content(url, chunk_size, chunk_overlap):
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
article = Article(url)
article.download()
article.parse()
texts = text_splitter.split_text(article.text)
docs = [Document(page_content=i) for i in texts[:3]]
return docs | [] |
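A quick usage sketch for the two helpers in this file; the feed URL is a placeholder and the chunk sizes are arbitrary assumptions, but the call signatures match the definitions above.
from setup_feed import setup_feed, Content
articles = setup_feed("https://example.com/rss.xml", 3)      # placeholder RSS feed
for item in articles:
    docs = Content(item["link"], chunk_size=1000, chunk_overlap=100)
    print(item["title"], "->", len(docs), "Document chunks ready for summarization")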
2024-01-10 | yihong0618/xiaogpt | xiaogpt~langchain~examples~email~mail_box.py | import imaplib
import email
from datetime import datetime, timedelta
import html
from bs4 import BeautifulSoup
import re
import openai
import smtplib
from email.mime.text import MIMEText
class Mailbox:
# Gmail account settings need to be configured
gmail_address = ""
gmail_password = ""
# Connect to IMAP server
imap_server = "imap.gmail.com"
imap_port = 993
    # Define email recipients; multiple addresses can be added as needed
to_addresses = [""]
# Define the number of emails to summarize
max_emails = 3
def get_all_work_summary(self):
print("Getting mail...")
try:
# Establish an IMAP connection
mailbox = imaplib.IMAP4_SSL(self.imap_server, self.imap_port)
# Log in to your Gmail account
mailbox.login(self.gmail_address, self.gmail_password)
# Select email
mailbox.select("INBOX")
# Get today's date
today = datetime.now().strftime("%d-%b-%Y")
# Build search criteria
search_criteria = f'(SINCE "{today}")'
# Search for matching messages
status, email_ids = mailbox.search(None, search_criteria)
if status == "OK":
email_ids = email_ids[0].split()
print(f"Number of emails received today: {len(email_ids)}")
# Limit fetching up to max_emails emails
max_emails = min(len(email_ids), self.max_emails)
all_email_content = ""
for i in range(max_emails):
email_id = email_ids[i]
email_content = self.get_email_content(mailbox, email_id)
if email_content:
all_email_content += f"{i+1}、{email_content}\n"
# print(all_email_content)
# close connection
mailbox.logout()
return all_email_content
except Exception as e:
print("Failed to get email:", str(e))
def get_email_content(self, mailbox, email_id):
# Get email content
status, email_data = mailbox.fetch(email_id, "(RFC822)")
if status == "OK":
raw_email = email_data[0][1]
msg = email.message_from_bytes(raw_email)
# Get sender
sender = msg["From"]
# Extract senders within angle brackets (<>)
sender = re.findall(r"<(.*?)>", sender)
sender = sender[0] if sender else ""
            # Filter on the sender: keep mails whose sender address ends with '.com' (extend this check as needed) and skip replies
if sender.lower().endswith(".com") and not msg["In-Reply-To"]:
# Get email content
email_content = ""
if msg.is_multipart():
for part in msg.walk():
content_type = part.get_content_type()
if content_type == "text/plain":
email_content = part.get_payload(decode=True).decode(
"utf-8"
)
break
elif content_type == "text/html":
email_content = part.get_payload(decode=True).decode(
"utf-8"
)
email_content = html.unescape(
email_content
) # Filter HTML code
break
else:
email_content = msg.get_payload(decode=True).decode("utf-8")
# Use BeautifulSoup to filter the html code if it still contains it
if "html" in email_content.lower():
soup = BeautifulSoup(email_content, "html.parser")
email_content = soup.get_text()
# Output text format
email_content = re.sub(r"\s+", "", email_content)
# Filter content between = signs
email_content = re.sub(r"=\?.*?\?=", "", email_content)
                # Filter out content after the --- marker
email_content = re.sub(r"---.*", "", email_content)
return f"{sender}Send an email with the content{email_content}"
return ""
def get_summary_by_ai(self, email_content: str, prompt: str) -> str:
print("Asking AI to summarize email content...")
# Request ChatGPT for summary
response = openai.chat.completions.create(
model="gpt-3.5-turbo-0613",
messages=[
{"role": "system", "content": prompt},
{"role": "user", "content": email_content},
],
)
# Extract summary generated by ChatGPT
summary = response.choices[0].message.content.strip()
# print(summary)
return summary
def send_mail(self, summary, theme="Email summary summary"):
# Set senders and recipients
from_address = self.gmail_address
to_addresses = self.to_addresses # Add multiple recipient email addresses
# Build email content
yesterday = (datetime.now() - timedelta(days=0)).strftime("%Y-%m-%d")
subject = yesterday + theme
body = summary
try:
# Connect to SMTP server
smtp_server = smtplib.SMTP("smtp.gmail.com", 587)
smtp_server.ehlo()
smtp_server.starttls()
# Login E-mail
smtp_server.login(self.gmail_address, self.gmail_password)
for to_address in to_addresses:
# Create a plain text mail message object
message = MIMEText(body, "plain", "utf-8")
message["Subject"] = subject
message["From"] = from_address
message["To"] = to_address
# send email
smtp_server.sendmail(from_address, to_address, message.as_string())
print("Email sent successfully to:", to_address)
# close connection
smtp_server.quit()
print("All emails have been sent successfully!")
return True
except Exception as e:
print("Email sending failed:", str(e))
| [] |
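To see how the pieces of Mailbox fit together, here is a hypothetical end-to-end run. The Gmail address, app password, recipient list and OpenAI key are placeholders that must be filled in, and the prompt is an arbitrary example rather than the one used elsewhere in the project.
import openai
from xiaogpt.langchain.examples.email.mail_box import Mailbox
openai.api_key = "sk-..."                       # placeholder key
box = Mailbox()
box.gmail_address = "me@example.com"            # placeholder Gmail account
box.gmail_password = "app-password"             # placeholder app password
box.to_addresses = ["me@example.com"]           # placeholder recipients
emails = box.get_all_work_summary()
if emails:
    summary = box.get_summary_by_ai(emails, "Summarize each mail in one short line.")
    box.send_mail(summary)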
2024-01-10 | yihong0618/xiaogpt | xiaogpt~langchain~examples~email~mail_summary_tools.py | from langchain.tools import BaseTool
from xiaogpt.langchain.examples.email.mail_box import Mailbox
class MailSummaryTool(BaseTool):
name = "MailSumary"
description = "当被问到总结邮件相关时,会触发这个工具,进行今日邮件总结和发送。当调用工具完毕,只需要回复总结成功或失败即可,立即结束本次回答"
def get_mail_summary(self) -> str:
"""
总结邮件:对邮箱内收到的邮件进行总结,并发送到指定邮箱
"""
mailbox = Mailbox()
all_email_content = mailbox.get_all_work_summary()
prompt = """
要求你作为一名总编辑。根据输入的多封邮件,对每封做简明扼要的摘要。要求如下:
1、对每封邮件摘要总结,摘要总结字数在25字以内
2、排版按照 发送人:xx 内容:xx (换一行)
3、注意换行,要求全文美观简洁
4、展示邮件内提到项目名,不用额外扩展讲项目内容和进度
"""
gpt_content = mailbox.get_summary_by_ai(all_email_content, prompt)
is_success = mailbox.send_mail(gpt_content)
if is_success:
return "总结邮件成功"
else:
return "总结邮件失败,请检查邮箱配置"
def _run(self, query: str) -> str:
return self.get_mail_summary()
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("MailSummaryTool does not support async")
| [
"\n 要求你作为一名总编辑。根据输入的多封邮件,对每封做简明扼要的摘要。要求如下:\n 1、对每封邮件摘要总结,摘要总结字数在25字以内\n 2、排版按照 发送人:xx 内容:xx (换一行)\n 3、注意换行,要求全文美观简洁\n 4、展示邮件内提到项目名,不用额外扩展讲项目内容和进度\n ",
"当被问到总结邮件相关时,会触发这个工具,进行今日邮件总结和发送。当调用工具完毕,只需要回复总结成功或失败即可,立即结束本次回答"
] |
2024-01-10 | yihong0618/xiaogpt | xiaogpt~bot~gpt3_bot.py | from __future__ import annotations
import dataclasses
from typing import ClassVar
import httpx
import openai
from rich import print
from xiaogpt.bot.base_bot import BaseBot, ChatHistoryMixin
from xiaogpt.utils import split_sentences
@dataclasses.dataclass
class GPT3Bot(ChatHistoryMixin, BaseBot):
name: ClassVar[str] = "GPT3"
openai_key: str
api_base: str | None = None
proxy: str | None = None
history: list[tuple[str, str]] = dataclasses.field(default_factory=list, init=False)
@classmethod
def from_config(cls, config):
return cls(
openai_key=config.openai_key, api_base=config.api_base, proxy=config.proxy
)
async def ask(self, query, **options):
data = {
"prompt": query,
"model": "text-davinci-003",
"max_tokens": 1024,
"temperature": 1,
"top_p": 1,
**options,
}
httpx_kwargs = {}
        if self.proxy:
            httpx_kwargs["proxies"] = self.proxy
async with httpx.AsyncClient(trust_env=True, **httpx_kwargs) as sess:
client = openai.AsyncOpenAI(
api_key=self.openai_key, http_client=sess, base_url=self.api_base
)
try:
completion = await client.completions.create(**data)
except Exception as e:
print(str(e))
return ""
print(completion.choices[0].text)
return completion.choices[0].text
async def ask_stream(self, query, **options):
data = {
"prompt": query,
"model": "text-davinci-003",
"max_tokens": 1024,
"temperature": 1,
"top_p": 1,
"stream": True,
**options,
}
httpx_kwargs = {}
        if self.proxy:
            httpx_kwargs["proxies"] = self.proxy
async with httpx.AsyncClient(trust_env=True, **httpx_kwargs) as sess:
client = openai.AsyncOpenAI(
api_key=self.openai_key, http_client=sess, base_url=self.api_base
)
try:
completion = await client.completions.create(**data)
except Exception as e:
print(str(e))
return
async def text_gen():
async for event in completion:
if not event.choices:
continue
text = event.choices[0].text
print(text, end="")
yield text
try:
async for sentence in split_sentences(text_gen()):
yield sentence
finally:
print()
| [] |
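A minimal sketch of driving GPT3Bot directly, outside the speaker loop; the key is a placeholder, and note that the hard-coded text-davinci-003 completion model may no longer be served by OpenAI.
import asyncio
from xiaogpt.bot.gpt3_bot import GPT3Bot
async def main():
    bot = GPT3Bot(openai_key="sk-...")          # placeholder key; api_base/proxy are optional
    print(await bot.ask("Say hello in five words"))
    async for sentence in bot.ask_stream("Tell a two-sentence story"):
        print(sentence)
asyncio.run(main())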
2024-01-10 | yihong0618/xiaogpt | xiaogpt~xiaogpt.py | #!/usr/bin/env python3
from __future__ import annotations
import asyncio
import functools
import json
import logging
import re
import time
from pathlib import Path
from typing import AsyncIterator
from aiohttp import ClientSession, ClientTimeout
from miservice import MiAccount, MiIOService, MiNAService, miio_command
from rich import print
from rich.logging import RichHandler
from xiaogpt.bot import get_bot
from xiaogpt.config import (
COOKIE_TEMPLATE,
LATEST_ASK_API,
MI_ASK_SIMULATE_DATA,
WAKEUP_KEYWORD,
Config,
)
from xiaogpt.tts import TTS, EdgeTTS, MiTTS
from xiaogpt.tts.openai import OpenAITTS
from xiaogpt.utils import (
parse_cookie_string,
)
EOF = object()
class MiGPT:
def __init__(self, config: Config):
self.config = config
self.mi_token_home = Path.home() / ".mi.token"
self.last_timestamp = int(time.time() * 1000) # timestamp last call mi speaker
self.cookie_jar = None
self.device_id = ""
self.parent_id = None
self.mina_service = None
self.miio_service = None
self.in_conversation = False
self.polling_event = asyncio.Event()
self.last_record = asyncio.Queue(1)
# setup logger
self.log = logging.getLogger("xiaogpt")
self.log.setLevel(logging.DEBUG if config.verbose else logging.INFO)
self.log.addHandler(RichHandler())
self.log.debug(config)
async def poll_latest_ask(self):
async with ClientSession() as session:
session._cookie_jar = self.cookie_jar
while True:
self.log.debug(
"Listening new message, timestamp: %s", self.last_timestamp
)
new_record = await self.get_latest_ask_from_xiaoai(session)
start = time.perf_counter()
self.log.debug("Polling_event, timestamp: %s", self.last_timestamp)
await self.polling_event.wait()
if (
self.config.mute_xiaoai
and new_record
and self.need_ask_gpt(new_record)
):
await self.stop_if_xiaoai_is_playing()
if (d := time.perf_counter() - start) < 1:
                    # sleep to avoid too many requests
                    self.log.debug("Sleep %f, timestamp: %s", d, self.last_timestamp)
                    # if you want to force mute xiaoai, comment out the line below.
await asyncio.sleep(1 - d)
async def init_all_data(self, session):
await self.login_miboy(session)
await self._init_data_hardware()
session.cookie_jar.update_cookies(self.get_cookie())
self.cookie_jar = session.cookie_jar
self.tts # init tts
async def login_miboy(self, session):
account = MiAccount(
session,
self.config.account,
self.config.password,
str(self.mi_token_home),
)
        # Force a login to refresh the token
await account.login("micoapi")
self.mina_service = MiNAService(account)
self.miio_service = MiIOService(account)
async def _init_data_hardware(self):
if self.config.cookie:
# if use cookie do not need init
return
hardware_data = await self.mina_service.device_list()
        # To fix multi-xiaoai problems we check the did first.
        # Why fix it this way? Some videos and articles on the Internet already
        # describe the old way, and we do not want to change it, so we check
        # whether miotDID is set in `env` first to pick the device id.
for h in hardware_data:
if did := self.config.mi_did:
if h.get("miotDID", "") == str(did):
self.device_id = h.get("deviceID")
break
else:
continue
if h.get("hardware", "") == self.config.hardware:
self.device_id = h.get("deviceID")
break
else:
raise Exception(
f"we have no hardware: {self.config.hardware} please use `micli mina` to check"
)
if not self.config.mi_did:
devices = await self.miio_service.device_list()
try:
self.config.mi_did = next(
d["did"]
for d in devices
if d["model"].endswith(self.config.hardware.lower())
)
except StopIteration:
raise Exception(
f"cannot find did for hardware: {self.config.hardware} "
"please set it via MI_DID env"
)
def get_cookie(self):
if self.config.cookie:
cookie_jar = parse_cookie_string(self.config.cookie)
# set attr from cookie fix #134
cookie_dict = cookie_jar.get_dict()
self.device_id = cookie_dict["deviceId"]
return cookie_jar
else:
with open(self.mi_token_home) as f:
user_data = json.loads(f.read())
user_id = user_data.get("userId")
service_token = user_data.get("micoapi")[1]
cookie_string = COOKIE_TEMPLATE.format(
device_id=self.device_id, service_token=service_token, user_id=user_id
)
return parse_cookie_string(cookie_string)
@functools.cached_property
def chatbot(self):
return get_bot(self.config)
async def simulate_xiaoai_question(self):
data = MI_ASK_SIMULATE_DATA
# Convert the data['data'] value from a string to a dictionary
data_dict = json.loads(data["data"])
# Get the first item in the records list
record = data_dict["records"][0]
# Replace the query and time values with user input
record["query"] = input("Enter the new query: ")
record["time"] = int(time.time() * 1000)
# Convert the updated data_dict back to a string and update the data['data'] value
data["data"] = json.dumps(data_dict)
await asyncio.sleep(1)
return data
def need_ask_gpt(self, record):
if not record:
return False
query = record.get("query", "")
return (
self.in_conversation
and not query.startswith(WAKEUP_KEYWORD)
or query.startswith(tuple(self.config.keyword))
)
def need_change_prompt(self, record):
if self.config.bot == "gpt3":
return False
query = record.get("query", "")
return query.startswith(tuple(self.config.change_prompt_keyword))
def _change_prompt(self, new_prompt):
new_prompt = re.sub(
rf"^({'|'.join(self.config.change_prompt_keyword)})", "", new_prompt
)
new_prompt = "以下都" + new_prompt
print(f"Prompt from {self.config.prompt} change to {new_prompt}")
self.config.prompt = new_prompt
self.chatbot.change_prompt(new_prompt)
async def get_latest_ask_from_xiaoai(self, session: ClientSession) -> dict | None:
retries = 3
for i in range(retries):
try:
timeout = ClientTimeout(total=15)
r = await session.get(
LATEST_ASK_API.format(
hardware=self.config.hardware,
timestamp=str(int(time.time() * 1000)),
),
timeout=timeout,
)
except Exception as e:
                self.log.warning(
                    "Exception when getting the latest ask from xiaoai: %s", str(e)
                )
continue
try:
data = await r.json()
except Exception:
                self.log.warning("Error getting the latest ask from xiaoai, retrying")
                if i == 2:
                    # tricky way to fix #282 #272: if this is the third try, re-init all data
                    print("Maybe out of date, trying to re-init it")
await self.init_all_data(self.session)
else:
return self._get_last_query(data)
return None
def _get_last_query(self, data: dict) -> dict | None:
if d := data.get("data"):
records = json.loads(d).get("records")
if not records:
return None
last_record = records[0]
timestamp = last_record.get("time")
if timestamp > self.last_timestamp:
try:
self.last_record.put_nowait(last_record)
self.last_timestamp = timestamp
return last_record
except asyncio.QueueFull:
pass
return None
async def do_tts(self, value):
if not self.config.use_command:
try:
await self.mina_service.text_to_speech(self.device_id, value)
except Exception:
pass
else:
await miio_command(
self.miio_service,
self.config.mi_did,
f"{self.config.tts_command} {value}",
)
@functools.cached_property
def tts(self) -> TTS:
if self.config.tts == "edge":
return EdgeTTS(self.mina_service, self.device_id, self.config)
elif self.config.tts == "openai":
return OpenAITTS(self.mina_service, self.device_id, self.config)
else:
return MiTTS(self.mina_service, self.device_id, self.config)
async def wait_for_tts_finish(self):
while True:
if not await self.get_if_xiaoai_is_playing():
break
await asyncio.sleep(1)
@staticmethod
def _normalize(message: str) -> str:
message = message.strip().replace(" ", "--")
message = message.replace("\n", ",")
message = message.replace('"', ",")
return message
async def ask_gpt(self, query: str) -> AsyncIterator[str]:
if not self.config.stream:
if self.config.bot == "glm":
answer = self.chatbot.ask(query, **self.config.gpt_options)
else:
answer = await self.chatbot.ask(query, **self.config.gpt_options)
message = self._normalize(answer) if answer else ""
yield message
return
async def collect_stream(queue):
async for message in self.chatbot.ask_stream(
query, **self.config.gpt_options
):
await queue.put(message)
def done_callback(future):
queue.put_nowait(EOF)
if future.exception():
self.log.error(future.exception())
self.polling_event.set()
queue = asyncio.Queue()
is_eof = False
task = asyncio.create_task(collect_stream(queue))
task.add_done_callback(done_callback)
while True:
if is_eof or not self.last_record.empty():
break
message = await queue.get()
if message is EOF:
break
while not queue.empty():
if (next_msg := queue.get_nowait()) is EOF:
is_eof = True
break
message += next_msg
if message:
yield self._normalize(message)
self.polling_event.clear()
task.cancel()
async def get_if_xiaoai_is_playing(self):
playing_info = await self.mina_service.player_get_status(self.device_id)
# WTF xiaomi api
is_playing = (
json.loads(playing_info.get("data", {}).get("info", "{}")).get("status", -1)
== 1
)
return is_playing
async def stop_if_xiaoai_is_playing(self):
is_playing = await self.get_if_xiaoai_is_playing()
if is_playing:
# stop it
await self.mina_service.player_pause(self.device_id)
async def wakeup_xiaoai(self):
return await miio_command(
self.miio_service,
self.config.mi_did,
f"{self.config.wakeup_command} {WAKEUP_KEYWORD} 0",
)
async def run_forever(self):
async with ClientSession() as session:
self.session = session
await self.init_all_data(session)
task = asyncio.create_task(self.poll_latest_ask())
assert task is not None # to keep the reference to task, do not remove this
print(
f"Running xiaogpt now, 用[green]{'/'.join(self.config.keyword)}[/]开头来提问"
)
print(f"或用[green]{self.config.start_conversation}[/]开始持续对话")
while True:
self.polling_event.set()
new_record = await self.last_record.get()
self.polling_event.clear() # stop polling when processing the question
query = new_record.get("query", "").strip()
if query == self.config.start_conversation:
if not self.in_conversation:
print("开始对话")
self.in_conversation = True
await self.wakeup_xiaoai()
await self.stop_if_xiaoai_is_playing()
continue
elif query == self.config.end_conversation:
if self.in_conversation:
print("结束对话")
self.in_conversation = False
await self.stop_if_xiaoai_is_playing()
continue
# we can change prompt
if self.need_change_prompt(new_record):
print(new_record)
self._change_prompt(new_record.get("query", ""))
if not self.need_ask_gpt(new_record):
self.log.debug("No new xiao ai record")
continue
# drop 帮我回答
query = re.sub(rf"^({'|'.join(self.config.keyword)})", "", query)
print("-" * 20)
print("问题:" + query + "?")
if not self.chatbot.has_history():
query = f"{query},{self.config.prompt}"
if self.config.mute_xiaoai:
await self.stop_if_xiaoai_is_playing()
else:
# waiting for xiaoai speaker done
await asyncio.sleep(8)
await self.do_tts(f"正在问{self.chatbot.name}请耐心等待")
try:
print(
"以下是小爱的回答: ",
new_record.get("answers", [])[0].get("tts", {}).get("text"),
)
except IndexError:
print("小爱没回")
print(f"以下是 {self.chatbot.name} 的回答: ", end="")
try:
await self.tts.synthesize(query, self.ask_gpt(query))
except Exception as e:
print(f"{self.chatbot.name} 回答出错 {str(e)}")
else:
print("回答完毕")
if self.in_conversation:
print(f"继续对话, 或用`{self.config.end_conversation}`结束对话")
await self.wakeup_xiaoai()
| [
"以下都以下都new_promptb48e9c88-4e7c-4401-b055-ae66e02353e6"
] |
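Putting the MiGPT class above to work is roughly a one-liner once a Config exists; the sketch below assumes the Config dataclass accepts the fields shown (hardware, account, password, openai_key), with every value a placeholder.
import asyncio
from xiaogpt.config import Config
from xiaogpt.xiaogpt import MiGPT
config = Config(
    hardware="LX06",            # placeholder speaker model
    account="xiaomi-account",   # placeholder Xiaomi account
    password="xiaomi-password", # placeholder password
    openai_key="sk-...",        # placeholder OpenAI key
)
asyncio.run(MiGPT(config).run_forever())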
2024-01-10 | yihong0618/xiaogpt | xiaogpt~bot~langchain_bot.py | from __future__ import annotations
import asyncio
import os
from langchain.memory import ConversationBufferWindowMemory
from rich import print
from xiaogpt.bot.base_bot import BaseBot
from xiaogpt.langchain.callbacks import AsyncIteratorCallbackHandler
from xiaogpt.langchain.chain import agent_search
from xiaogpt.utils import split_sentences
class LangChainBot(BaseBot):
name = "Lang Chain"
def __init__(
self,
openai_key: str,
serpapi_api_key: str,
proxy: str | None = None,
api_base: str | None = None,
) -> None:
        # Set environment variables
os.environ["OPENAI_API_KEY"] = openai_key
os.environ["SERPAPI_API_KEY"] = serpapi_api_key
if api_base:
os.environ["OPENAI_API_BASE"] = api_base
if proxy:
os.environ["OPENAI_PROXY"] = proxy
self.memory = ConversationBufferWindowMemory(return_messages=True)
def has_history(self) -> bool:
return len(self.memory.chat_memory.messages) > 0
def change_prompt(self, new_prompt: str) -> None:
self.memory.clear()
self.memory.chat_memory.add_user_message(new_prompt)
@classmethod
def from_config(cls, config):
return cls(
openai_key=config.openai_key,
serpapi_api_key=config.serpapi_api_key,
proxy=config.proxy,
api_base=config.api_base,
)
async def ask(self, query, **options):
return await agent_search(query, self.memory)
async def ask_stream(self, query, **options):
callback = AsyncIteratorCallbackHandler()
task = asyncio.create_task(agent_search(query, self.memory, callback))
try:
async for message in split_sentences(callback.aiter()):
yield message
except Exception as e:
print("An error occurred:", str(e))
finally:
print()
await task
| [] |
2024-01-10 | yihong0618/xiaogpt | xiaogpt~langchain~callbacks.py | from __future__ import annotations
import asyncio
from typing import Any, AsyncIterator
from uuid import UUID
from langchain.callbacks.base import AsyncCallbackHandler
class AsyncIteratorCallbackHandler(AsyncCallbackHandler):
"""Callback handler that returns an async iterator."""
@property
def always_verbose(self) -> bool:
return True
def __init__(self) -> None:
self.queue = asyncio.Queue()
self.done = asyncio.Event()
async def on_chain_start(
self,
serialized: dict[str, Any],
inputs: dict[str, Any],
*,
run_id: UUID,
parent_run_id: UUID | None = None,
tags: list[str] | None = None,
metadata: dict[str, Any] | None = None,
**kwargs: Any,
) -> None:
self.done.clear()
async def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
if token is not None and token != "":
print(token, end="", flush=True)
self.queue.put_nowait(token)
async def on_chain_end(
self,
outputs: dict[str, Any],
*,
run_id: UUID,
parent_run_id: UUID | None = None,
tags: list[str] | None = None,
**kwargs: Any,
) -> None:
self.done.set()
async def on_chain_error(
self,
error: BaseException,
*,
run_id: UUID,
parent_run_id: UUID | None = None,
tags: list[str] | None = None,
**kwargs: Any,
) -> None:
self.done.set()
async def aiter(self) -> AsyncIterator[str]:
while not self.queue.empty() or not self.done.is_set():
# Wait for the next token in the queue,
# but stop waiting if the done event is set
done, other = await asyncio.wait(
[
# NOTE: If you add other tasks here, update the code below,
# which assumes each set has exactly one task each
asyncio.ensure_future(self.queue.get()),
asyncio.ensure_future(self.done.wait()),
],
return_when=asyncio.FIRST_COMPLETED,
)
# Cancel the other task
if other:
other.pop().cancel()
# Extract the value of the first completed task
token_or_done = done.pop().result()
# If the extracted value is the boolean True, the done event was set
if token_or_done is True:
break
# Otherwise, the extracted value is a token, which we yield
yield token_or_done
| [] |
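A toy driver for AsyncIteratorCallbackHandler, feeding it a few tokens by hand instead of from a LangChain run; it only exercises the queue/done plumbing shown above, and run_id is passed as None since nothing inspects it here.
import asyncio
from xiaogpt.langchain.callbacks import AsyncIteratorCallbackHandler
async def main():
    cb = AsyncIteratorCallbackHandler()
    async def produce():
        await cb.on_chain_start({}, {}, run_id=None)
        for tok in ("Hello", ", ", "world"):
            await cb.on_llm_new_token(tok)
        await cb.on_chain_end({}, run_id=None)
    asyncio.create_task(produce())
    async for _ in cb.aiter():      # tokens are printed by on_llm_new_token itself
        pass
asyncio.run(main())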
2024-01-10 | mariavetter/ai-research | test-gpt-3.py | Api_key = "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"
import openai
import time
openai.api_key = Api_key
amount = 267
prompt = "Write only the text of a generic genuine with no placeholders mail."\
"Response:"
for i in range(amount):
response = openai.Completion.create(engine="text-davinci-003", prompt=prompt, max_tokens=70)
text = response["choices"][0]["text"]
text = text.replace("\n", "").replace(r"\r\n", "")
text = text + "\n"
out = text.replace('\n', '')
print(f"{i}: {out}")
with open("normal_gen.txt", "a+") as f:
f.writelines(text)
time.sleep(1.3) | [
"Write only the text of a generic genuine with no placeholders mail.Response:"
] |
2024-01-10 | itseffi/dreamgaussian | main2.py | import os
import cv2
import time
import tqdm
import numpy as np
import dearpygui.dearpygui as dpg
import torch
import torch.nn.functional as F
import trimesh
import rembg
from cam_utils import orbit_camera, OrbitCamera
from mesh_renderer import Renderer
# from kiui.lpips import LPIPS
class GUI:
def __init__(self, opt):
self.opt = opt # shared with the trainer's opt to support in-place modification of rendering parameters.
self.gui = opt.gui # enable gui
self.W = opt.W
self.H = opt.H
self.cam = OrbitCamera(opt.W, opt.H, r=opt.radius, fovy=opt.fovy)
self.mode = "image"
self.seed = "random"
self.buffer_image = np.ones((self.W, self.H, 3), dtype=np.float32)
self.need_update = True # update buffer_image
# models
self.device = torch.device("cuda")
self.bg_remover = None
self.guidance_sd = None
self.guidance_zero123 = None
self.enable_sd = False
self.enable_zero123 = False
# renderer
self.renderer = Renderer(opt).to(self.device)
# input image
self.input_img = None
self.input_mask = None
self.input_img_torch = None
self.input_mask_torch = None
self.overlay_input_img = False
self.overlay_input_img_ratio = 0.5
# input text
self.prompt = ""
self.negative_prompt = ""
# training stuff
self.training = False
self.optimizer = None
self.step = 0
self.train_steps = 1 # steps per rendering loop
# self.lpips_loss = LPIPS(net='vgg').to(self.device)
# load input data from cmdline
if self.opt.input is not None:
self.load_input(self.opt.input)
# override prompt from cmdline
if self.opt.prompt is not None:
self.prompt = self.opt.prompt
if self.opt.negative_prompt is not None:
self.negative_prompt = self.opt.negative_prompt
if self.gui:
dpg.create_context()
self.register_dpg()
self.test_step()
def __del__(self):
if self.gui:
dpg.destroy_context()
def seed_everything(self):
try:
seed = int(self.seed)
except:
seed = np.random.randint(0, 1000000)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
self.last_seed = seed
def prepare_train(self):
self.step = 0
# setup training
self.optimizer = torch.optim.Adam(self.renderer.get_params())
# default camera
pose = orbit_camera(self.opt.elevation, 0, self.opt.radius)
self.fixed_cam = (pose, self.cam.perspective)
self.enable_sd = self.opt.lambda_sd > 0 and self.prompt != ""
self.enable_zero123 = self.opt.lambda_zero123 > 0 and self.input_img is not None
# lazy load guidance model
if self.guidance_sd is None and self.enable_sd:
if self.opt.mvdream:
print(f"[INFO] loading MVDream...")
from guidance.mvdream_utils import MVDream
self.guidance_sd = MVDream(self.device)
print(f"[INFO] loaded MVDream!")
else:
print(f"[INFO] loading SD...")
from guidance.sd_utils import StableDiffusion
self.guidance_sd = StableDiffusion(self.device)
print(f"[INFO] loaded SD!")
if self.guidance_zero123 is None and self.enable_zero123:
print(f"[INFO] loading zero123...")
from guidance.zero123_utils import Zero123
if self.opt.stable_zero123:
self.guidance_zero123 = Zero123(self.device, model_key='ashawkey/stable-zero123-diffusers')
else:
self.guidance_zero123 = Zero123(self.device, model_key='ashawkey/zero123-xl-diffusers')
print(f"[INFO] loaded zero123!")
# input image
if self.input_img is not None:
self.input_img_torch = torch.from_numpy(self.input_img).permute(2, 0, 1).unsqueeze(0).to(self.device)
self.input_img_torch = F.interpolate(self.input_img_torch, (self.opt.ref_size, self.opt.ref_size), mode="bilinear", align_corners=False)
self.input_mask_torch = torch.from_numpy(self.input_mask).permute(2, 0, 1).unsqueeze(0).to(self.device)
self.input_mask_torch = F.interpolate(self.input_mask_torch, (self.opt.ref_size, self.opt.ref_size), mode="bilinear", align_corners=False)
self.input_img_torch_channel_last = self.input_img_torch[0].permute(1,2,0).contiguous()
# prepare embeddings
with torch.no_grad():
if self.enable_sd:
self.guidance_sd.get_text_embeds([self.prompt], [self.negative_prompt])
if self.enable_zero123:
self.guidance_zero123.get_img_embeds(self.input_img_torch)
def train_step(self):
starter = torch.cuda.Event(enable_timing=True)
ender = torch.cuda.Event(enable_timing=True)
starter.record()
for _ in range(self.train_steps):
self.step += 1
step_ratio = min(1, self.step / self.opt.iters_refine)
loss = 0
### known view
if self.input_img_torch is not None:
ssaa = min(2.0, max(0.125, 2 * np.random.random()))
out = self.renderer.render(*self.fixed_cam, self.opt.ref_size, self.opt.ref_size, ssaa=ssaa)
# rgb loss
image = out["image"] # [H, W, 3] in [0, 1]
valid_mask = ((out["alpha"] > 0) & (out["viewcos"] > 0.5)).detach()
loss = loss + F.mse_loss(image * valid_mask, self.input_img_torch_channel_last * valid_mask)
### novel view (manual batch)
render_resolution = 512
images = []
poses = []
vers, hors, radii = [], [], []
            # avoid too large an elevation (> 80 or < -80), and make sure it always covers [-30, 30]
min_ver = max(min(-30, -30 - self.opt.elevation), -80 - self.opt.elevation)
max_ver = min(max(30, 30 - self.opt.elevation), 80 - self.opt.elevation)
for _ in range(self.opt.batch_size):
# render random view
ver = np.random.randint(min_ver, max_ver)
hor = np.random.randint(-180, 180)
radius = 0
vers.append(ver)
hors.append(hor)
radii.append(radius)
pose = orbit_camera(self.opt.elevation + ver, hor, self.opt.radius + radius)
poses.append(pose)
# random render resolution
ssaa = min(2.0, max(0.125, 2 * np.random.random()))
out = self.renderer.render(pose, self.cam.perspective, render_resolution, render_resolution, ssaa=ssaa)
image = out["image"] # [H, W, 3] in [0, 1]
image = image.permute(2,0,1).contiguous().unsqueeze(0) # [1, 3, H, W] in [0, 1]
images.append(image)
# enable mvdream training
if self.opt.mvdream:
for view_i in range(1, 4):
pose_i = orbit_camera(self.opt.elevation + ver, hor + 90 * view_i, self.opt.radius + radius)
poses.append(pose_i)
out_i = self.renderer.render(pose_i, self.cam.perspective, render_resolution, render_resolution, ssaa=ssaa)
image = out_i["image"].permute(2,0,1).contiguous().unsqueeze(0) # [1, 3, H, W] in [0, 1]
images.append(image)
images = torch.cat(images, dim=0)
poses = torch.from_numpy(np.stack(poses, axis=0)).to(self.device)
# import kiui
# kiui.lo(hor, ver)
# kiui.vis.plot_image(image)
# guidance loss
strength = step_ratio * 0.15 + 0.8
if self.enable_sd:
if self.opt.mvdream:
# loss = loss + self.opt.lambda_sd * self.guidance_sd.train_step(images, poses, step_ratio)
refined_images = self.guidance_sd.refine(images, poses, strength=strength).float()
refined_images = F.interpolate(refined_images, (render_resolution, render_resolution), mode="bilinear", align_corners=False)
loss = loss + self.opt.lambda_sd * F.mse_loss(images, refined_images)
else:
# loss = loss + self.opt.lambda_sd * self.guidance_sd.train_step(images, step_ratio)
refined_images = self.guidance_sd.refine(images, strength=strength).float()
refined_images = F.interpolate(refined_images, (render_resolution, render_resolution), mode="bilinear", align_corners=False)
loss = loss + self.opt.lambda_sd * F.mse_loss(images, refined_images)
if self.enable_zero123:
# loss = loss + self.opt.lambda_zero123 * self.guidance_zero123.train_step(images, vers, hors, radii, step_ratio)
refined_images = self.guidance_zero123.refine(images, vers, hors, radii, strength=strength, default_elevation=self.opt.elevation).float()
refined_images = F.interpolate(refined_images, (render_resolution, render_resolution), mode="bilinear", align_corners=False)
loss = loss + self.opt.lambda_zero123 * F.mse_loss(images, refined_images)
# loss = loss + self.opt.lambda_zero123 * self.lpips_loss(images, refined_images)
# optimize step
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
ender.record()
torch.cuda.synchronize()
t = starter.elapsed_time(ender)
self.need_update = True
if self.gui:
dpg.set_value("_log_train_time", f"{t:.4f}ms")
dpg.set_value(
"_log_train_log",
f"step = {self.step: 5d} (+{self.train_steps: 2d}) loss = {loss.item():.4f}",
)
# dynamic train steps (no need for now)
# max allowed train time per-frame is 500 ms
# full_t = t / self.train_steps * 16
# train_steps = min(16, max(4, int(16 * 500 / full_t)))
# if train_steps > self.train_steps * 1.2 or train_steps < self.train_steps * 0.8:
# self.train_steps = train_steps
@torch.no_grad()
def test_step(self):
# ignore if no need to update
if not self.need_update:
return
starter = torch.cuda.Event(enable_timing=True)
ender = torch.cuda.Event(enable_timing=True)
starter.record()
# should update image
if self.need_update:
# render image
out = self.renderer.render(self.cam.pose, self.cam.perspective, self.H, self.W)
buffer_image = out[self.mode] # [H, W, 3]
if self.mode in ['depth', 'alpha']:
buffer_image = buffer_image.repeat(1, 1, 3)
if self.mode == 'depth':
buffer_image = (buffer_image - buffer_image.min()) / (buffer_image.max() - buffer_image.min() + 1e-20)
self.buffer_image = buffer_image.contiguous().clamp(0, 1).detach().cpu().numpy()
# display input_image
if self.overlay_input_img and self.input_img is not None:
self.buffer_image = (
self.buffer_image * (1 - self.overlay_input_img_ratio)
+ self.input_img * self.overlay_input_img_ratio
)
self.need_update = False
ender.record()
torch.cuda.synchronize()
t = starter.elapsed_time(ender)
if self.gui:
dpg.set_value("_log_infer_time", f"{t:.4f}ms ({int(1000/t)} FPS)")
dpg.set_value(
"_texture", self.buffer_image
) # buffer must be contiguous, else seg fault!
def load_input(self, file):
# load image
print(f'[INFO] load image from {file}...')
img = cv2.imread(file, cv2.IMREAD_UNCHANGED)
if img.shape[-1] == 3:
if self.bg_remover is None:
self.bg_remover = rembg.new_session()
img = rembg.remove(img, session=self.bg_remover)
img = cv2.resize(
img, (self.W, self.H), interpolation=cv2.INTER_AREA
)
img = img.astype(np.float32) / 255.0
self.input_mask = img[..., 3:]
# white bg
self.input_img = img[..., :3] * self.input_mask + (
1 - self.input_mask
)
# bgr to rgb
self.input_img = self.input_img[..., ::-1].copy()
# load prompt
file_prompt = file.replace("_rgba.png", "_caption.txt")
if os.path.exists(file_prompt):
print(f'[INFO] load prompt from {file_prompt}...')
with open(file_prompt, "r") as f:
self.prompt = f.read().strip()
def save_model(self):
os.makedirs(self.opt.outdir, exist_ok=True)
path = os.path.join(self.opt.outdir, self.opt.save_path + '.' + self.opt.mesh_format)
self.renderer.export_mesh(path)
print(f"[INFO] save model to {path}.")
def register_dpg(self):
### register texture
with dpg.texture_registry(show=False):
dpg.add_raw_texture(
self.W,
self.H,
self.buffer_image,
format=dpg.mvFormat_Float_rgb,
tag="_texture",
)
### register window
# the rendered image, as the primary window
with dpg.window(
tag="_primary_window",
width=self.W,
height=self.H,
pos=[0, 0],
no_move=True,
no_title_bar=True,
no_scrollbar=True,
):
# add the texture
dpg.add_image("_texture")
# dpg.set_primary_window("_primary_window", True)
# control window
with dpg.window(
label="Control",
tag="_control_window",
width=600,
height=self.H,
pos=[self.W, 0],
no_move=True,
no_title_bar=True,
):
# button theme
with dpg.theme() as theme_button:
with dpg.theme_component(dpg.mvButton):
dpg.add_theme_color(dpg.mvThemeCol_Button, (23, 3, 18))
dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (51, 3, 47))
dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (83, 18, 83))
dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5)
dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3)
# timer stuff
with dpg.group(horizontal=True):
dpg.add_text("Infer time: ")
dpg.add_text("no data", tag="_log_infer_time")
def callback_setattr(sender, app_data, user_data):
setattr(self, user_data, app_data)
# init stuff
with dpg.collapsing_header(label="Initialize", default_open=True):
# seed stuff
def callback_set_seed(sender, app_data):
self.seed = app_data
self.seed_everything()
dpg.add_input_text(
label="seed",
default_value=self.seed,
on_enter=True,
callback=callback_set_seed,
)
# input stuff
def callback_select_input(sender, app_data):
# only one item
for k, v in app_data["selections"].items():
dpg.set_value("_log_input", k)
self.load_input(v)
self.need_update = True
with dpg.file_dialog(
directory_selector=False,
show=False,
callback=callback_select_input,
file_count=1,
tag="file_dialog_tag",
width=700,
height=400,
):
dpg.add_file_extension("Images{.jpg,.jpeg,.png}")
with dpg.group(horizontal=True):
dpg.add_button(
label="input",
callback=lambda: dpg.show_item("file_dialog_tag"),
)
dpg.add_text("", tag="_log_input")
# overlay stuff
with dpg.group(horizontal=True):
def callback_toggle_overlay_input_img(sender, app_data):
self.overlay_input_img = not self.overlay_input_img
self.need_update = True
dpg.add_checkbox(
label="overlay image",
default_value=self.overlay_input_img,
callback=callback_toggle_overlay_input_img,
)
def callback_set_overlay_input_img_ratio(sender, app_data):
self.overlay_input_img_ratio = app_data
self.need_update = True
dpg.add_slider_float(
label="ratio",
min_value=0,
max_value=1,
format="%.1f",
default_value=self.overlay_input_img_ratio,
callback=callback_set_overlay_input_img_ratio,
)
# prompt stuff
dpg.add_input_text(
label="prompt",
default_value=self.prompt,
callback=callback_setattr,
user_data="prompt",
)
dpg.add_input_text(
label="negative",
default_value=self.negative_prompt,
callback=callback_setattr,
user_data="negative_prompt",
)
# save current model
with dpg.group(horizontal=True):
dpg.add_text("Save: ")
dpg.add_button(
label="model",
tag="_button_save_model",
callback=self.save_model,
)
dpg.bind_item_theme("_button_save_model", theme_button)
dpg.add_input_text(
label="",
default_value=self.opt.save_path,
callback=callback_setattr,
user_data="save_path",
)
# training stuff
with dpg.collapsing_header(label="Train", default_open=True):
# lr and train button
with dpg.group(horizontal=True):
dpg.add_text("Train: ")
def callback_train(sender, app_data):
if self.training:
self.training = False
dpg.configure_item("_button_train", label="start")
else:
self.prepare_train()
self.training = True
dpg.configure_item("_button_train", label="stop")
# dpg.add_button(
# label="init", tag="_button_init", callback=self.prepare_train
# )
# dpg.bind_item_theme("_button_init", theme_button)
dpg.add_button(
label="start", tag="_button_train", callback=callback_train
)
dpg.bind_item_theme("_button_train", theme_button)
with dpg.group(horizontal=True):
dpg.add_text("", tag="_log_train_time")
dpg.add_text("", tag="_log_train_log")
# rendering options
with dpg.collapsing_header(label="Rendering", default_open=True):
# mode combo
def callback_change_mode(sender, app_data):
self.mode = app_data
self.need_update = True
dpg.add_combo(
("image", "depth", "alpha", "normal"),
label="mode",
default_value=self.mode,
callback=callback_change_mode,
)
# fov slider
def callback_set_fovy(sender, app_data):
self.cam.fovy = np.deg2rad(app_data)
self.need_update = True
dpg.add_slider_int(
label="FoV (vertical)",
min_value=1,
max_value=120,
format="%d deg",
default_value=np.rad2deg(self.cam.fovy),
callback=callback_set_fovy,
)
### register camera handler
def callback_camera_drag_rotate_or_draw_mask(sender, app_data):
if not dpg.is_item_focused("_primary_window"):
return
dx = app_data[1]
dy = app_data[2]
self.cam.orbit(dx, dy)
self.need_update = True
def callback_camera_wheel_scale(sender, app_data):
if not dpg.is_item_focused("_primary_window"):
return
delta = app_data
self.cam.scale(delta)
self.need_update = True
def callback_camera_drag_pan(sender, app_data):
if not dpg.is_item_focused("_primary_window"):
return
dx = app_data[1]
dy = app_data[2]
self.cam.pan(dx, dy)
self.need_update = True
def callback_set_mouse_loc(sender, app_data):
if not dpg.is_item_focused("_primary_window"):
return
# just the pixel coordinate in image
self.mouse_loc = np.array(app_data)
with dpg.handler_registry():
# for camera moving
dpg.add_mouse_drag_handler(
button=dpg.mvMouseButton_Left,
callback=callback_camera_drag_rotate_or_draw_mask,
)
dpg.add_mouse_wheel_handler(callback=callback_camera_wheel_scale)
dpg.add_mouse_drag_handler(
button=dpg.mvMouseButton_Middle, callback=callback_camera_drag_pan
)
dpg.create_viewport(
title="Gaussian3D",
width=self.W + 600,
height=self.H + (45 if os.name == "nt" else 0),
resizable=False,
)
### global theme
with dpg.theme() as theme_no_padding:
with dpg.theme_component(dpg.mvAll):
# set all padding to 0 to avoid scroll bar
dpg.add_theme_style(
dpg.mvStyleVar_WindowPadding, 0, 0, category=dpg.mvThemeCat_Core
)
dpg.add_theme_style(
dpg.mvStyleVar_FramePadding, 0, 0, category=dpg.mvThemeCat_Core
)
dpg.add_theme_style(
dpg.mvStyleVar_CellPadding, 0, 0, category=dpg.mvThemeCat_Core
)
dpg.bind_item_theme("_primary_window", theme_no_padding)
dpg.setup_dearpygui()
### register a larger font
# get it from: https://github.com/lxgw/LxgwWenKai/releases/download/v1.300/LXGWWenKai-Regular.ttf
if os.path.exists("LXGWWenKai-Regular.ttf"):
with dpg.font_registry():
with dpg.font("LXGWWenKai-Regular.ttf", 18) as default_font:
dpg.bind_font(default_font)
# dpg.show_metrics()
dpg.show_viewport()
def render(self):
assert self.gui
while dpg.is_dearpygui_running():
# update texture every frame
if self.training:
self.train_step()
self.test_step()
dpg.render_dearpygui_frame()
# no gui mode
def train(self, iters=500):
if iters > 0:
self.prepare_train()
for i in tqdm.trange(iters):
self.train_step()
# save
self.save_model()
if __name__ == "__main__":
import argparse
from omegaconf import OmegaConf
parser = argparse.ArgumentParser()
parser.add_argument("--config", required=True, help="path to the yaml config file")
args, extras = parser.parse_known_args()
# override default config from cli
opt = OmegaConf.merge(OmegaConf.load(args.config), OmegaConf.from_cli(extras))
# auto find mesh from stage 1
if opt.mesh is None:
default_path = os.path.join(opt.outdir, opt.save_path + '_mesh.' + opt.mesh_format)
if os.path.exists(default_path):
opt.mesh = default_path
else:
raise ValueError(f"Cannot find mesh from {default_path}, must specify --mesh explicitly!")
gui = GUI(opt)
if opt.gui:
gui.render()
else:
gui.train(opt.iters_refine)
| [
"_rgba.png",
"_caption.txt"
] |
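A small worked check of the novel-view elevation clamp used in the training loop above (the min_ver/max_ver lines): whatever opt.elevation is, the sampled offset spans at least [-30, 30] degrees and the absolute elevation stays within [-80, 80]. The arithmetic is copied from the file; only the sample elevations are made up.
def ver_range(elevation):
    # Same arithmetic as in train_step() above.
    min_ver = max(min(-30, -30 - elevation), -80 - elevation)
    max_ver = min(max(30, 30 - elevation), 80 - elevation)
    return min_ver, max_ver
for e in (0, -30, 45):
    lo, hi = ver_range(e)
    print(f"elevation={e:+d}: ver in [{lo}, {hi}], absolute elevation in [{e + lo}, {e + hi}]")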
2024-01-10 | itseffi/dreamgaussian | main.py | import os
import cv2
import time
import tqdm
import numpy as np
import dearpygui.dearpygui as dpg
import torch
import torch.nn.functional as F
import rembg
from cam_utils import orbit_camera, OrbitCamera
from gs_renderer import Renderer, MiniCam
from grid_put import mipmap_linear_grid_put_2d
from mesh import Mesh, safe_normalize
class GUI:
def __init__(self, opt):
self.opt = opt # shared with the trainer's opt to support in-place modification of rendering parameters.
self.gui = opt.gui # enable gui
self.W = opt.W
self.H = opt.H
self.cam = OrbitCamera(opt.W, opt.H, r=opt.radius, fovy=opt.fovy)
self.mode = "image"
self.seed = "random"
self.buffer_image = np.ones((self.W, self.H, 3), dtype=np.float32)
self.need_update = True # update buffer_image
# models
self.device = torch.device("cuda")
self.bg_remover = None
self.guidance_sd = None
self.guidance_zero123 = None
self.enable_sd = False
self.enable_zero123 = False
# renderer
self.renderer = Renderer(sh_degree=self.opt.sh_degree)
self.gaussain_scale_factor = 1
# input image
self.input_img = None
self.input_mask = None
self.input_img_torch = None
self.input_mask_torch = None
self.overlay_input_img = False
self.overlay_input_img_ratio = 0.5
# input text
self.prompt = ""
self.negative_prompt = ""
# training stuff
self.training = False
self.optimizer = None
self.step = 0
self.train_steps = 1 # steps per rendering loop
# load input data from cmdline
if self.opt.input is not None:
self.load_input(self.opt.input)
# override prompt from cmdline
if self.opt.prompt is not None:
self.prompt = self.opt.prompt
if self.opt.negative_prompt is not None:
self.negative_prompt = self.opt.negative_prompt
# override if provide a checkpoint
if self.opt.load is not None:
self.renderer.initialize(self.opt.load)
else:
# initialize gaussians to a blob
self.renderer.initialize(num_pts=self.opt.num_pts)
if self.gui:
dpg.create_context()
self.register_dpg()
self.test_step()
def __del__(self):
if self.gui:
dpg.destroy_context()
def seed_everything(self):
try:
seed = int(self.seed)
except:
seed = np.random.randint(0, 1000000)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True
self.last_seed = seed
def prepare_train(self):
self.step = 0
# setup training
self.renderer.gaussians.training_setup(self.opt)
# do not do progressive sh-level
self.renderer.gaussians.active_sh_degree = self.renderer.gaussians.max_sh_degree
self.optimizer = self.renderer.gaussians.optimizer
# default camera
pose = orbit_camera(self.opt.elevation, 0, self.opt.radius)
self.fixed_cam = MiniCam(
pose,
self.opt.ref_size,
self.opt.ref_size,
self.cam.fovy,
self.cam.fovx,
self.cam.near,
self.cam.far,
)
self.enable_sd = self.opt.lambda_sd > 0 and self.prompt != ""
self.enable_zero123 = self.opt.lambda_zero123 > 0 and self.input_img is not None
# lazy load guidance model
if self.guidance_sd is None and self.enable_sd:
if self.opt.mvdream:
print(f"[INFO] loading MVDream...")
from guidance.mvdream_utils import MVDream
self.guidance_sd = MVDream(self.device)
print(f"[INFO] loaded MVDream!")
else:
print(f"[INFO] loading SD...")
from guidance.sd_utils import StableDiffusion
self.guidance_sd = StableDiffusion(self.device)
print(f"[INFO] loaded SD!")
if self.guidance_zero123 is None and self.enable_zero123:
print(f"[INFO] loading zero123...")
from guidance.zero123_utils import Zero123
if self.opt.stable_zero123:
self.guidance_zero123 = Zero123(self.device, model_key='ashawkey/stable-zero123-diffusers')
else:
self.guidance_zero123 = Zero123(self.device, model_key='ashawkey/zero123-xl-diffusers')
print(f"[INFO] loaded zero123!")
# input image
if self.input_img is not None:
self.input_img_torch = torch.from_numpy(self.input_img).permute(2, 0, 1).unsqueeze(0).to(self.device)
self.input_img_torch = F.interpolate(self.input_img_torch, (self.opt.ref_size, self.opt.ref_size), mode="bilinear", align_corners=False)
self.input_mask_torch = torch.from_numpy(self.input_mask).permute(2, 0, 1).unsqueeze(0).to(self.device)
self.input_mask_torch = F.interpolate(self.input_mask_torch, (self.opt.ref_size, self.opt.ref_size), mode="bilinear", align_corners=False)
# prepare embeddings
with torch.no_grad():
if self.enable_sd:
self.guidance_sd.get_text_embeds([self.prompt], [self.negative_prompt])
if self.enable_zero123:
self.guidance_zero123.get_img_embeds(self.input_img_torch)
def train_step(self):
starter = torch.cuda.Event(enable_timing=True)
ender = torch.cuda.Event(enable_timing=True)
starter.record()
for _ in range(self.train_steps):
self.step += 1
step_ratio = min(1, self.step / self.opt.iters)
# update lr
self.renderer.gaussians.update_learning_rate(self.step)
loss = 0
### known view
if self.input_img_torch is not None:
cur_cam = self.fixed_cam
out = self.renderer.render(cur_cam)
# rgb loss
image = out["image"].unsqueeze(0) # [1, 3, H, W] in [0, 1]
loss = loss + 10000 * (step_ratio if self.opt.warmup_rgb_loss else 1) * F.mse_loss(image, self.input_img_torch)
# mask loss
mask = out["alpha"].unsqueeze(0) # [1, 1, H, W] in [0, 1]
loss = loss + 1000 * (step_ratio if self.opt.warmup_rgb_loss else 1) * F.mse_loss(mask, self.input_mask_torch)
### novel view (manual batch)
render_resolution = 128 if step_ratio < 0.3 else (256 if step_ratio < 0.6 else 512)
images = []
poses = []
vers, hors, radii = [], [], []
            # avoid too large elevation (> 80 or < -80), and make sure it always covers [-30, 30]
min_ver = max(min(-30, -30 - self.opt.elevation), -80 - self.opt.elevation)
max_ver = min(max(30, 30 - self.opt.elevation), 80 - self.opt.elevation)
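            # Editor's note (illustrative, not in the original code): with elevation == 0 these bounds
            # evaluate to min_ver = -30 and max_ver = 30; with elevation == -30 they become -30 and 60,
            # so the absolute elevation (elevation + ver) stays within [-80, 80] while still covering [-30, 30].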
for _ in range(self.opt.batch_size):
# render random view
ver = np.random.randint(min_ver, max_ver)
hor = np.random.randint(-180, 180)
radius = 0
vers.append(ver)
hors.append(hor)
radii.append(radius)
pose = orbit_camera(self.opt.elevation + ver, hor, self.opt.radius + radius)
poses.append(pose)
cur_cam = MiniCam(pose, render_resolution, render_resolution, self.cam.fovy, self.cam.fovx, self.cam.near, self.cam.far)
bg_color = torch.tensor([1, 1, 1] if np.random.rand() > self.opt.invert_bg_prob else [0, 0, 0], dtype=torch.float32, device="cuda")
out = self.renderer.render(cur_cam, bg_color=bg_color)
image = out["image"].unsqueeze(0) # [1, 3, H, W] in [0, 1]
images.append(image)
# enable mvdream training
if self.opt.mvdream:
for view_i in range(1, 4):
pose_i = orbit_camera(self.opt.elevation + ver, hor + 90 * view_i, self.opt.radius + radius)
poses.append(pose_i)
cur_cam_i = MiniCam(pose_i, render_resolution, render_resolution, self.cam.fovy, self.cam.fovx, self.cam.near, self.cam.far)
# bg_color = torch.tensor([0.5, 0.5, 0.5], dtype=torch.float32, device="cuda")
out_i = self.renderer.render(cur_cam_i, bg_color=bg_color)
image = out_i["image"].unsqueeze(0) # [1, 3, H, W] in [0, 1]
images.append(image)
images = torch.cat(images, dim=0)
poses = torch.from_numpy(np.stack(poses, axis=0)).to(self.device)
# import kiui
# print(hor, ver)
# kiui.vis.plot_image(images)
# guidance loss
if self.enable_sd:
if self.opt.mvdream:
loss = loss + self.opt.lambda_sd * self.guidance_sd.train_step(images, poses, step_ratio=step_ratio if self.opt.anneal_timestep else None)
else:
loss = loss + self.opt.lambda_sd * self.guidance_sd.train_step(images, step_ratio=step_ratio if self.opt.anneal_timestep else None)
if self.enable_zero123:
loss = loss + self.opt.lambda_zero123 * self.guidance_zero123.train_step(images, vers, hors, radii, step_ratio=step_ratio if self.opt.anneal_timestep else None, default_elevation=self.opt.elevation)
# optimize step
loss.backward()
self.optimizer.step()
self.optimizer.zero_grad()
# densify and prune
if self.step >= self.opt.density_start_iter and self.step <= self.opt.density_end_iter:
viewspace_point_tensor, visibility_filter, radii = out["viewspace_points"], out["visibility_filter"], out["radii"]
self.renderer.gaussians.max_radii2D[visibility_filter] = torch.max(self.renderer.gaussians.max_radii2D[visibility_filter], radii[visibility_filter])
self.renderer.gaussians.add_densification_stats(viewspace_point_tensor, visibility_filter)
if self.step % self.opt.densification_interval == 0:
self.renderer.gaussians.densify_and_prune(self.opt.densify_grad_threshold, min_opacity=0.01, extent=4, max_screen_size=1)
if self.step % self.opt.opacity_reset_interval == 0:
self.renderer.gaussians.reset_opacity()
ender.record()
torch.cuda.synchronize()
t = starter.elapsed_time(ender)
self.need_update = True
if self.gui:
dpg.set_value("_log_train_time", f"{t:.4f}ms")
dpg.set_value(
"_log_train_log",
f"step = {self.step: 5d} (+{self.train_steps: 2d}) loss = {loss.item():.4f}",
)
# dynamic train steps (no need for now)
# max allowed train time per-frame is 500 ms
# full_t = t / self.train_steps * 16
# train_steps = min(16, max(4, int(16 * 500 / full_t)))
# if train_steps > self.train_steps * 1.2 or train_steps < self.train_steps * 0.8:
# self.train_steps = train_steps
@torch.no_grad()
def test_step(self):
# ignore if no need to update
if not self.need_update:
return
starter = torch.cuda.Event(enable_timing=True)
ender = torch.cuda.Event(enable_timing=True)
starter.record()
# should update image
if self.need_update:
# render image
cur_cam = MiniCam(
self.cam.pose,
self.W,
self.H,
self.cam.fovy,
self.cam.fovx,
self.cam.near,
self.cam.far,
)
out = self.renderer.render(cur_cam, self.gaussain_scale_factor)
buffer_image = out[self.mode] # [3, H, W]
if self.mode in ['depth', 'alpha']:
buffer_image = buffer_image.repeat(3, 1, 1)
if self.mode == 'depth':
buffer_image = (buffer_image - buffer_image.min()) / (buffer_image.max() - buffer_image.min() + 1e-20)
buffer_image = F.interpolate(
buffer_image.unsqueeze(0),
size=(self.H, self.W),
mode="bilinear",
align_corners=False,
).squeeze(0)
self.buffer_image = (
buffer_image.permute(1, 2, 0)
.contiguous()
.clamp(0, 1)
.contiguous()
.detach()
.cpu()
.numpy()
)
# display input_image
if self.overlay_input_img and self.input_img is not None:
self.buffer_image = (
self.buffer_image * (1 - self.overlay_input_img_ratio)
+ self.input_img * self.overlay_input_img_ratio
)
self.need_update = False
ender.record()
torch.cuda.synchronize()
t = starter.elapsed_time(ender)
if self.gui:
dpg.set_value("_log_infer_time", f"{t:.4f}ms ({int(1000/t)} FPS)")
dpg.set_value(
"_texture", self.buffer_image
) # buffer must be contiguous, else seg fault!
def load_input(self, file):
# load image
print(f'[INFO] load image from {file}...')
img = cv2.imread(file, cv2.IMREAD_UNCHANGED)
if img.shape[-1] == 3:
if self.bg_remover is None:
self.bg_remover = rembg.new_session()
img = rembg.remove(img, session=self.bg_remover)
img = cv2.resize(img, (self.W, self.H), interpolation=cv2.INTER_AREA)
img = img.astype(np.float32) / 255.0
self.input_mask = img[..., 3:]
# white bg
self.input_img = img[..., :3] * self.input_mask + (1 - self.input_mask)
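        # Editor's note (illustrative, not in the original code): this is standard alpha compositing
        # over a white background, e.g. a pixel with rgb = 0.2 and alpha = 0.25 becomes
        # 0.2 * 0.25 + (1 - 0.25) = 0.80.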
# bgr to rgb
self.input_img = self.input_img[..., ::-1].copy()
# load prompt
file_prompt = file.replace("_rgba.png", "_caption.txt")
if os.path.exists(file_prompt):
print(f'[INFO] load prompt from {file_prompt}...')
with open(file_prompt, "r") as f:
self.prompt = f.read().strip()
@torch.no_grad()
def save_model(self, mode='geo', texture_size=1024):
os.makedirs(self.opt.outdir, exist_ok=True)
if mode == 'geo':
path = os.path.join(self.opt.outdir, self.opt.save_path + '_mesh.ply')
mesh = self.renderer.gaussians.extract_mesh(path, self.opt.density_thresh)
mesh.write_ply(path)
elif mode == 'geo+tex':
path = os.path.join(self.opt.outdir, self.opt.save_path + '_mesh.' + self.opt.mesh_format)
mesh = self.renderer.gaussians.extract_mesh(path, self.opt.density_thresh)
# perform texture extraction
print(f"[INFO] unwrap uv...")
h = w = texture_size
mesh.auto_uv()
mesh.auto_normal()
albedo = torch.zeros((h, w, 3), device=self.device, dtype=torch.float32)
cnt = torch.zeros((h, w, 1), device=self.device, dtype=torch.float32)
# self.prepare_train() # tmp fix for not loading 0123
# vers = [0]
# hors = [0]
vers = [0] * 8 + [-45] * 8 + [45] * 8 + [-89.9, 89.9]
hors = [0, 45, -45, 90, -90, 135, -135, 180] * 3 + [0, 0]
render_resolution = 512
import nvdiffrast.torch as dr
if not self.opt.force_cuda_rast and (not self.opt.gui or os.name == 'nt'):
glctx = dr.RasterizeGLContext()
else:
glctx = dr.RasterizeCudaContext()
for ver, hor in zip(vers, hors):
# render image
pose = orbit_camera(ver, hor, self.cam.radius)
cur_cam = MiniCam(
pose,
render_resolution,
render_resolution,
self.cam.fovy,
self.cam.fovx,
self.cam.near,
self.cam.far,
)
cur_out = self.renderer.render(cur_cam)
rgbs = cur_out["image"].unsqueeze(0) # [1, 3, H, W] in [0, 1]
# enhance texture quality with zero123 [not working well]
# if self.opt.guidance_model == 'zero123':
# rgbs = self.guidance.refine(rgbs, [ver], [hor], [0])
# import kiui
# kiui.vis.plot_image(rgbs)
# get coordinate in texture image
pose = torch.from_numpy(pose.astype(np.float32)).to(self.device)
proj = torch.from_numpy(self.cam.perspective.astype(np.float32)).to(self.device)
v_cam = torch.matmul(F.pad(mesh.v, pad=(0, 1), mode='constant', value=1.0), torch.inverse(pose).T).float().unsqueeze(0)
v_clip = v_cam @ proj.T
rast, rast_db = dr.rasterize(glctx, v_clip, mesh.f, (render_resolution, render_resolution))
depth, _ = dr.interpolate(-v_cam[..., [2]], rast, mesh.f) # [1, H, W, 1]
depth = depth.squeeze(0) # [H, W, 1]
alpha = (rast[0, ..., 3:] > 0).float()
uvs, _ = dr.interpolate(mesh.vt.unsqueeze(0), rast, mesh.ft) # [1, 512, 512, 2] in [0, 1]
# use normal to produce a back-project mask
normal, _ = dr.interpolate(mesh.vn.unsqueeze(0).contiguous(), rast, mesh.fn)
normal = safe_normalize(normal[0])
# rotated normal (where [0, 0, 1] always faces camera)
rot_normal = normal @ pose[:3, :3]
viewcos = rot_normal[..., [2]]
mask = (alpha > 0) & (viewcos > 0.5) # [H, W, 1]
mask = mask.view(-1)
uvs = uvs.view(-1, 2).clamp(0, 1)[mask]
rgbs = rgbs.view(3, -1).permute(1, 0)[mask].contiguous()
# update texture image
cur_albedo, cur_cnt = mipmap_linear_grid_put_2d(
h, w,
uvs[..., [1, 0]] * 2 - 1,
rgbs,
min_resolution=256,
return_count=True,
)
# albedo += cur_albedo
# cnt += cur_cnt
mask = cnt.squeeze(-1) < 0.1
albedo[mask] += cur_albedo[mask]
cnt[mask] += cur_cnt[mask]
mask = cnt.squeeze(-1) > 0
albedo[mask] = albedo[mask] / cnt[mask].repeat(1, 3)
mask = mask.view(h, w)
albedo = albedo.detach().cpu().numpy()
mask = mask.detach().cpu().numpy()
# dilate texture
from sklearn.neighbors import NearestNeighbors
from scipy.ndimage import binary_dilation, binary_erosion
inpaint_region = binary_dilation(mask, iterations=32)
inpaint_region[mask] = 0
search_region = mask.copy()
not_search_region = binary_erosion(search_region, iterations=3)
search_region[not_search_region] = 0
search_coords = np.stack(np.nonzero(search_region), axis=-1)
inpaint_coords = np.stack(np.nonzero(inpaint_region), axis=-1)
knn = NearestNeighbors(n_neighbors=1, algorithm="kd_tree").fit(
search_coords
)
_, indices = knn.kneighbors(inpaint_coords)
albedo[tuple(inpaint_coords.T)] = albedo[tuple(search_coords[indices[:, 0]].T)]
mesh.albedo = torch.from_numpy(albedo).to(self.device)
mesh.write(path)
else:
path = os.path.join(self.opt.outdir, self.opt.save_path + '_model.ply')
self.renderer.gaussians.save_ply(path)
print(f"[INFO] save model to {path}.")
def register_dpg(self):
### register texture
with dpg.texture_registry(show=False):
dpg.add_raw_texture(
self.W,
self.H,
self.buffer_image,
format=dpg.mvFormat_Float_rgb,
tag="_texture",
)
### register window
# the rendered image, as the primary window
with dpg.window(
tag="_primary_window",
width=self.W,
height=self.H,
pos=[0, 0],
no_move=True,
no_title_bar=True,
no_scrollbar=True,
):
# add the texture
dpg.add_image("_texture")
# dpg.set_primary_window("_primary_window", True)
# control window
with dpg.window(
label="Control",
tag="_control_window",
width=600,
height=self.H,
pos=[self.W, 0],
no_move=True,
no_title_bar=True,
):
# button theme
with dpg.theme() as theme_button:
with dpg.theme_component(dpg.mvButton):
dpg.add_theme_color(dpg.mvThemeCol_Button, (23, 3, 18))
dpg.add_theme_color(dpg.mvThemeCol_ButtonHovered, (51, 3, 47))
dpg.add_theme_color(dpg.mvThemeCol_ButtonActive, (83, 18, 83))
dpg.add_theme_style(dpg.mvStyleVar_FrameRounding, 5)
dpg.add_theme_style(dpg.mvStyleVar_FramePadding, 3, 3)
# timer stuff
with dpg.group(horizontal=True):
dpg.add_text("Infer time: ")
dpg.add_text("no data", tag="_log_infer_time")
def callback_setattr(sender, app_data, user_data):
setattr(self, user_data, app_data)
# init stuff
with dpg.collapsing_header(label="Initialize", default_open=True):
# seed stuff
def callback_set_seed(sender, app_data):
self.seed = app_data
self.seed_everything()
dpg.add_input_text(
label="seed",
default_value=self.seed,
on_enter=True,
callback=callback_set_seed,
)
# input stuff
def callback_select_input(sender, app_data):
# only one item
for k, v in app_data["selections"].items():
dpg.set_value("_log_input", k)
self.load_input(v)
self.need_update = True
with dpg.file_dialog(
directory_selector=False,
show=False,
callback=callback_select_input,
file_count=1,
tag="file_dialog_tag",
width=700,
height=400,
):
dpg.add_file_extension("Images{.jpg,.jpeg,.png}")
with dpg.group(horizontal=True):
dpg.add_button(
label="input",
callback=lambda: dpg.show_item("file_dialog_tag"),
)
dpg.add_text("", tag="_log_input")
# overlay stuff
with dpg.group(horizontal=True):
def callback_toggle_overlay_input_img(sender, app_data):
self.overlay_input_img = not self.overlay_input_img
self.need_update = True
dpg.add_checkbox(
label="overlay image",
default_value=self.overlay_input_img,
callback=callback_toggle_overlay_input_img,
)
def callback_set_overlay_input_img_ratio(sender, app_data):
self.overlay_input_img_ratio = app_data
self.need_update = True
dpg.add_slider_float(
label="ratio",
min_value=0,
max_value=1,
format="%.1f",
default_value=self.overlay_input_img_ratio,
callback=callback_set_overlay_input_img_ratio,
)
# prompt stuff
dpg.add_input_text(
label="prompt",
default_value=self.prompt,
callback=callback_setattr,
user_data="prompt",
)
dpg.add_input_text(
label="negative",
default_value=self.negative_prompt,
callback=callback_setattr,
user_data="negative_prompt",
)
# save current model
with dpg.group(horizontal=True):
dpg.add_text("Save: ")
def callback_save(sender, app_data, user_data):
self.save_model(mode=user_data)
dpg.add_button(
label="model",
tag="_button_save_model",
callback=callback_save,
user_data='model',
)
dpg.bind_item_theme("_button_save_model", theme_button)
dpg.add_button(
label="geo",
tag="_button_save_mesh",
callback=callback_save,
user_data='geo',
)
dpg.bind_item_theme("_button_save_mesh", theme_button)
dpg.add_button(
label="geo+tex",
tag="_button_save_mesh_with_tex",
callback=callback_save,
user_data='geo+tex',
)
dpg.bind_item_theme("_button_save_mesh_with_tex", theme_button)
dpg.add_input_text(
label="",
default_value=self.opt.save_path,
callback=callback_setattr,
user_data="save_path",
)
# training stuff
with dpg.collapsing_header(label="Train", default_open=True):
# lr and train button
with dpg.group(horizontal=True):
dpg.add_text("Train: ")
def callback_train(sender, app_data):
if self.training:
self.training = False
dpg.configure_item("_button_train", label="start")
else:
self.prepare_train()
self.training = True
dpg.configure_item("_button_train", label="stop")
# dpg.add_button(
# label="init", tag="_button_init", callback=self.prepare_train
# )
# dpg.bind_item_theme("_button_init", theme_button)
dpg.add_button(
label="start", tag="_button_train", callback=callback_train
)
dpg.bind_item_theme("_button_train", theme_button)
with dpg.group(horizontal=True):
dpg.add_text("", tag="_log_train_time")
dpg.add_text("", tag="_log_train_log")
# rendering options
with dpg.collapsing_header(label="Rendering", default_open=True):
# mode combo
def callback_change_mode(sender, app_data):
self.mode = app_data
self.need_update = True
dpg.add_combo(
("image", "depth", "alpha"),
label="mode",
default_value=self.mode,
callback=callback_change_mode,
)
# fov slider
def callback_set_fovy(sender, app_data):
self.cam.fovy = np.deg2rad(app_data)
self.need_update = True
dpg.add_slider_int(
label="FoV (vertical)",
min_value=1,
max_value=120,
format="%d deg",
default_value=np.rad2deg(self.cam.fovy),
callback=callback_set_fovy,
)
def callback_set_gaussain_scale(sender, app_data):
self.gaussain_scale_factor = app_data
self.need_update = True
dpg.add_slider_float(
label="gaussain scale",
min_value=0,
max_value=1,
format="%.2f",
default_value=self.gaussain_scale_factor,
callback=callback_set_gaussain_scale,
)
### register camera handler
def callback_camera_drag_rotate_or_draw_mask(sender, app_data):
if not dpg.is_item_focused("_primary_window"):
return
dx = app_data[1]
dy = app_data[2]
self.cam.orbit(dx, dy)
self.need_update = True
def callback_camera_wheel_scale(sender, app_data):
if not dpg.is_item_focused("_primary_window"):
return
delta = app_data
self.cam.scale(delta)
self.need_update = True
def callback_camera_drag_pan(sender, app_data):
if not dpg.is_item_focused("_primary_window"):
return
dx = app_data[1]
dy = app_data[2]
self.cam.pan(dx, dy)
self.need_update = True
def callback_set_mouse_loc(sender, app_data):
if not dpg.is_item_focused("_primary_window"):
return
# just the pixel coordinate in image
self.mouse_loc = np.array(app_data)
with dpg.handler_registry():
# for camera moving
dpg.add_mouse_drag_handler(
button=dpg.mvMouseButton_Left,
callback=callback_camera_drag_rotate_or_draw_mask,
)
dpg.add_mouse_wheel_handler(callback=callback_camera_wheel_scale)
dpg.add_mouse_drag_handler(
button=dpg.mvMouseButton_Middle, callback=callback_camera_drag_pan
)
dpg.create_viewport(
title="Gaussian3D",
width=self.W + 600,
height=self.H + (45 if os.name == "nt" else 0),
resizable=False,
)
### global theme
with dpg.theme() as theme_no_padding:
with dpg.theme_component(dpg.mvAll):
# set all padding to 0 to avoid scroll bar
dpg.add_theme_style(
dpg.mvStyleVar_WindowPadding, 0, 0, category=dpg.mvThemeCat_Core
)
dpg.add_theme_style(
dpg.mvStyleVar_FramePadding, 0, 0, category=dpg.mvThemeCat_Core
)
dpg.add_theme_style(
dpg.mvStyleVar_CellPadding, 0, 0, category=dpg.mvThemeCat_Core
)
dpg.bind_item_theme("_primary_window", theme_no_padding)
dpg.setup_dearpygui()
### register a larger font
# get it from: https://github.com/lxgw/LxgwWenKai/releases/download/v1.300/LXGWWenKai-Regular.ttf
if os.path.exists("LXGWWenKai-Regular.ttf"):
with dpg.font_registry():
with dpg.font("LXGWWenKai-Regular.ttf", 18) as default_font:
dpg.bind_font(default_font)
# dpg.show_metrics()
dpg.show_viewport()
def render(self):
assert self.gui
while dpg.is_dearpygui_running():
# update texture every frame
if self.training:
self.train_step()
self.test_step()
dpg.render_dearpygui_frame()
# no gui mode
def train(self, iters=500):
if iters > 0:
self.prepare_train()
for i in tqdm.trange(iters):
self.train_step()
# do a last prune
self.renderer.gaussians.prune(min_opacity=0.01, extent=1, max_screen_size=1)
# save
self.save_model(mode='model')
self.save_model(mode='geo+tex')
if __name__ == "__main__":
import argparse
from omegaconf import OmegaConf
parser = argparse.ArgumentParser()
parser.add_argument("--config", required=True, help="path to the yaml config file")
args, extras = parser.parse_known_args()
# override default config from cli
opt = OmegaConf.merge(OmegaConf.load(args.config), OmegaConf.from_cli(extras))
gui = GUI(opt)
if opt.gui:
gui.render()
else:
gui.train(opt.iters)
| [
"_rgba.png",
"_caption.txt"
] |
2024-01-10 | zestor/Muses | python~MusesHelper.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import concurrent.futures
import os
import re
from concurrent.futures.thread import ThreadPoolExecutor
from time import sleep, time
import cohere
import nlpcloud # NLP Cloud Playground https://www.nlpcloud.com
import nltk.data # NLP sentence parser used to remove any duplicate word for word sentence output from AI response
import openai # OpenAI https://www.openai.com
# Parallelism makes concurrent, simultaneous calls to the AI engine.
# From those responses, the best text is chosen by length.
# To pick the best of, say, 3 generations for the same next scene, pass numberOfTries=3 to getLongestText below.
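# Editor's note (illustrative sketch, not part of the original module): a typical call, with a
# placeholder prompt and an empty previous scene, would look like
#   best = MusesHelper.getLongestText(MusesHelper.AI_ENGINE_OPENAI,
#                                     "Write the next scene ...", "", 3)
# where 3 is the number of concurrent generations compared by length.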
class MusesHelper:
open_ai_api_key = os.getenv('OPENAI_API_KEY')
nlp_cloud_api_key = os.getenv('NLPCLOUD_API_KEY')
cohere_api_key = os.getenv('COHERE_PROD_API_KEY')
AI_ENGINE_NLPCLOUD = 'nlpcloud'
AI_ENGINE_OPENAI = 'openai'
AI_ENGINE_COHERE = 'cohere'
def openFile(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
def saveFile(filepath, content):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
def mkDirIfNotExists(path):
if not os.path.exists(path):
os.makedirs(path)
def __nlpcloudPrivateCallout(client, prompt):
return client.generation(
prompt,
min_length=100,
max_length=256,
length_no_input=True,
remove_input=True,
end_sequence=None,
top_p=1,
temperature=0.85,
top_k=25,
repetition_penalty=1,
length_penalty=1,
do_sample=True,
early_stopping=False,
num_beams=1,
no_repeat_ngram_size=0,
num_return_sequences=1,
bad_words=None,
remove_end_sequence=False
)
def __openaiPrivateCallout(prompt):
return openai.Completion.create(
engine='text-davinci-003',
prompt=prompt,
temperature=0.7,
max_tokens=2500,
top_p=1.0,
frequency_penalty=0.3,
presence_penalty=0.3,
stop=['zxcv'])
def __coherePrivateCallout(client, prompt):
return client.generate(
model='command-xlarge-nightly',
prompt=prompt,
max_tokens=750,
temperature=0.7,
p=1.0,
frequency_penalty=0.3,
presence_penalty=0.3,
stop_sequences=['zxcv'])
def nlpcloudCallOut(prompt):
max_retry = 5
retry = 0
prompt = prompt.encode(encoding='ASCII',errors='ignore').decode() # force it to fix any unicode errors
while True:
try:
sleep(1) # Wait 1 second because NLP Cloud will error with HTTP 429 too many requests
client = nlpcloud.Client(
'finetuned-gpt-neox-20b',
MusesHelper.nlp_cloud_api_key,
gpu=True,
lang='en')
engine_output = MusesHelper.__nlpcloudPrivateCallout(client,prompt)
text = engine_output['generated_text'].strip()
text = re.sub('\s+', ' ', text)
# retry incomplete responses once
# last character is not some type of sentence ending punctuation
if not text.endswith(('.','!','?','"')):
sleep(1) # Wait 1 second because NLP Cloud will error with HTTP 429 too many requests
engine_output = MusesHelper.__nlpcloudPrivateCallout(client,prompt+text)
text2 = engine_output['generated_text'].strip()
text2 = re.sub('\s+', ' ', text2)
text = text + ' ' + text2
# retry incomplete responses twice
# last character is not some type of sentence ending punctuation
if not text.endswith(('.','!','?','"')):
sleep(1) # Wait 1 second because NLP Cloud will error with HTTP 429 too many requests
engine_output = MusesHelper.__nlpcloudPrivateCallout(client,prompt+text)
text2 = engine_output['generated_text'].strip()
text2 = re.sub('\s+', ' ', text2)
text = text + ' ' + text2
filename = '%s_nlpcloud.txt' % time()
MusesHelper.mkDirIfNotExists('logs')
MusesHelper.mkDirIfNotExists('logs/nlpcloud')
MusesHelper.saveFile('logs/nlpcloud/%s' % filename, prompt + '\n\n==========\n\n' + text)
return text
except Exception as oops:
retry += 1
if retry >= max_retry:
return "NLPCLOUD error: %s" % oops
print('Error communicating with NLP Cloud:', oops)
sleep(1)
def openaiCallOut(prompt):
max_retry = 5
retry = 0
prompt = prompt.encode(encoding='ASCII',errors='ignore').decode()
while True:
try:
response = MusesHelper.__openaiPrivateCallout(prompt)
text = response['choices'][0]['text'].strip()
text = re.sub('\s+', ' ', text)
# retry incomplete responses once
# last character is not some type of sentence ending punctuation
if not text.endswith(('.','!','?','"')):
response = MusesHelper.__openaiPrivateCallout(prompt+text)
text2 = response['choices'][0]['text'].strip()
text2 = re.sub('\s+', ' ', text2)
text = text + text2
# retry incomplete responses twice
# last character is not some type of sentence ending punctuation
if not text.endswith(('.','!','?','"')):
response = MusesHelper.__openaiPrivateCallout(prompt+text)
text2 = response['choices'][0]['text'].strip()
text2 = re.sub('\s+', ' ', text2)
text = text + text2
filename = '%s_gpt3.txt' % time()
MusesHelper.mkDirIfNotExists('logs')
MusesHelper.mkDirIfNotExists('logs/openai')
MusesHelper.saveFile('logs/openai/%s' % filename, prompt + '\n\n==========\n\n' + text)
return text
except Exception as oops:
retry += 1
if retry >= max_retry:
return "OpenAI error: %s" % oops
print('Error communicating with OpenAI:', oops)
sleep(1)
def cohereCallOut(prompt):
max_retry = 5
retry = 0
prompt = prompt.encode(encoding='ASCII',errors='ignore').decode()
while True:
try:
client = cohere.Client(MusesHelper.cohere_api_key)
response = MusesHelper.__coherePrivateCallout(client, prompt)
text = response.generations[0].text.strip()
text = text.replace('"/','"')
text = re.sub('\s+', ' ', text)
# retry incomplete responses once
# last character is not some type of sentence ending punctuation
if not text.endswith(('.','!','?','"')):
response = MusesHelper.__coherePrivateCallout(client, prompt+text)
text2 = response.generations[0].text.strip()
text2 = text2.replace('"/','"')
text2 = re.sub('\s+', ' ', text2)
text = text + text2
# retry incomplete responses twice
# last character is not some type of sentence ending punctuation
if not text.endswith(('.','!','?','"')):
response = MusesHelper.__coherePrivateCallout(client, prompt+text)
text2 = response.generations[0].text.strip()
text2 = text2.replace('"/','"')
text2 = re.sub('\s+', ' ', text2)
text = text + text2
filename = '%s_cohere.txt' % time()
MusesHelper.mkDirIfNotExists('logs')
MusesHelper.mkDirIfNotExists('logs/cohere')
MusesHelper.saveFile('logs/cohere/%s' % filename, prompt + '\n\n==========\n\n' + text)
return text
except Exception as oops:
retry += 1
if retry >= max_retry:
return "COHERE error: %s" % oops
print('Error communicating with CO:HERE:', oops)
sleep(1)
def callAIEngine(AIEngine, prompt):
scene = ''
print('\n======================= CALLING AI ENGINE =======================')
print('\n',prompt)
if AIEngine == MusesHelper.AI_ENGINE_OPENAI:
scene = MusesHelper.openaiCallOut(prompt)
if AIEngine == MusesHelper.AI_ENGINE_NLPCLOUD:
scene = MusesHelper.nlpcloudCallOut(prompt)
if AIEngine == MusesHelper.AI_ENGINE_COHERE:
scene = MusesHelper.cohereCallOut(prompt)
print('\n',scene,'\n','=======================')
return scene
def cleanUpAIengineOutput(text):
text = re.sub(r'[1-9]+\.\s?', '\r\n', text)
text = text.replace(': ','-')
text = os.linesep.join([s for s in text.splitlines() if s])
return text
def OnlyFirstParagraph(this_text):
retval = ''
this_text = this_text.strip() # remove spaces
lines = this_text.splitlines()
if lines[0]:
retval = lines[0]
return retval
def removeAnyPreviousLines(this_text, previous_scene):
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
thisLines = tokenizer.tokenize(this_text)
previousLines = tokenizer.tokenize(previous_scene)
        textArray = []
for thisLine in thisLines:
lineGood = True
for previousLine in previousLines:
if thisLine == previousLine:
lineGood = False
if lineGood:
textArray.append(thisLine)
return ' '.join(textArray).strip()
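    # Editor's note (illustrative example, not in the original module): given
    #   this_text      = "He ran. He hid."
    #   previous_scene = "He ran."
    # removeAnyPreviousLines returns "He hid.", i.e. sentences that already appear verbatim
    # in the previous scene are stripped from the new AI output.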
def getLongestText(AIEngine, prompt, previous_scene, numberOfTries):
# HOW MANY TIMES TO REGEN THE SAME PARAGRAPH
prompts = []
for j in range (0,numberOfTries):
prompts.append(prompt)
# MULTIPLE SIMULTANEOUS CONCURRENT CALLS TO AI ENGINE
prompt_queue = []
with ThreadPoolExecutor(max_workers=numberOfTries) as executor:
ordinal = 1
for prompt in prompts:
prompt_queue.append(executor.submit(MusesHelper.callAIEngine, AIEngine, prompt))
ordinal += 1
# WAIT FOR ALL SIMULTANEOUS CONCURRENT CALLS TO COMPLETE
# LOOP TO FIND THE LONGEST PARAGRAPH
longest_text = ''
longest_text_length = 0
for future in concurrent.futures.as_completed(prompt_queue):
try:
generated_text = future.result()
# NLP CLOUD CREATES USUALLY A GOOD FIRST PARAGRAPH, BUT THEN GARBAGE
if AIEngine == MusesHelper.AI_ENGINE_NLPCLOUD:
generated_text = MusesHelper.OnlyFirstParagraph(generated_text)
if generated_text:
generated_text = MusesHelper.removeAnyPreviousLines(generated_text, previous_scene)
len_this_generated_text = len(generated_text)
if len_this_generated_text > longest_text_length:
longest_text_length = len_this_generated_text
longest_text = generated_text
print('\n=== BEST SO FAR ====> %d size \n%s' % (len_this_generated_text, generated_text))
else:
print('\n=== NOT BEST ========> %d size \n%s' % (len_this_generated_text, generated_text))
else:
print('\n\ngenerated blank')
except Exception as exc:
print('\n\ngenerated an exception: %s' % (exc))
print('\n== CHOSEN LONGEST LENGTH ==> %d size \n%s' % (longest_text_length, longest_text))
return longest_text | [
"[]",
"ignore"
] |
2024-01-10 | M1rn4/speechText | clonar.py | import os
import openai
from dotenv import load_dotenv
import requests
from elevenlabs import clone, generate, play, stream
from pydub import AudioSegment
import wave
import io
from elevenlabs.api.voice import Voice, VoiceSample
from voices import mirna1, mitchel1, mirna21, luis1
# Configure API keys
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
eleven_api_key = os.getenv("ELEVEN_API_KEY")
actual_voz = luis1
# Function to interact with GPT-4
def interact_with_gpt4(prompt, voice):
# Implement your GPT-4 interaction logic here
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=256,
n=1,
stop=None,
temperature=0.7
)
response_text = response.choices[0].text.strip()
# Generate audio using cloned voice
audio = generate(text=response_text, voice=voice)
return audio
# audio_stream = generate(
# text=response_text,
# stream=True, voice=voice
# )
# stream(audio_stream)
# return audio_stream
# Function to save audio file
def save_audio_file(datos_audio, nombre_archivo, formato='mp3'):
audio_io = io.BytesIO(datos_audio)
audio = AudioSegment.from_file(audio_io)
audio.export(nombre_archivo, format=formato)
# Main function
def main():
# Set up voice cloning
voice = actual_voz
while True:
prompt = input("Enter your prompt (or type 'exit' to stop): ")
if prompt.lower() == 'exit':
break
# Interact with GPT-4 and generate audio
audio_file = interact_with_gpt4(prompt, voice)
# print(audio_file)
# audio_file = audio_file.decode('latin-1')
# print(audio_file)
# Save audio file
nombre_archivo = 'audioclone.mp3'
save_audio_file(audio_file, nombre_archivo)
# # Save audio file
# nombre_archivo = 'audioclone.mp3'
# with open(audio_file, 'rb') as file:
# datos_audio = file.read()
# save_audio_file(datos_audio, nombre_archivo)
# print("Audio saved successfully.")
# # Clean up the temporary audio file
# os.remove(audio_file)
# print("Temporary audio file deleted.")
if __name__ == "__main__":
main()
| [
"Enter your prompt (or type 'exit' to stop): "
] |
2024-01-10 | M1rn4/speechText | voz.py | import openai
from dotenv import load_dotenv
import os
# Load environment variables from the .env file
load_dotenv()
# Get the API key from the environment variables
api_key = os.getenv("OPENAI_API_KEY")
print(api_key)
# Set the API key on the openai module
openai.api_key = api_key
# Open the audio file
with open("./audio.mp3", "rb") as audio_file:
    # Transcribe the audio with the Whisper endpoint
    # (the original call used openai.Transcription, which is not part of the openai package;
    # openai.Audio.transcribe is the documented interface for openai<1.0)
    response = openai.Audio.transcribe("whisper-1", audio_file)
    transcript = response["text"]
# Print the transcription
print(transcript)
# Generate the voice for the translated text
respuesta = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": transcript}
]
)
# Get the generated response
voz_generada = respuesta.choices[0].message.content.strip()
# Print the generated voice
print(voz_generada)
| [
"You are a helpful assistant."
] |
2024-01-10 | Maicon-g14/telegram-ia-bot | chatHandler.py | import os
import logging
from telegram import Update
from telegram.ext import filters, MessageHandler, ApplicationBuilder, ContextTypes
import openai
import logger
openai.api_key = os.getenv("OPEN_AI_TOKEN")
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO
)
def log_response(prompt, response):
logger.log(prompt, response)
async def chatgpt(update: Update, context: ContextTypes.DEFAULT_TYPE):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "Voce se chama White Pixel. Você é uma assistente útil e concisa."},
{"role": "user", "content": update.message.text}
]
)
if response['choices'][0]['finish_reason'] != 'stop':
print(response)
return
print(response['choices'][0]['message']['content'])
log_response(update.message.text, response['choices'][0]['message']['content'])
await context.bot.send_message(
chat_id=update.effective_chat.id,
text=response['choices'][0]['message']['content']
)
async def unknown(update: Update, context: ContextTypes.DEFAULT_TYPE):
await context.bot.send_message(chat_id=update.effective_chat.id, text="Sorry, I didn't understand that command.")
if __name__ == '__main__':
application = ApplicationBuilder().token(os.environ['TELEGRAM_TOKEN']).build()
chatgpt_handler = MessageHandler(filters.TEXT & (~filters.COMMAND), chatgpt)
unknown_handler = MessageHandler(filters.COMMAND, unknown)
application.add_handler(chatgpt_handler)
application.add_handler(unknown_handler)
application.run_polling()
| [
"Voce se chama White Pixel. Você é uma assistente útil e concisa."
] |
2024-01-10 | Utshav-paudel/Company_recommender_LLM | structuring.py | #@ Script for converting unstructured company text data into scores based on the information it contains
## Structure output parser
# importing dependecies
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from dotenv import load_dotenv
load_dotenv()
def company_score_generator(input_text):
# designing response schema for our ranking score of company
response_schemas = [ResponseSchema(name="company_name", description="name of the company" ),
ResponseSchema(name="Risk_averse_score", description="score of risk averse of company" ),
ResponseSchema(name ="Budget_focus_score", description="score of how budget focus company is " ),
ResponseSchema(name="Advanced_score", description="score of how advanced company is with current upto date techs and trends" )
]
output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
format_instructions = output_parser.get_format_instructions()
# prompt to model
prompt = PromptTemplate(
template="You are a world class algorithm for giving score to text data ranging from 0 for wrost performer to 10 for best performer \n{format_instructions} \n{question}",
input_variables=["question"],
partial_variables={"format_instructions": format_instructions}
)
# model and its input output
model = OpenAI()
# text data of company
# question =
_input = prompt.format_prompt(question= input_text)
output = model(_input.to_string())
result = output_parser.parse(output)
return result
if __name__=="__main__":
company_score_generator()
| [
"question",
"You are a world class algorithm for giving score to text data ranging from 0 for wrost performer to 10 for best performer \n{format_instructions} \n{question}",
"format_instructions"
] |
2024-01-10 | yihong0618/iWhat | what~what.py | import json
import string
from rich.console import Console
from rich.table import Table
from rich.style import Style
from rich.text import Text
from openai import OpenAI
class What:
def __init__(self, what, is_en=False, api_base=None):
        if api_base:
            self.client = OpenAI(base_url=api_base)
        else:
            self.client = OpenAI()
self.what = what
self.is_en = is_en
self.what_prompt = """
这个 `{what}` 可能是什么,请按照json格式回答,key值有Maybe和Desc,Maybe回答他最可能是的东西(要求精确些),Desc回答这个东西的描述;
答案应该使用中文。
""".format(
what=what
)
if is_en:
self.what_prompt = """
What is`{what}` might be? please answer in JSON format with key values of 'Maybe' and 'Desc'.
'Maybe' should provide the most likely thing it is (be more precise),
while 'Desc' should describe what this thing is.
And you answer must be english.
""".format(
what=what
)
def _to_what(self):
completion = self.client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": self.what_prompt}],
)
return completion.choices[0].message.content.encode("utf8").decode()
@staticmethod
def _is_all_punctuations(s):
return all(c in string.punctuation for c in s)
def _handle_exception_answer(self):
if self.is_en:
maybe_prompt = "What is`{what}` most likely to be? (Please try to answer more precisely.)".format(
what=self.what
)
desc_prompt = "Describe what `{what}` most likely to be".format(
what=self.what
)
else:
maybe_prompt = "这个 `{what}` 最可能是什么?(要求精确些)".format(what=self.what)
desc_prompt = "描述`{what}`最可能是什么".format(what=self.what)
self.what_prompt = maybe_prompt
maybe_what = self._to_what()
self.what_prompt = desc_prompt
desc_what = self._to_what()
what_json = {"Maybe": maybe_what, "Desc": desc_what}
return what_json
@staticmethod
def _change_line_by_comma_period(in_str):
return (
in_str.replace(",", ",\r\n")
.replace("。", "。\r\n")
.replace(",", ",\r\n")
.replace(".", ".\r\n")
)
def show_what(self):
what = self._to_what()
try:
what_json = json.loads(what)
if "Maybe" not in what_json or "Desc" not in what_json:
raise Exception("Keys incomplete In JSON")
except Exception:
# handle exception when gpt answer is not json format
what_json = self._handle_exception_answer()
if not what_json:
raise Exception("No what JSON!")
console = Console()
title = Text("What is it AI", style=Style(color="#268bd2", bold=True))
table = Table(title=title, show_lines=False, style="dim")
table.add_column("What", style=Style(color="#b58900"))
table.add_column("Maybe", style=Style(color="#d33682"), justify="middle")
table.add_column("Desc", style=Style(color="#859900"), justify="left")
maybe = self._change_line_by_comma_period(what_json["Maybe"].strip())
desc = self._change_line_by_comma_period(what_json["Desc"].strip())
table.add_row(self.what, maybe, desc)
console.print(table)
| [
"这个 `{what}` 最可能是什么?(要求精确些)",
"What is`{what}` most likely to be? (Please try to answer more precisely.)",
"Describe what `{what}` most likely to be",
"描述`{what}`最可能是什么"
] |
2024-01-10 | TaliMotzkin/cseg_fork1 | open_clip_training~src~open_clip~factory.py | import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
from typing import Optional, Tuple, List
import torch
from torch import nn
from torch.nn import functional as F
from .model import CLIP, convert_weights_to_fp16, resize_pos_embed
from .openai import load_openai_model
from .pretrained import get_pretrained_url, download_pretrained
from .transform import image_transform
from .transformer_adapter import TwoWayTransformer
from .mask_decoder import MaskDecoder
import numpy as np
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
with open(cf, 'r') as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_rescan_model_configs() # initial populate of model config registry
def load_state_dict(checkpoint_path: str, map_location='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
if next(iter(state_dict.items()))[0].startswith('module'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
def load_checkpoint(model, checkpoint_path, strict=True):
state_dict = load_state_dict(checkpoint_path)
resize_pos_embed(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=strict)
return incompatible_keys
def create_model(
model_name: str,
pretrained: str = '',
precision: str = 'fp32',
device: torch.device = torch.device('cpu'),
jit: bool = False,
force_quick_gelu: bool = False,
pretrained_image: bool = False,
mask_emb_depth: int = 0,
):
model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
if pretrained.lower() == 'openai':
logging.info(f'Loading pretrained {model_name} from OpenAI.')
model = load_openai_model(model_name, device=device, jit=jit)
# See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
if precision == "amp" or precision == "fp32":
model = model.float()
else:
if model_name in _MODEL_CONFIGS:
logging.info(f'Loading {model_name} model config.')
model_cfg = deepcopy(_MODEL_CONFIGS[model_name])
else:
logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
raise RuntimeError(f'Model config for {model_name} not found.')
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if mask_emb_depth > 0:
model_cfg['vision_cfg']['mask_emb_depth'] = mask_emb_depth
if pretrained_image:
if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
# pretrained weight loading for timm models set via vision_cfg
model_cfg['vision_cfg']['timm_model_pretrained'] = True
else:
assert False, 'pretrained image towers currently only supported for timm models'
model = CLIP(**model_cfg)
if pretrained:
checkpoint_path = ''
url = get_pretrained_url(model_name, pretrained)
if url:
checkpoint_path = download_pretrained(url)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
try:
load_checkpoint(model, checkpoint_path)
except:
load_checkpoint(model, checkpoint_path, strict=False)
logging.info("The keys in the checkpoint_path don't match that of model, make sure that you are doing mask prompt tuning!")
else:
logging.warning(f'Pretrained weights ({pretrained}) not found for model {model_name}.')
raise RuntimeError(f'Pretrained weights ({pretrained}) not found for model {model_name}.')
model.to(device=device)
if precision == "fp16":
assert device.type != 'cpu'
convert_weights_to_fp16(model)
if jit:
model = torch.jit.script(model)
return model
def create_cseg_model(
model_name: str,
pretrained: str = '',
precision: str = 'fp32',
device: torch.device = torch.device('cpu'),
jit: bool = False,
force_quick_gelu: bool = False,
pretrained_image: bool = False,
mask_emb_depth: int = 0,
):
mask_model = create_model(
model_name, pretrained, precision, device, jit,
force_quick_gelu=force_quick_gelu,
pretrained_image=pretrained_image,
mask_emb_depth=mask_emb_depth)
image_model = create_model(
model_name, pretrained, precision, device, jit,
force_quick_gelu=force_quick_gelu,
pretrained_image=pretrained_image,
mask_emb_depth=0)
model = ClipEnsembler(image_model, mask_model)
return model
def create_model_and_transforms(
model_name: str,
pretrained: str = '',
precision: str = 'fp32',
device: torch.device = torch.device('cpu'),
jit: bool = False,
force_quick_gelu: bool = False,
pretrained_image: bool = False,
mean: Optional[Tuple[float, ...]] = None,
std: Optional[Tuple[float, ...]] = None,
scale: Optional[Tuple[float, ...]] = None,
erosion: bool = False,
with_mask: bool = False,
mask_emb_depth: int = 0,
):
model = create_cseg_model(
model_name, pretrained, precision, device, jit,
force_quick_gelu=force_quick_gelu,
pretrained_image=pretrained_image,
mask_emb_depth=mask_emb_depth)
model = model.to(device=device)
preprocess_train = image_transform(model.clip_model.visual.image_size, is_train=True, mean=mean, std=std,
scale=scale, erosion=erosion, with_mask=with_mask)
preprocess_val = image_transform(model.clip_model.visual.image_size, is_train=False, mean=mean, std=std, with_mask=with_mask)
return model, preprocess_train, preprocess_val
def list_models():
""" enumerate available model architectures based on config files """
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
""" add model config path or file and update registry """
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
class ClipEnsembler(nn.Module):
def __init__(
self,
clip_model,
clip_mask_model,
):
super().__init__()
self.clip_model_reg = clip_model
self.clip_model = clip_mask_model
vit_embed_dim = 1024
self.image_embedding_size = vit_embed_dim
self.non_object_embedding = nn.Parameter(
torch.empty(1, self.clip_model.text_projection.shape[-1])
)
nn.init.normal_(
self.non_object_embedding.data,
std=self.clip_model.transformer.width ** -0.5,
)
self.mask_decoder=MaskDecoder(
num_multimask_outputs=1, # Find a way to make this dynamic
transformer=TwoWayTransformer(
depth=2,
embedding_dim=vit_embed_dim,
mlp_dim=2048,
num_heads=8,
),
transformer_dim=vit_embed_dim,
)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
def forward(
self,
image: torch.Tensor,
mask: torch.Tensor,
text
):
masked_vit_features = None
regions = mask
#print('IMAGE input size: ', image.shape) # Num Images x 256 x grid x grid
#print('MASK input size: ', regions.shape) # Num Masks Per Image x 256 x grid x grid
if isinstance(regions, list):
            raise NotImplementedError("list input for region masks is not supported")
masked_features = torch.cat(
[self.get_image_features(image_i) for image_i in regions], dim=0
)
else:
masked_features, masked_vit_features, cls_embed = self.get_mask_embed(regions)
image_features, image_vit_features, positional_encoding = self.get_image_embed(image)
#print('IMAGE ViT FEATURES: ', image_vit_features.shape) # Num Images x 256 x grid x grid
#print('MASK ViT FEATURES: ', masked_vit_features.shape) # Num Masks Per Image x 256 x grid x grid
b, c, grid, _ = image_vit_features.shape
n, _, _, _ = masked_vit_features.shape
b_enc, c_enc, grid_enc, _ = positional_encoding.shape
updated_mask_vit_features = self.mask_decoder(
image_embeddings=image_vit_features.reshape(b, c, grid**2),
image_pe=positional_encoding.reshape(b_enc, c_enc, grid_enc**2),
mask_embeddings=masked_vit_features.reshape(n, c, grid**2),
cls_embed=cls_embed.reshape(n, 1, grid**2),
)
updated_mask_features = self.get_mask_features(updated_mask_vit_features)
text_features = self.clip_model.encode_text(text)
return updated_mask_features, text_features, self.logit_scale.exp()
def encode_image(
self,
image: torch.Tensor,
mask: torch.Tensor,
):
masked_vit_features = None
regions = mask
#print('IMAGE input size: ', image.shape) # Num Images x 256 x grid x grid
#print('MASK input size: ', regions.shape) # Num Masks Per Image x 256 x grid x grid
if isinstance(regions, list):
            raise NotImplementedError("list input for region masks is not supported")
masked_features = torch.cat(
[self.get_image_features(image_i) for image_i in regions], dim=0
)
else:
masked_features, masked_vit_features, cls_embed = self.get_mask_embed(regions)
image_features, image_vit_features, positional_encoding = self.get_image_embed(image)
#print('IMAGE ViT FEATURES: ', image_vit_features.shape) # Num Images x 256 x grid x grid
#print('MASK ViT FEATURES: ', masked_vit_features.shape) # Num Masks Per Image x 256 x grid x grid
b, c, grid, _ = image_vit_features.shape
n, _, _, _ = masked_vit_features.shape
b_enc, c_enc, grid_enc, _ = positional_encoding.shape
updated_mask_vit_features = self.mask_decoder(
image_embeddings=image_vit_features.reshape(b, c, grid**2),
image_pe=positional_encoding.reshape(b_enc, c_enc, grid_enc**2),
mask_embeddings=masked_vit_features.reshape(n, c, grid**2),
cls_embed=cls_embed.reshape(n, 1, grid**2),
)
updated_mask_features = self.get_mask_features(updated_mask_vit_features)
return updated_mask_features
# -----------------------------------------------------------------------------
def get_mask_embed(self, image, region_masks=None):
image_features, vit_embed, _ = self.clip_model.visual.get_vit_embedding(image, region_masks)
cls_embed = vit_embed[:,:1]
#print('CLS EMBED: ', cls_embed.shape)
#cls_embed = cls_embed.reshape(cls_embed.size(0), cls_embed.size(1), cls_embed.size(2)**2).permute(0,2,1)
vit_embed = vit_embed[:,1:]
return image_features, vit_embed, cls_embed
# -----------------------------------------------------------------------------
def get_mask_features(self, vit_region_embed: torch.Tensor):
image_features = self.clip_model.visual.get_clip_embedding(vit_region_embed)
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
return image_features
# -----------------------------------------------------------------------------
def get_image_embed(self, image: torch.Tensor):
image_features, vit_embed, positional_encoding = self.clip_model_reg.visual.get_vit_embedding(image)
vit_embed = vit_embed[:,1:]
return image_features, vit_embed, positional_encoding
# -----------------------------------------------------------------------------
def get_image_features(self, vit_embed: torch.Tensor):
image_features = self.clip_model_reg.visual.get_clip_embedding(vit_embed)
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
return image_features
def normalize_feature(self, feat: torch.Tensor):
return feat / feat.norm(dim=-1, keepdim=True)
| [] |
2024-01-10 | gohyojun15/ANT_diffusion | diffusion~gaussian_diffusion.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
import math
import numpy as np
import torch as th
import enum
from .diffusion_utils import discretized_gaussian_log_likelihood, normal_kl
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape))))
class ModelMeanType(enum.Enum):
"""
Which type of output the model predicts.
"""
PREVIOUS_X = enum.auto() # the model predicts x_{t-1}
START_X = enum.auto() # the model predicts x_0
EPSILON = enum.auto() # the model predicts epsilon
class ModelVarType(enum.Enum):
"""
What is used as the model's output variance.
The LEARNED_RANGE option has been added to allow the model to predict
values between FIXED_SMALL and FIXED_LARGE, making its job easier.
"""
LEARNED = enum.auto()
FIXED_SMALL = enum.auto()
FIXED_LARGE = enum.auto()
LEARNED_RANGE = enum.auto()
class LossType(enum.Enum):
MSE = enum.auto() # use raw MSE loss (and KL when learning variances)
RESCALED_MSE = enum.auto() # use raw MSE loss (with RESCALED_KL when learning variances)
KL = enum.auto() # use the variational lower-bound
RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB
def is_vb(self):
return self == LossType.KL or self == LossType.RESCALED_KL
def _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):
betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
warmup_time = int(num_diffusion_timesteps * warmup_frac)
betas[:warmup_time] = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64)
return betas
def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):
"""
This is the deprecated API for creating beta schedules.
See get_named_beta_schedule() for the new library of schedules.
"""
if beta_schedule == "quad":
betas = (
np.linspace(
beta_start**0.5,
beta_end**0.5,
num_diffusion_timesteps,
dtype=np.float64,
)
** 2
)
elif beta_schedule == "linear":
betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)
elif beta_schedule == "warmup10":
betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.1)
elif beta_schedule == "warmup50":
betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.5)
elif beta_schedule == "const":
betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64)
elif beta_schedule == "jsd": # 1/T, 1/(T-1), 1/(T-2), ..., 1
betas = 1.0 / np.linspace(
num_diffusion_timesteps, 1, num_diffusion_timesteps, dtype=np.float64
)
else:
raise NotImplementedError(beta_schedule)
assert betas.shape == (num_diffusion_timesteps,)
return betas
def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):
"""
Get a pre-defined beta schedule for the given name.
The beta schedule library consists of beta schedules which remain similar
in the limit of num_diffusion_timesteps.
Beta schedules may be added, but should not be removed or changed once
they are committed to maintain backwards compatibility.
"""
if schedule_name == "linear":
# Linear schedule from Ho et al, extended to work for any number of
# diffusion steps.
scale = 1000 / num_diffusion_timesteps
return get_beta_schedule(
"linear",
beta_start=scale * 0.0001,
beta_end=scale * 0.02,
num_diffusion_timesteps=num_diffusion_timesteps,
)
elif schedule_name == "squaredcos_cap_v2":
print("cosine")
return betas_for_alpha_bar(
num_diffusion_timesteps,
lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
)
else:
raise NotImplementedError(f"unknown beta schedule: {schedule_name}")
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas)
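# Editor's note (illustrative, not in the original file): for the "squaredcos_cap_v2" schedule
# defined above, alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, so each beta is
#   beta_i = min(1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), 0.999),
# which (up to the 0.999 cap) makes the cumulative product of (1 - beta) match alpha_bar
# at the grid points i / T.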
class GaussianDiffusion:
"""
Utilities for training and sampling diffusion models.
Original ported from this codebase:
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42
:param betas: a 1-D numpy array of betas for each diffusion timestep,
starting at T and going to 1.
"""
def __init__(self, *, betas, model_mean_type, model_var_type, loss_type):
self.model_mean_type = model_mean_type
self.model_var_type = model_var_type
self.loss_type = loss_type
# Use float64 for accuracy.
betas = np.array(betas, dtype=np.float64)
self.betas = betas
assert len(betas.shape) == 1, "betas must be 1-D"
assert (betas > 0).all() and (betas <= 1).all()
self.num_timesteps = int(betas.shape[0])
alphas = 1.0 - betas
self.alphas_cumprod = np.cumprod(alphas, axis=0)
self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1])
self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0)
assert self.alphas_cumprod_prev.shape == (self.num_timesteps,)
# calculations for diffusion q(x_t | x_{t-1}) and others
self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod)
self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod)
self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod)
self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1)
# calculations for posterior q(x_{t-1} | x_t, x_0)
self.posterior_variance = (
betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
self.posterior_log_variance_clipped = (
np.log(np.append(self.posterior_variance[1], self.posterior_variance[1:]))
if len(self.posterior_variance) > 1
else np.array([])
)
self.posterior_mean_coef1 = (
betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod)
)
self.posterior_mean_coef2 = (
(1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod)
)
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def q_sample(self, x_start, t, noise=None):
"""
Diffuse the data for a given number of diffusion steps.
In other words, sample from q(x_t | x_0).
:param x_start: the initial data batch.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:param noise: if specified, the split-out normal noise.
:return: A noisy version of x_start.
"""
if noise is None:
noise = th.randn_like(x_start)
assert noise.shape == x_start.shape
return (
_extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
+ _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
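    # Editor's note (illustrative, not in the original file): the return value above is the
    # closed-form marginal of the forward chain,
    #   x_t = sqrt(alphas_cumprod[t]) * x_start + sqrt(1 - alphas_cumprod[t]) * noise,
    # i.e. a sample from q(x_t | x_0).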
def q_posterior_mean_variance(self, x_start, x_t, t):
"""
Compute the mean and variance of the diffusion posterior:
q(x_{t-1} | x_t, x_0)
"""
assert x_start.shape == x_t.shape
posterior_mean = (
_extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start
+ _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = _extract_into_tensor(
self.posterior_log_variance_clipped, t, x_t.shape
)
assert (
posterior_mean.shape[0]
== posterior_variance.shape[0]
== posterior_log_variance_clipped.shape[0]
== x_start.shape[0]
)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):
"""
Apply the model to get p(x_{t-1} | x_t), as well as a prediction of
the initial x, x_0.
:param model: the model, which takes a signal and a batch of timesteps
as input.
:param x: the [N x C x ...] tensor at time t.
:param t: a 1-D Tensor of timesteps.
:param clip_denoised: if True, clip the denoised signal into [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample. Applies before
clip_denoised.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict with the following keys:
- 'mean': the model mean output.
- 'variance': the model variance output.
- 'log_variance': the log of 'variance'.
- 'pred_xstart': the prediction for x_0.
"""
if model_kwargs is None:
model_kwargs = {}
B, C = x.shape[:2]
assert t.shape == (B,)
model_output = model(x, t, **model_kwargs)
if isinstance(model_output, tuple):
model_output, extra = model_output
else:
extra = None
if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:
assert model_output.shape == (B, C * 2, *x.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape)
max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)
# model_var_values lies in [-1, 1] and linearly interpolates the log-variance from min_var (at -1) to max_var (at +1).
frac = (model_var_values + 1) / 2
model_log_variance = frac * max_log + (1 - frac) * min_log
model_variance = th.exp(model_log_variance)
else:
model_variance, model_log_variance = {
# for fixedlarge, we set the initial (log-)variance like so
# to get a better decoder log likelihood.
ModelVarType.FIXED_LARGE: (
np.append(self.posterior_variance[1], self.betas[1:]),
np.log(np.append(self.posterior_variance[1], self.betas[1:])),
),
ModelVarType.FIXED_SMALL: (
self.posterior_variance,
self.posterior_log_variance_clipped,
),
}[self.model_var_type]
model_variance = _extract_into_tensor(model_variance, t, x.shape)
model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)
def process_xstart(x):
if denoised_fn is not None:
x = denoised_fn(x)
if clip_denoised:
return x.clamp(-1, 1)
return x
if self.model_mean_type == ModelMeanType.START_X:
pred_xstart = process_xstart(model_output)
else:
pred_xstart = process_xstart(
self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)
)
model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t)
assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape
return {
"mean": model_mean,
"variance": model_variance,
"log_variance": model_log_variance,
"pred_xstart": pred_xstart,
"extra": extra,
}
def _predict_xstart_from_eps(self, x_t, t, eps):
assert x_t.shape == eps.shape
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
- _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps
)
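    # Illustrative sketch: this helper is the algebraic inverse of q_sample.
    # Since x_t = sqrt(alpha_bar) * x_0 + sqrt(1 - alpha_bar) * eps, solving for
    # x_0 gives x_0 = sqrt(1 / alpha_bar) * x_t - sqrt(1 / alpha_bar - 1) * eps,
    # which is exactly the expression returned above. Assuming `diffusion` is an
    # instance of this class, the round trip recovers x_0 up to float error:
    #
    #     x0 = th.randn(2, 3, 8, 8)
    #     t = th.zeros(2, dtype=th.long)
    #     eps = th.randn_like(x0)
    #     x_t = diffusion.q_sample(x0, t, noise=eps)
    #     assert th.allclose(diffusion._predict_xstart_from_eps(x_t, t, eps), x0, atol=1e-4)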
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute the mean for the previous step, given a function cond_fn that
computes the gradient of a conditional log probability with respect to
x. In particular, cond_fn computes grad(log(p(y|x))), and we want to
condition on y.
This uses the conditioning strategy from Sohl-Dickstein et al. (2015).
"""
gradient = cond_fn(x, t, **model_kwargs)
new_mean = p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float()
return new_mean
def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):
"""
Compute what the p_mean_variance output would have been, should the
model's score function be conditioned by cond_fn.
See condition_mean() for details on cond_fn.
Unlike condition_mean(), this instead uses the conditioning strategy
from Song et al (2020).
"""
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"])
eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, **model_kwargs)
out = p_mean_var.copy()
out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps)
out["mean"], _, _ = self.q_posterior_mean_variance(x_start=out["pred_xstart"], x_t=x, t=t)
return out
def p_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
):
"""
Sample x_{t-1} from the model at the given timestep.
:param model: the model to sample from.
:param x: the current tensor at x_t (the sample being denoised).
:param t: the value of t, starting at 0 for the first diffusion step.
:param clip_denoised: if True, clip the x_start prediction to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- 'sample': a random sample from the model.
- 'pred_xstart': a prediction of x_0.
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
noise = th.randn_like(x)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
if cond_fn is not None:
out["mean"] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs)
sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def p_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
):
"""
Generate samples from the model.
:param model: the model module.
:param shape: the shape of the samples, (N, C, H, W).
:param noise: if specified, the noise from the encoder to sample.
Should be of the same shape as `shape`.
:param clip_denoised: if True, clip x_start predictions to [-1, 1].
:param denoised_fn: if not None, a function which applies to the
x_start prediction before it is used to sample.
:param cond_fn: if not None, this is a gradient function that acts
similarly to the model.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param device: if specified, the device to create the samples on.
If not specified, use a model parameter's device.
:param progress: if True, show a tqdm progress bar.
:return: a non-differentiable batch of samples.
"""
final = None
for sample in self.p_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
):
final = sample
return final["sample"]
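    # Example usage (sketch): `unet` stands for any epsilon-prediction model with
    # the expected (x, t, **kwargs) signature and `diffusion` for an instance of
    # this class; neither is defined in this file.
    #
    #     samples = diffusion.p_sample_loop(
    #         unet,
    #         shape=(8, 3, 64, 64),
    #         clip_denoised=True,
    #         progress=True,          # shows a tqdm bar over all timesteps
    #     )
    #     # `samples` has shape (8, 3, 64, 64), roughly in [-1, 1] after clipping.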
def p_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
):
"""
Generate samples from the model and yield intermediate samples from
each timestep of diffusion.
Arguments are the same as p_sample_loop().
Returns a generator over dicts, where each dict is the return value of
p_sample().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.p_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
)
yield out
img = out["sample"]
def ddim_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t-1} from the model using DDIM.
Same usage as p_sample().
"""
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"])
alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape)
alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape)
sigma = (
eta
* th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar))
* th.sqrt(1 - alpha_bar / alpha_bar_prev)
)
# Equation 12.
noise = th.randn_like(x)
mean_pred = (
out["pred_xstart"] * th.sqrt(alpha_bar_prev)
+ th.sqrt(1 - alpha_bar_prev - sigma**2) * eps
)
nonzero_mask = (
(t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))
) # no noise when t == 0
sample = mean_pred + nonzero_mask * sigma * noise
return {"sample": sample, "pred_xstart": out["pred_xstart"]}
def ddim_reverse_sample(
self,
model,
x,
t,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
eta=0.0,
):
"""
Sample x_{t+1} from the model using DDIM reverse ODE.
"""
assert eta == 0.0, "Reverse ODE only for deterministic path"
out = self.p_mean_variance(
model,
x,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
model_kwargs=model_kwargs,
)
if cond_fn is not None:
out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs)
# Usually our model outputs epsilon, but we re-derive it
# in case we used x_start or x_prev prediction.
eps = (
_extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x
- out["pred_xstart"]
) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape)
alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape)
# Equation 12. reversed
mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps
return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]}
def ddim_sample_loop(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
):
"""
Generate samples from the model using DDIM.
Same usage as p_sample_loop().
"""
final = None
for sample in self.ddim_sample_loop_progressive(
model,
shape,
noise=noise,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
device=device,
progress=progress,
eta=eta,
):
final = sample
return final["sample"]
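    # Example usage (sketch): with eta=0.0 the DDIM update is deterministic given
    # the initial noise, which is what makes few-step sampling reproducible.
    # `unet` and `diffusion` are assumed, as in the p_sample_loop example above.
    #
    #     noise = th.randn(8, 3, 64, 64)
    #     samples = diffusion.ddim_sample_loop(
    #         unet,
    #         shape=(8, 3, 64, 64),
    #         noise=noise,
    #         eta=0.0,
    #     )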
def ddim_sample_loop_progressive(
self,
model,
shape,
noise=None,
clip_denoised=True,
denoised_fn=None,
cond_fn=None,
model_kwargs=None,
device=None,
progress=False,
eta=0.0,
):
"""
Use DDIM to sample from the model and yield intermediate samples from
each timestep of DDIM.
Same usage as p_sample_loop_progressive().
"""
if device is None:
device = next(model.parameters()).device
assert isinstance(shape, (tuple, list))
if noise is not None:
img = noise
else:
img = th.randn(*shape, device=device)
indices = list(range(self.num_timesteps))[::-1]
if progress:
# Lazy import so that we don't depend on tqdm.
from tqdm.auto import tqdm
indices = tqdm(indices)
for i in indices:
t = th.tensor([i] * shape[0], device=device)
with th.no_grad():
out = self.ddim_sample(
model,
img,
t,
clip_denoised=clip_denoised,
denoised_fn=denoised_fn,
cond_fn=cond_fn,
model_kwargs=model_kwargs,
eta=eta,
)
yield out
img = out["sample"]
def _vb_terms_bpd(self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None):
"""
Get a term for the variational lower-bound.
The resulting units are bits (rather than nats, as one might expect).
This allows for comparison to other papers.
:return: a dict with the following keys:
- 'output': a shape [N] tensor of NLLs or KLs.
- 'pred_xstart': the x_0 predictions.
"""
true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)
out = self.p_mean_variance(
model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs
)
kl = normal_kl(true_mean, true_log_variance_clipped, out["mean"], out["log_variance"])
kl = mean_flat(kl) / np.log(2.0)
decoder_nll = -discretized_gaussian_log_likelihood(
x_start, means=out["mean"], log_scales=0.5 * out["log_variance"]
)
assert decoder_nll.shape == x_start.shape
decoder_nll = mean_flat(decoder_nll) / np.log(2.0)
# At the first timestep return the decoder NLL,
# otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t))
output = th.where((t == 0), decoder_nll, kl)
return {"output": output, "pred_xstart": out["pred_xstart"]}
def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):
"""
Compute training losses for a single timestep.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param t: a batch of timestep indices.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:param noise: if specified, the specific Gaussian noise to try to remove.
:return: a dict with the key "loss" containing a tensor of shape [N].
Some mean or variance settings may also have other keys.
"""
if model_kwargs is None:
model_kwargs = {}
if noise is None:
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start, t, noise=noise)
terms = {}
if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL:
terms["loss"] = self._vb_terms_bpd(
model=model,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
model_kwargs=model_kwargs,
)["output"]
if self.loss_type == LossType.RESCALED_KL:
terms["loss"] *= self.num_timesteps
elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:
model_output = model(x_t, t, **model_kwargs)
if self.model_var_type in [
ModelVarType.LEARNED,
ModelVarType.LEARNED_RANGE,
]:
B, C = x_t.shape[:2]
assert model_output.shape == (B, C * 2, *x_t.shape[2:])
model_output, model_var_values = th.split(model_output, C, dim=1)
# Learn the variance using the variational bound, but don't let
# it affect our mean prediction.
frozen_out = th.cat([model_output.detach(), model_var_values], dim=1)
terms["vb"] = self._vb_terms_bpd(
model=lambda *args, r=frozen_out: r,
x_start=x_start,
x_t=x_t,
t=t,
clip_denoised=False,
)["output"]
if self.loss_type == LossType.RESCALED_MSE:
# Divide by 1000 for equivalence with initial implementation.
# Without a factor of 1/1000, the VB term hurts the MSE term.
terms["vb"] *= self.num_timesteps / 1000.0
target = {
ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(
x_start=x_start, x_t=x_t, t=t
)[0],
ModelMeanType.START_X: x_start,
ModelMeanType.EPSILON: noise,
}[self.model_mean_type]
assert model_output.shape == target.shape == x_start.shape
terms["mse"] = mean_flat((target - model_output) ** 2)
if "vb" in terms:
terms["loss"] = terms["mse"] + terms["vb"]
else:
terms["loss"] = terms["mse"]
else:
raise NotImplementedError(self.loss_type)
return terms
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
return mean_flat(kl_prior) / np.log(2.0)
def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
"""
Compute the entire variational lower-bound, measured in bits-per-dim,
as well as other related quantities.
:param model: the model to evaluate loss on.
:param x_start: the [N x C x ...] tensor of inputs.
:param clip_denoised: if True, clip denoised samples.
:param model_kwargs: if not None, a dict of extra keyword arguments to
pass to the model. This can be used for conditioning.
:return: a dict containing the following keys:
- total_bpd: the total variational lower-bound, per batch element.
- prior_bpd: the prior term in the lower-bound.
- vb: an [N x T] tensor of terms in the lower-bound.
- xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
- mse: an [N x T] tensor of epsilon MSEs for each timestep.
"""
device = x_start.device
batch_size = x_start.shape[0]
vb = []
xstart_mse = []
mse = []
for t in list(range(self.num_timesteps))[::-1]:
t_batch = th.tensor([t] * batch_size, device=device)
noise = th.randn_like(x_start)
x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
# Calculate VLB term at the current timestep
with th.no_grad():
out = self._vb_terms_bpd(
model,
x_start=x_start,
x_t=x_t,
t=t_batch,
clip_denoised=clip_denoised,
model_kwargs=model_kwargs,
)
vb.append(out["output"])
xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
mse.append(mean_flat((eps - noise) ** 2))
vb = th.stack(vb, dim=1)
xstart_mse = th.stack(xstart_mse, dim=1)
mse = th.stack(mse, dim=1)
prior_bpd = self._prior_bpd(x_start)
total_bpd = vb.sum(dim=1) + prior_bpd
return {
"total_bpd": total_bpd,
"prior_bpd": prior_bpd,
"vb": vb,
"xstart_mse": xstart_mse,
"mse": mse,
}
def _extract_into_tensor(arr, timesteps, broadcast_shape):
"""
Extract values from a 1-D numpy array for a batch of indices.
:param arr: the 1-D numpy array.
:param timesteps: a tensor of indices into the array to extract.
:param broadcast_shape: a larger shape of K dimensions with the batch
dimension equal to the length of timesteps.
:return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
"""
res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
while len(res.shape) < len(broadcast_shape):
res = res[..., None]
return res + th.zeros(broadcast_shape, device=timesteps.device)
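def _demo_extract_into_tensor():
    """Illustrative sanity check (relies on the module-level `np`/`th` imports
    used throughout this file): _extract_into_tensor picks one per-timestep
    scalar per batch element and broadcasts it to the full tensor shape."""
    arr = np.linspace(0.1, 1.0, 10)          # e.g. 10 per-timestep coefficients
    timesteps = th.tensor([0, 4, 9])         # one timestep index per batch item
    out = _extract_into_tensor(arr, timesteps, (3, 2, 4, 4))
    assert out.shape == (3, 2, 4, 4)
    # Every element of out[i] equals arr[timesteps[i]].
    assert th.allclose(out[1], th.full((2, 4, 4), float(arr[4])))
    return out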
| [] |
2024-01-10 | gohyojun15/ANT_diffusion | diffusion~timestep_sampler.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
from abc import ABC, abstractmethod
import numpy as np
import torch as th
import torch.distributed as dist
def create_named_schedule_sampler(name, diffusion):
"""
Create a ScheduleSampler from a library of pre-defined samplers.
:param name: the name of the sampler.
:param diffusion: the diffusion object to sample for.
"""
if name == "uniform":
return UniformSampler(diffusion)
elif name == "loss-second-moment":
return LossSecondMomentResampler(diffusion)
else:
raise NotImplementedError(f"unknown schedule sampler: {name}")
class ScheduleSampler(ABC):
"""
A distribution over timesteps in the diffusion process, intended to reduce
variance of the objective.
By default, samplers perform unbiased importance sampling, in which the
objective's mean is unchanged.
However, subclasses may override sample() to change how the resampled
terms are reweighted, allowing for actual changes in the objective.
"""
@abstractmethod
def weights(self):
"""
Get a numpy array of weights, one per diffusion step.
The weights needn't be normalized, but must be positive.
"""
def sample(self, batch_size, device):
"""
Importance-sample timesteps for a batch.
:param batch_size: the number of timesteps.
:param device: the torch device to save to.
:return: a tuple (timesteps, weights):
- timesteps: a tensor of timestep indices.
- weights: a tensor of weights to scale the resulting losses.
"""
w = self.weights()
p = w / np.sum(w)
indices_np = np.random.choice(len(p), size=(batch_size,), p=p)
indices = th.from_numpy(indices_np).long().to(device)
weights_np = 1 / (len(p) * p[indices_np])
weights = th.from_numpy(weights_np).float().to(device)
return indices, weights
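def _demo_importance_weights():
    """Illustrative sketch of why sample() above is unbiased: with sampling
    probabilities p and per-sample weights 1 / (T * p[t]), the importance-weighted
    estimate matches the plain uniform average over timesteps."""
    rng = np.random.default_rng(0)
    T = 1000
    w = rng.uniform(0.5, 2.0, size=T)        # arbitrary positive weights
    p = w / w.sum()
    losses = rng.uniform(size=T)             # stand-in per-timestep losses
    idx = rng.choice(T, size=200000, p=p)    # sample timesteps as sample() does
    estimate = np.mean(losses[idx] / (T * p[idx]))
    assert abs(estimate - losses.mean()) < 1e-2   # equal up to Monte-Carlo noise
    return estimate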
class UniformSampler(ScheduleSampler):
def __init__(self, diffusion):
self.diffusion = diffusion
self._weights = np.ones([diffusion.num_timesteps])
def weights(self):
return self._weights
class LossAwareSampler(ScheduleSampler):
def update_with_local_losses(self, local_ts, local_losses):
"""
Update the reweighting using losses from a model.
Call this method from each rank with a batch of timesteps and the
corresponding losses for each of those timesteps.
This method will perform synchronization to make sure all of the ranks
maintain the exact same reweighting.
:param local_ts: an integer Tensor of timesteps.
:param local_losses: a 1D Tensor of losses.
"""
batch_sizes = [
th.tensor([0], dtype=th.int32, device=local_ts.device)
for _ in range(dist.get_world_size())
]
dist.all_gather(
batch_sizes,
th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device),
)
# Pad all_gather batches to be the maximum batch size.
batch_sizes = [x.item() for x in batch_sizes]
max_bs = max(batch_sizes)
timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes]
loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes]
dist.all_gather(timestep_batches, local_ts)
dist.all_gather(loss_batches, local_losses)
timesteps = [x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]]
losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]]
self.update_with_all_losses(timesteps, losses)
@abstractmethod
def update_with_all_losses(self, ts, losses):
"""
Update the reweighting using losses from a model.
Sub-classes should override this method to update the reweighting
using losses from the model.
This method directly updates the reweighting without synchronizing
between workers. It is called by update_with_local_losses from all
ranks with identical arguments. Thus, it should have deterministic
behavior to maintain state across workers.
:param ts: a list of int timesteps.
:param losses: a list of float losses, one per timestep.
"""
class LossSecondMomentResampler(LossAwareSampler):
def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
self.diffusion = diffusion
self.history_per_term = history_per_term
self.uniform_prob = uniform_prob
self._loss_history = np.zeros([diffusion.num_timesteps, history_per_term], dtype=np.float64)
self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)
def weights(self):
if not self._warmed_up():
return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
weights = np.sqrt(np.mean(self._loss_history**2, axis=-1))
weights /= np.sum(weights)
weights *= 1 - self.uniform_prob
weights += self.uniform_prob / len(weights)
return weights
def update_with_all_losses(self, ts, losses):
for t, loss in zip(ts, losses):
if self._loss_counts[t] == self.history_per_term:
# Shift out the oldest loss term.
self._loss_history[t, :-1] = self._loss_history[t, 1:]
self._loss_history[t, -1] = loss
else:
self._loss_history[t, self._loss_counts[t]] = loss
self._loss_counts[t] += 1
def _warmed_up(self):
return (self._loss_counts == self.history_per_term).all()
| [] |
2024-01-10 | gohyojun15/ANT_diffusion | diffusion~diffusion_utils.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
import torch as th
import numpy as np
def normal_kl(mean1, logvar1, mean2, logvar2):
"""
Compute the KL divergence between two gaussians.
Shapes are automatically broadcasted, so batches can be compared to
scalars, among other use cases.
"""
tensor = None
for obj in (mean1, logvar1, mean2, logvar2):
if isinstance(obj, th.Tensor):
tensor = obj
break
assert tensor is not None, "at least one argument must be a Tensor"
# Force variances to be Tensors. Broadcasting helps convert scalars to
# Tensors, but it does not work for th.exp().
logvar1, logvar2 = [
x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor) for x in (logvar1, logvar2)
]
return 0.5 * (
-1.0
+ logvar2
- logvar1
+ th.exp(logvar1 - logvar2)
+ ((mean1 - mean2) ** 2) * th.exp(-logvar2)
)
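def _demo_normal_kl():
    """Illustrative sanity check against the scalar formula
    KL(N(m1, s1^2) || N(m2, s2^2)) = log(s2/s1) + (s1^2 + (m1-m2)^2)/(2*s2^2) - 1/2."""
    # Identical distributions -> zero KL.
    zero = normal_kl(th.tensor(0.0), th.tensor(0.0), th.tensor(0.0), th.tensor(0.0))
    assert th.allclose(zero, th.tensor(0.0), atol=1e-6)
    # KL(N(1, 1) || N(0, 1)) = 0.5 * (1 - 0)^2 = 0.5.
    kl = normal_kl(th.tensor(1.0), th.tensor(0.0), th.tensor(0.0), th.tensor(0.0))
    assert th.allclose(kl, th.tensor(0.5), atol=1e-6)
    return kl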
def approx_standard_normal_cdf(x):
"""
A fast approximation of the cumulative distribution function of the
standard normal.
"""
return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3))))
def continuous_gaussian_log_likelihood(x, *, means, log_scales):
"""
Compute the log-likelihood of a continuous Gaussian distribution.
:param x: the targets
:param means: the Gaussian mean Tensor.
:param log_scales: the Gaussian log stddev Tensor.
:return: a tensor like x of log probabilities (in nats).
"""
centered_x = x - means
inv_stdv = th.exp(-log_scales)
normalized_x = centered_x * inv_stdv
log_probs = th.distributions.Normal(th.zeros_like(x), th.ones_like(x)).log_prob(normalized_x)
return log_probs
def discretized_gaussian_log_likelihood(x, *, means, log_scales):
"""
Compute the log-likelihood of a Gaussian distribution discretizing to a
given image.
:param x: the target images. It is assumed that this was uint8 values,
rescaled to the range [-1, 1].
:param means: the Gaussian mean Tensor.
:param log_scales: the Gaussian log stddev Tensor.
:return: a tensor like x of log probabilities (in nats).
"""
assert x.shape == means.shape == log_scales.shape
centered_x = x - means
inv_stdv = th.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
cdf_plus = approx_standard_normal_cdf(plus_in)
min_in = inv_stdv * (centered_x - 1.0 / 255.0)
cdf_min = approx_standard_normal_cdf(min_in)
log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12))
log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12))
cdf_delta = cdf_plus - cdf_min
log_probs = th.where(
x < -0.999,
log_cdf_plus,
th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))),
)
assert log_probs.shape == x.shape
return log_probs
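def _demo_discretized_gaussian_log_likelihood():
    """Illustrative sketch: the discretized likelihood integrates the Gaussian
    over the width-2/255 bin of the original uint8 value, so a sharp prediction
    centred on the target gets log-probability close to 0 (probability ~1)."""
    x = th.zeros(1, 3, 4, 4)                 # targets already rescaled to [-1, 1]
    means = th.zeros_like(x)                 # prediction centred on the target
    log_scales = th.full_like(x, -7.0)       # stddev ~ exp(-7), far narrower than a bin
    logp = discretized_gaussian_log_likelihood(x, means=means, log_scales=log_scales)
    assert logp.shape == x.shape
    assert (logp > -1e-3).all()              # essentially all mass falls in the bin
    return logp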
| [] |
2024-01-10 | gohyojun15/ANT_diffusion | diffusion~respace.py | # Modified from OpenAI's diffusion repos
# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
import numpy as np
import torch as th
from .gaussian_diffusion import GaussianDiffusion
def space_timesteps(num_timesteps, section_counts):
"""
Create a list of timesteps to use from an original diffusion process,
given the number of timesteps we want to take from equally-sized portions
of the original process.
For example, if there are 300 timesteps and the section counts are [10,15,20]
then the first 100 timesteps are strided to be 10 timesteps, the second 100
are strided to be 15 timesteps, and the final 100 are strided to be 20.
If the stride is a string starting with "ddim", then the fixed striding
from the DDIM paper is used, and only one section is allowed.
:param num_timesteps: the number of diffusion steps in the original
process to divide up.
:param section_counts: either a list of numbers, or a string containing
comma-separated numbers, indicating the step count
per section. As a special case, use "ddimN" where N
is a number of steps to use the striding from the
DDIM paper.
:return: a set of diffusion steps from the original process to use.
"""
if isinstance(section_counts, str):
if section_counts.startswith("ddim"):
desired_count = int(section_counts[len("ddim") :])
for i in range(1, num_timesteps):
if len(range(0, num_timesteps, i)) == desired_count:
return set(range(0, num_timesteps, i))
raise ValueError(f"cannot create exactly {num_timesteps} steps with an integer stride")
section_counts = [int(x) for x in section_counts.split(",")]
size_per = num_timesteps // len(section_counts)
extra = num_timesteps % len(section_counts)
start_idx = 0
all_steps = []
for i, section_count in enumerate(section_counts):
size = size_per + (1 if i < extra else 0)
if size < section_count:
raise ValueError(f"cannot divide section of {size} steps into {section_count}")
if section_count <= 1:
frac_stride = 1
else:
frac_stride = (size - 1) / (section_count - 1)
cur_idx = 0.0
taken_steps = []
for _ in range(section_count):
taken_steps.append(start_idx + round(cur_idx))
cur_idx += frac_stride
all_steps += taken_steps
start_idx += size
return set(all_steps)
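def _demo_space_timesteps():
    """Illustrative sketch reproducing the docstring example above plus the
    "ddimN" shorthand."""
    # 300 steps split into three 100-step sections keeping 10/15/20 steps each.
    steps = space_timesteps(300, [10, 15, 20])
    assert len(steps) == 45
    # "ddim25" keeps 25 evenly strided steps out of 100: 0, 4, 8, ..., 96.
    assert space_timesteps(100, "ddim25") == set(range(0, 100, 4))
    return steps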
class SpacedDiffusion(GaussianDiffusion):
"""
A diffusion process which can skip steps in a base diffusion process.
:param use_timesteps: a collection (sequence or set) of timesteps from the
original diffusion process to retain.
:param kwargs: the kwargs to create the base diffusion process.
"""
def __init__(self, use_timesteps, **kwargs):
self.use_timesteps = set(use_timesteps)
self.timestep_map = []
self.original_num_steps = len(kwargs["betas"])
base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa
last_alpha_cumprod = 1.0
new_betas = []
for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):
if i in self.use_timesteps:
new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)
last_alpha_cumprod = alpha_cumprod
self.timestep_map.append(i)
kwargs["betas"] = np.array(new_betas)
super().__init__(**kwargs)
def p_mean_variance(self, model, *args, **kwargs): # pylint: disable=signature-differs
return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)
def training_losses(self, model, *args, **kwargs): # pylint: disable=signature-differs
return super().training_losses(self._wrap_model(model), *args, **kwargs)
def condition_mean(self, cond_fn, *args, **kwargs):
return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)
def condition_score(self, cond_fn, *args, **kwargs):
return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)
def _wrap_model(self, model):
if isinstance(model, _WrappedModel):
return model
return _WrappedModel(model, self.timestep_map, self.original_num_steps)
def _scale_timesteps(self, t):
# Scaling is done by the wrapped model.
return t
class _WrappedModel:
def __init__(self, model, timestep_map, original_num_steps):
self.model = model
self.timestep_map = timestep_map
# self.rescale_timesteps = rescale_timesteps
self.original_num_steps = original_num_steps
def __call__(self, x, ts, **kwargs):
map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype)
new_ts = map_tensor[ts]
# if self.rescale_timesteps:
# new_ts = new_ts.float() * (1000.0 / self.original_num_steps)
return self.model(x, new_ts, **kwargs)
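def _demo_spaced_diffusion():
    """Illustrative sketch: build a 50-step sampler on top of a 1000-step schedule.
    Assumes the ModelMeanType / ModelVarType / LossType enums are importable from
    .gaussian_diffusion (as in the upstream OpenAI code this file is modified from),
    and uses a linear beta schedule purely for illustration."""
    from .gaussian_diffusion import LossType, ModelMeanType, ModelVarType
    betas = np.linspace(1e-4, 0.02, 1000)
    diffusion = SpacedDiffusion(
        use_timesteps=space_timesteps(1000, "ddim50"),
        betas=betas,
        model_mean_type=ModelMeanType.EPSILON,
        model_var_type=ModelVarType.FIXED_SMALL,
        loss_type=LossType.MSE,
    )
    # Only 50 timesteps are exposed; _WrappedModel maps them back to the
    # original 0..999 indices before calling the real model.
    assert diffusion.num_timesteps == 50
    return diffusion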
| [] |
2024-01-10 | standardgalactic/bonsai-gym | samples~gym-tapecopy-sample~tapecopy_simulator.py | import sys
import logging
from microsoft_bonsai_api.simulator.client import BonsaiClientConfig
from bonsai_gym import GymSimulator3
log = logging.getLogger("gym_simulator")
log.setLevel(logging.DEBUG)
class TapeCopy(GymSimulator3):
# Environment name, from openai-gym
environment_name = "Copy-v0"
simulator_name = "tapecopy_simulator"
# convert openai gym observation to our state type
def gym_to_state(self, observation):
state = {"character": observation}
return state
# convert our action type into openai gym action
def action_to_gym(self, actions):
return [actions["move"], actions["write"], actions["char"]]
if __name__ == "__main__":
# create a brain, openai-gym environment, and simulator
config = BonsaiClientConfig(argv=sys.argv)
sim = TapeCopy(config)
sim.run_gym()
| [] |
2024-01-10 | standardgalactic/bonsai-gym | samples~gym-taxi-sample~taxi_simulator.py | import sys
import logging
from microsoft_bonsai_api.simulator.client import BonsaiClientConfig
from bonsai_gym import GymSimulator3
log = logging.getLogger("gym_simulator")
log.setLevel(logging.DEBUG)
class Taxi(GymSimulator3):
# Environment name, from openai-gym
environment_name = "Taxi-v2"
# simulator name from Inkling
simulator_name = "taxi_simulator"
# convert openai gym observation to our state type
def gym_to_state(self, observation):
state = {"location": int(observation)}
return state
# convert our action type into openai gym action
def action_to_gym(self, action):
return action["command"]
if __name__ == "__main__":
# create a brain, openai-gym environment, and simulator
config = BonsaiClientConfig(argv=sys.argv)
sim = Taxi(config, iteration_limit=200)
sim.run_gym()
| [] |
2024-01-10 | cho4/docuMind | backend~processing.py | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.text_splitter import CharacterTextSplitter
import sqlite3
import datetime
import pickle
import os
# Validates user signup, checks existing usernames
def signup_user(username, password):
conn = sqlite3.connect("pagetalk.db")
cur = conn.cursor()
cur.execute('SELECT * FROM Authentication WHERE Username=?', (username,))
result = cur.fetchone()
if result is None:
cur.execute('INSERT INTO Authentication VALUES (?, ?)', (username, password))
conn.commit()
conn.close()
return True
else:
conn.close()
return False
# Validates user login, checks match between username and password
def validate_user(username, password):
conn = sqlite3.connect("pagetalk.db")
cur = conn.cursor()
cur.execute('SELECT * FROM Authentication WHERE Username=? AND Password=?', (username, password))
if cur.fetchone() is None:
conn.close()
return False
else:
conn.close()
return True
# Runs query through the similarity search and question answering chain
def get_reply(query, conversation, username):
conn = sqlite3.connect("pagetalk.db")
cur = conn.cursor()
cur.execute('SELECT * FROM Chats WHERE Username=? AND Title=?', (username, conversation))
results = cur.fetchone()
chain = pickle.loads(results[3])
db = pickle.loads(results[2])
docs = db.similarity_search(query)
reply = chain.run(input_documents=docs, question=query)
timestamp = int(datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
cur.execute('INSERT INTO Messages VALUES (?, ?, ?, ?, ?)', (timestamp, query, conversation, username, "user"))
cur.execute('INSERT INTO Messages VALUES (?, ?, ?, ?, ?)', (timestamp+1, reply, conversation, username, "bot"))
conn.commit()
conn.close()
return reply
# Stores the text and all relevant information in the database
def store_text(pdf_reader, title, username):
text = get_text(pdf_reader) # Retrieves the raw text from the pdf
chunks = chunk_text(text) # Separates the text into chunks
embeddings = OpenAIEmbeddings()
db = FAISS.from_texts(chunks, embeddings)
chain = load_qa_chain(OpenAI(), chain_type="stuff")
db_serialized = pickle.dumps(db)
chain_serialized = pickle.dumps(chain)
conn = sqlite3.connect("pagetalk.db")
cur = conn.cursor()
cur.execute('INSERT INTO Chats VALUES(?, ?, ?, ?)', (title, username, db_serialized, chain_serialized))
conn.commit()
conn.close()
# Separates text into chunks for token limit
def chunk_text(text):
text_splitter = CharacterTextSplitter(
separator = '\n',
chunk_size = 2000,
chunk_overlap = 200,
length_function = len,
)
chunks = text_splitter.split_text(text)
return chunks
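# Illustrative sketch of chunk_text's behaviour: with chunk_size=2000 and
# chunk_overlap=200, consecutive chunks share some text, so an answer that
# spans a chunk boundary is still retrievable by the similarity search in
# get_reply above.
def _demo_chunk_text():
    text = "\n".join("Line %d: %s" % (i, "lorem ipsum " * 20) for i in range(200))
    chunks = chunk_text(text)
    assert len(chunks) > 1                   # ~50k characters -> several chunks
    assert all(isinstance(c, str) for c in chunks)
    return chunks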
# Retrieves raw text from pdf
def get_text(pdf_reader):
raw_text = ''
for page in pdf_reader.pages:
text = page.extract_text()
if text:
raw_text += text
return raw_text
def get_chats(username, title):
conn = sqlite3.connect("pagetalk.db")
cur = conn.cursor()
cur.execute("SELECT Message, Sender FROM Messages WHERE Username=? AND Title=? ORDER BY Timestamp", (username, title))
values = cur.fetchall()
conn.close()
messages = []
for msg in values:
messages.append({"message": msg[0], "sender": msg[1]})
return messages | [] |
2024-01-10 | mattzcarey/mini-quivr-demo | demo~prompts~system_prompt.py | from langchain.prompts import PromptTemplate
template = """You are Mini Quivr, a friendly chatbot and personal assistant. Answer questions from the user to the best of your ability. If you don't know the answer, just say that you don't know, don't try to make up an answer: {question}
Answer:"""
def build_prompt():
prompt = PromptTemplate(template=template, input_variables=["question"])
return prompt | [
"question",
"You are Mini Quivr, a friendly chatbot and personal assistant. Answer questions from the user to the best of your ability. If you don't know the answer, just say that you don't know, don't try to make up an answer: {question}\n\nAnswer:",
"t know the answer, just say that you don"
] |
2024-01-10 | mattzcarey/mini-quivr-demo | demo~llm~handlers.py | """These help with streaming and showing the state of the model. They are quite finicky so I recommend you don't change them."""
import os
import streamlit as st
from langchain.callbacks.base import BaseCallbackHandler
class StreamHandler(BaseCallbackHandler):
def __init__(self, container: st.delta_generator.DeltaGenerator, initial_text: str = ""):
self.container = container
self.text = initial_text
self.run_id_ignore_token = None
def on_llm_start(self, serialized: dict, prompts: list, **kwargs):
# Workaround to prevent showing the rephrased question as output
if prompts[0].startswith("Human"):
self.run_id_ignore_token = kwargs.get("run_id")
def on_llm_new_token(self, token: str, **kwargs) -> None:
if self.run_id_ignore_token == kwargs.get("run_id", False):
return
self.text += token
self.container.markdown(self.text)
class PrintRetrievalHandler(BaseCallbackHandler):
def __init__(self, container):
self.status = container.status("**Context Retrieval**")
def on_retriever_start(self, serialized: dict, query: str, **kwargs):
self.status.write(f"**Question:** {query}")
self.status.update(label=f"**Context Retrieval:** {query}")
def on_retriever_end(self, documents, **kwargs):
for idx, doc in enumerate(documents):
source = os.path.basename(doc.metadata["source"])
self.status.write(f"**Document {idx} from {source}**")
self.status.markdown(doc.page_content)
self.status.update(state="complete") | [] |
2024-01-10 | mattzcarey/mini-quivr-demo | demo~llm~retriever.py | import os
import tempfile
import streamlit as st
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import BedrockEmbeddings, OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from demo.constants.settings import SETTINGS
# Embeddings
embeddings = OpenAIEmbeddings(openai_api_key=SETTINGS.openai_api_key.get_secret_value())
# embeddings = BedrockEmbeddings(region_name="us-east-1") # type: ignore
@st.cache_resource(ttl="1h")
def configure_retriever(uploaded_files):
# Read documents
docs = []
temp_dir = tempfile.TemporaryDirectory()
for file in uploaded_files:
temp_filepath = os.path.join(temp_dir.name, file.name)
with open(temp_filepath, "wb") as f:
f.write(file.getvalue())
loader = PyPDFLoader(temp_filepath)
docs.extend(loader.load())
# Split documents
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1500, chunk_overlap=200)
splits = text_splitter.split_documents(docs)
# Store in vectordb
vectordb = Chroma.from_documents(splits, embeddings)
# Define retriever
retriever = vectordb.as_retriever()
return retriever | [] |
2024-01-10 | FedML-AI/FedML | python~examples~deploy~complex_example~src~main_entry.py | import os
from fedml.serving import FedMLPredictor
from fedml.serving import FedMLInferenceRunner
from langchain import PromptTemplate, LLMChain
from langchain.llms import HuggingFacePipeline
import torch
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
TextGenerationPipeline,
)
class Chatbot(FedMLPredictor): # Inherit FedMLClientPredictor
def __init__(self):
super().__init__()
PROMPT_FOR_GENERATION_FORMAT = f""""Below is an instruction that describes a task. Write a response that appropriately completes the request."
### Instruction:
{{instruction}}
### Response:
"""
prompt = PromptTemplate(
input_variables=["instruction"],
template=PROMPT_FOR_GENERATION_FORMAT
)
config = AutoConfig.from_pretrained("EleutherAI/pythia-70m")
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/pythia-70m",
torch_dtype=torch.float32, # float 16 not supported on CPU
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-70m", device_map="auto")
hf_pipeline = HuggingFacePipeline(
pipeline=TextGenerationPipeline(
model=model,
tokenizer=tokenizer,
return_full_text=True,
task="text-generation",
do_sample=True,
max_new_tokens=256,
top_p=0.92,
top_k=0
)
)
self.chatbot = LLMChain(llm=hf_pipeline, prompt=prompt, verbose=True)
def predict(self, request:dict):
input_dict = request
question: str = input_dict.get("text", "").strip()
if len(question) == 0:
response_text = "<received empty input; no response generated.>"
else:
response_text = self.chatbot.predict(instruction=question)
return {"generated_text": str(response_text)}
if __name__ == "__main__":
print("Program starts...")
# Parse arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=50051, help="port number")
args = parser.parse_args()
print(f"args.batch_size: {args.batch_size}")
# Parse environment variables
local_rank = int(os.environ.get("LOCAL_RANK", 100))
print(f"local rank: {local_rank}")
chatbot = Chatbot()
fedml_inference_runner = FedMLInferenceRunner(chatbot)
fedml_inference_runner.run() | [
"\"Below is an instruction that describes a task. Write a response that appropriately completes the request.\"\n\n ### Instruction:\n {instruction}\n\n ### Response:\n ",
"Below is an instruction that describes a task. Write a response that appropriately completes the request.",
"instruction"
] |
2024-01-10 | FedML-AI/FedML | python~fedml~serving~templates~hf_template~src~main_entry.py | import os
from fedml.serving import FedMLPredictor
from fedml.serving import FedMLInferenceRunner
from langchain import PromptTemplate, LLMChain
from langchain.llms import HuggingFacePipeline
import torch
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
TextGenerationPipeline,
)
class Chatbot(FedMLPredictor): # Inherit FedMLClientPredictor
def __init__(self):
super().__init__()
PROMPT_FOR_GENERATION_FORMAT = f""""Below is an instruction that describes a task. Write a response that appropriately completes the request."
### Instruction:
{{instruction}}
### Response:
"""
prompt = PromptTemplate(
input_variables=["instruction"],
template=PROMPT_FOR_GENERATION_FORMAT
)
config = AutoConfig.from_pretrained(os.getenv("hf_model_name", "EleutherAI/pythia-70m"))
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/pythia-70m",
torch_dtype=torch.float32, # float 16 not supported on CPU
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(
os.getenv("hf_model_name", "EleutherAI/pythia-70m"), device_map="auto")
hf_pipeline = HuggingFacePipeline(
pipeline=TextGenerationPipeline(
model=model,
tokenizer=tokenizer,
return_full_text=True,
task="text-generation",
do_sample=True,
max_new_tokens=256,
top_p=0.92,
top_k=0
)
)
self.chatbot = LLMChain(llm=hf_pipeline, prompt=prompt, verbose=True)
def predict(self, request:dict):
input_dict = request
question: str = input_dict.get("text", "").strip()
if len(question) == 0:
response_text = "<received empty input; no response generated.>"
else:
response_text = self.chatbot.predict(instruction=question)
return {"generated_text": str(response_text)}
if __name__ == "__main__":
chatbot = Chatbot()
fedml_inference_runner = FedMLInferenceRunner(chatbot)
fedml_inference_runner.run() | [
"\"Below is an instruction that describes a task. Write a response that appropriately completes the request.\"\n\n ### Instruction:\n {instruction}\n\n ### Response:\n ",
"Below is an instruction that describes a task. Write a response that appropriately completes the request.",
"instruction"
] |
2024-01-10 | FedML-AI/FedML | python~examples~deploy~quick_start~src~main_entry.py | import os
from fedml.serving import FedMLPredictor
from fedml.serving import FedMLInferenceRunner
from langchain import PromptTemplate, LLMChain
from langchain.llms import HuggingFacePipeline
import torch
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
TextGenerationPipeline,
)
class Chatbot(FedMLPredictor): # Inherit FedMLClientPredictor
def __init__(self):
super().__init__()
PROMPT_FOR_GENERATION_FORMAT = f""""Below is an instruction that describes a task. Write a response that appropriately completes the request."
### Instruction:
{{instruction}}
### Response:
"""
prompt = PromptTemplate(
input_variables=["instruction"],
template=PROMPT_FOR_GENERATION_FORMAT
)
config = AutoConfig.from_pretrained("EleutherAI/pythia-70m")
model = AutoModelForCausalLM.from_pretrained(
"EleutherAI/pythia-70m",
torch_dtype=torch.float32, # float 16 not supported on CPU
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-70m", device_map="auto")
hf_pipeline = HuggingFacePipeline(
pipeline=TextGenerationPipeline(
model=model,
tokenizer=tokenizer,
return_full_text=True,
task="text-generation",
do_sample=True,
max_new_tokens=256,
top_p=0.92,
top_k=0
)
)
self.chatbot = LLMChain(llm=hf_pipeline, prompt=prompt, verbose=True)
def predict(self, request:dict):
input_dict = request
question: str = input_dict.get("text", "").strip()
if len(question) == 0:
response_text = "<received empty input; no response generated.>"
else:
response_text = self.chatbot.predict(instruction=question)
return {"generated_text": str(response_text)}
if __name__ == "__main__":
chatbot = Chatbot()
fedml_inference_runner = FedMLInferenceRunner(chatbot)
fedml_inference_runner.run() | [
"\"Below is an instruction that describes a task. Write a response that appropriately completes the request.\"\n\n ### Instruction:\n {instruction}\n\n ### Response:\n ",
"Below is an instruction that describes a task. Write a response that appropriately completes the request.",
"instruction"
] |
2024-01-10 | FedML-AI/FedML | python~examples~deploy~your_own_llm~src~main_entry.py | import os
from fedml.serving import FedMLPredictor
from fedml.serving import FedMLInferenceRunner
from langchain import PromptTemplate, LLMChain
from langchain.llms import HuggingFacePipeline
import torch
from transformers import (
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
TextGenerationPipeline,
)
# DATA_CACHE_DIR is a LOCAL folder that contains the model and config files if
# you do NOT want to transfer the model and config files to MLOps.
# In that case, there is no need to mention DATA_CACHE_DIR in the fedml_model_config.yaml.
DATA_CACHE_DIR = "~/.fedml/fedml_serving/model_and_config"
DATA_CACHE_DIR = os.path.expanduser(DATA_CACHE_DIR) # Use absolute path
class Chatbot(FedMLPredictor): # Inherit FedMLClientPredictor
def __init__(self):
super().__init__()
PROMPT_FOR_GENERATION_FORMAT = f""""Below is an instruction that describes a task. Write a response that appropriately completes the request."
### Instruction:
{{instruction}}
### Response:
"""
prompt = PromptTemplate(
input_variables=["instruction"],
template=PROMPT_FOR_GENERATION_FORMAT
)
config = AutoConfig.from_pretrained(DATA_CACHE_DIR)
model = AutoModelForCausalLM.from_pretrained(
DATA_CACHE_DIR,
torch_dtype=torch.float32, # float 16 not supported on CPU
trust_remote_code=True,
device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(DATA_CACHE_DIR, device_map="auto")
hf_pipeline = HuggingFacePipeline(
pipeline=TextGenerationPipeline(
model=model,
tokenizer=tokenizer,
return_full_text=True,
task="text-generation",
do_sample=True,
max_new_tokens=256,
top_p=0.92,
top_k=0
)
)
self.chatbot = LLMChain(llm=hf_pipeline, prompt=prompt, verbose=True)
def predict(self, request:dict):
input_dict = request
question: str = input_dict.get("text", "").strip()
if len(question) == 0:
response_text = "<received empty input; no response generated.>"
else:
response_text = self.chatbot.predict(instruction=question)
return {"generated_text": str(response_text)}
if __name__ == "__main__":
chatbot = Chatbot()
fedml_inference_runner = FedMLInferenceRunner(chatbot)
fedml_inference_runner.run() | [
"\"Below is an instruction that describes a task. Write a response that appropriately completes the request.\"\n\n ### Instruction:\n {instruction}\n\n ### Response:\n ",
"Below is an instruction that describes a task. Write a response that appropriately completes the request.",
"instruction"
] |
2024-01-10 | cohere-ai/sandbox-grounded-qa | qa~model.py | # Copyright (c) 2022 Cohere Inc. and its affiliates.
#
# Licensed under the MIT License (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License in the LICENSE file at the top
# level of this repository.
import os
import cohere
import numpy as np
from cohere.classify import Example
from qa.util import pretty_print
_DATA_DIRNAME = os.path.join(os.path.dirname(__file__), "prompt_data")
def get_contextual_search_query(history, co, model="xlarge", verbosity=0):
"""Adds message history context to user query."""
prompt_path = os.path.join(_DATA_DIRNAME, "get_contextual_search_query.prompt")
with open(prompt_path) as f:
prompt = f.read() + f"{history}\n-"
prediction = co.generate(
model=model,
prompt=prompt,
max_tokens=50,
temperature=0.75,
k=0,
p=0.75,
frequency_penalty=0,
presence_penalty=0,
stop_sequences=["\n"],
return_likelihoods="GENERATION",
num_generations=4,
)
likelihood = [g.likelihood for g in prediction.generations]
result = prediction.generations[np.argmax(likelihood)].text
if verbosity:
pretty_print("OKGREEN", "contextual question prompt: " + prompt)
pretty_print("OKCYAN", "contextual question: " + result)
return result.strip()
def get_sample_answer(question, co, model="xlarge"):
"""Return a sample answer to a question based on the model's training data."""
prompt_path = os.path.join(_DATA_DIRNAME, "get_sample_answer.prompt")
with open(prompt_path) as f:
prompt = f.read() + f"{question}\nAnswer:"
response = co.generate(model=model,
prompt=prompt,
max_tokens=50,
temperature=0.8,
k=0,
p=0.7,
stop_sequences=["--"])
return response.generations[0].text
| [
"get_contextual_search_query.prompt",
"get_sample_answer.prompt",
"PLACEHOLDER\n-",
"PLACEHOLDER\nAnswer:"
] |
2024-01-10 | cohere-ai/sandbox-grounded-qa | qa~bot.py | # Copyright (c) 2022 Cohere Inc. and its affiliates.
#
# Licensed under the MIT License (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License in the LICENSE file at the top
# level of this repository.
from sys import settrace
import cohere
from qa.answer import answer_with_search
from qa.model import get_contextual_search_query
from qa.util import pretty_print
class GroundedQaBot():
"""A class yielding Grounded question-answering conversational agents."""
def __init__(self, cohere_api_key, serp_api_key):
self._cohere_api_key = cohere_api_key
self._serp_api_key = serp_api_key
self._chat_history = []
self._co = cohere.Client(self._cohere_api_key)
@property
def chat_history(self):
return self._chat_history
def set_chat_history(self, chat_history):
self._chat_history = chat_history
def answer(self, question, verbosity=0, n_paragraphs=1, url=None, model='xlarge'):
"""Answer a question, based on recent conversational history."""
self.chat_history.append("user: " + question)
history = "\n".join(self.chat_history[-6:])
question = get_contextual_search_query(history, self._co, verbosity=verbosity)
answer_text, source_urls, source_texts = answer_with_search(question,
self._co,
self._serp_api_key,
verbosity=verbosity,
url=url,
model=model,
n_paragraphs=n_paragraphs)
self._chat_history.append("bot: " + answer_text)
if not source_texts or "".join(source_texts).isspace():
reply = ("Sorry, I could not find any relevant information for that "
"question.")
elif answer_text.strip() == question.strip():
reply = ("I had trouble answering the question, but maybe this link on "
"the right will help.")
else:
reply = f"{answer_text}"
return (reply, source_urls, source_texts)
| [] |
2024-01-10 | mgoin/langchain | libs~experimental~langchain_experimental~comprehend_moderation~pii.py | import asyncio
from typing import Any, Dict, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
class ComprehendPII:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "PII",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def validate(self, prompt_value: str, config: Any = None) -> str:
redact = config.get("redact")
return (
self._detect_pii(prompt_value=prompt_value, config=config)
if redact
else self._contains_pii(prompt_value=prompt_value, config=config)
)
def _contains_pii(self, prompt_value: str, config: Any = None) -> str:
"""
Checks for Personally Identifiable Information (PII) labels above a
specified threshold. Uses Amazon Comprehend Contains PII Entities API. See -
https://docs.aws.amazon.com/comprehend/latest/APIReference/API_ContainsPiiEntities.html
Args:
prompt_value (str): The input text to be checked for PII labels.
config (Dict[str, Any]): Configuration for PII check and actions.
Returns:
str: the original prompt
Note:
- The provided client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.contains_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
threshold = config.get("threshold")
pii_labels = config.get("labels")
pii_found = False
for entity in pii_identified["Labels"]:
if (entity["Score"] >= threshold and entity["Name"] in pii_labels) or (
entity["Score"] >= threshold and not pii_labels
):
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
return prompt_value
def _detect_pii(self, prompt_value: str, config: Optional[Dict[str, Any]]) -> str:
"""
Detects and handles Personally Identifiable Information (PII) entities in the
given prompt text using Amazon Comprehend's detect_pii_entities API. The
function provides options to redact or stop processing based on the identified
PII entities and a provided configuration. Uses Amazon Comprehend Detect PII
Entities API.
Args:
prompt_value (str): The input text to be checked for PII entities.
config (Dict[str, Any]): A configuration specifying how to handle
PII entities.
Returns:
str: The processed prompt text with redacted PII entities or raised
exceptions.
Raises:
ValueError: If the prompt contains configured PII entities for
stopping processing.
Note:
- If PII is not found in the prompt, the original prompt is returned.
- The client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.detect_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
if (pii_identified["Entities"]) == []:
if self.callback and self.callback.pii_callback:
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
pii_found = False
if not config and pii_identified["Entities"]:
for entity in pii_identified["Entities"]:
if entity["Score"] >= 0.5:
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
else:
threshold = config.get("threshold") # type: ignore
pii_labels = config.get("labels") # type: ignore
mask_marker = config.get("mask_character") # type: ignore
pii_found = False
for entity in pii_identified["Entities"]:
if (
pii_labels
and entity["Type"] in pii_labels
and entity["Score"] >= threshold
) or (not pii_labels and entity["Score"] >= threshold):
pii_found = True
char_offset_begin = entity["BeginOffset"]
char_offset_end = entity["EndOffset"]
mask_length = char_offset_end - char_offset_begin + 1
masked_part = mask_marker * mask_length
prompt_value = (
prompt_value[:char_offset_begin]
+ masked_part
+ prompt_value[char_offset_end + 1 :]
)
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
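# A minimal usage sketch for the two PII helpers above (illustrative only): the
# enclosing moderation class is defined earlier in this module and is assumed to be
# constructed with a boto3 Comprehend client, as the docstrings note. The config
# keys ("threshold", "labels", "mask_character") and the label names are examples.
#
#   import boto3
#   comprehend = boto3.client("comprehend", region_name="us-east-1")
#   config = {"threshold": 0.8, "labels": ["EMAIL", "SSN"], "mask_character": "*"}
#   # moderator = <PII moderation class>(client=comprehend)
#   # redacted = moderator._detect_pii("Reach me at jane@example.com", config)
#   # Entities scoring above the threshold are replaced with the mask character.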
| [] |
2024-01-10 | mgoin/langchain | libs~langchain~langchain~vectorstores~pgembedding.py | from __future__ import annotations
import logging
import uuid
from typing import Any, Dict, Iterable, List, Optional, Tuple, Type
import sqlalchemy
from sqlalchemy import func
from sqlalchemy.dialects.postgresql import JSON, UUID
from sqlalchemy.orm import Session, declarative_base, relationship
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
Base = declarative_base() # type: Any
ADA_TOKEN_COUNT = 1536
_LANGCHAIN_DEFAULT_COLLECTION_NAME = "langchain"
class BaseModel(Base):
"""Base model for all SQL stores."""
__abstract__ = True
uuid = sqlalchemy.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
class CollectionStore(BaseModel):
"""Collection store."""
__tablename__ = "langchain_pg_collection"
name = sqlalchemy.Column(sqlalchemy.String)
cmetadata = sqlalchemy.Column(JSON)
embeddings = relationship(
"EmbeddingStore",
back_populates="collection",
passive_deletes=True,
)
@classmethod
def get_by_name(cls, session: Session, name: str) -> Optional["CollectionStore"]:
return session.query(cls).filter(cls.name == name).first() # type: ignore
@classmethod
def get_or_create(
cls,
session: Session,
name: str,
cmetadata: Optional[dict] = None,
) -> Tuple["CollectionStore", bool]:
"""
Get or create a collection.
Returns [Collection, bool] where the bool is True if the collection was created.
"""
created = False
collection = cls.get_by_name(session, name)
if collection:
return collection, created
collection = cls(name=name, cmetadata=cmetadata)
session.add(collection)
session.commit()
created = True
return collection, created
class EmbeddingStore(BaseModel):
"""Embedding store."""
__tablename__ = "langchain_pg_embedding"
collection_id = sqlalchemy.Column(
UUID(as_uuid=True),
sqlalchemy.ForeignKey(
f"{CollectionStore.__tablename__}.uuid",
ondelete="CASCADE",
),
)
collection = relationship(CollectionStore, back_populates="embeddings")
embedding = sqlalchemy.Column(sqlalchemy.ARRAY(sqlalchemy.REAL)) # type: ignore
document = sqlalchemy.Column(sqlalchemy.String, nullable=True)
cmetadata = sqlalchemy.Column(JSON, nullable=True)
# custom_id : any user defined id
custom_id = sqlalchemy.Column(sqlalchemy.String, nullable=True)
class QueryResult:
"""Result from a query."""
EmbeddingStore: EmbeddingStore
distance: float
class PGEmbedding(VectorStore):
"""`Postgres` with the `pg_embedding` extension as a vector store.
    pg_embedding uses sequential scan by default, but you can create an HNSW index
using the create_hnsw_index method.
- `connection_string` is a postgres connection string.
- `embedding_function` any embedding function implementing
`langchain.embeddings.base.Embeddings` interface.
- `collection_name` is the name of the collection to use. (default: langchain)
- NOTE: This is not the name of the table, but the name of the collection.
            The tables will be created when initializing the store (if they do not
            exist), so make sure the user has the right permissions to create tables.
- `distance_strategy` is the distance strategy to use. (default: EUCLIDEAN)
- `EUCLIDEAN` is the euclidean distance.
- `pre_delete_collection` if True, will delete the collection if it exists.
(default: False)
- Useful for testing.
"""
def __init__(
self,
connection_string: str,
embedding_function: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
collection_metadata: Optional[dict] = None,
pre_delete_collection: bool = False,
logger: Optional[logging.Logger] = None,
) -> None:
self.connection_string = connection_string
self.embedding_function = embedding_function
self.collection_name = collection_name
self.collection_metadata = collection_metadata
self.pre_delete_collection = pre_delete_collection
self.logger = logger or logging.getLogger(__name__)
self.__post_init__()
def __post_init__(
self,
) -> None:
self._conn = self.connect()
self.create_hnsw_extension()
self.create_tables_if_not_exists()
self.create_collection()
@property
def embeddings(self) -> Embeddings:
return self.embedding_function
def connect(self) -> sqlalchemy.engine.Connection:
engine = sqlalchemy.create_engine(self.connection_string)
conn = engine.connect()
return conn
def create_hnsw_extension(self) -> None:
try:
with Session(self._conn) as session:
statement = sqlalchemy.text("CREATE EXTENSION IF NOT EXISTS embedding")
session.execute(statement)
session.commit()
except Exception as e:
self.logger.exception(e)
def create_tables_if_not_exists(self) -> None:
with self._conn.begin():
Base.metadata.create_all(self._conn)
def drop_tables(self) -> None:
with self._conn.begin():
Base.metadata.drop_all(self._conn)
def create_collection(self) -> None:
if self.pre_delete_collection:
self.delete_collection()
with Session(self._conn) as session:
CollectionStore.get_or_create(
session, self.collection_name, cmetadata=self.collection_metadata
)
def create_hnsw_index(
self,
max_elements: int = 10000,
dims: int = ADA_TOKEN_COUNT,
m: int = 8,
ef_construction: int = 16,
ef_search: int = 16,
) -> None:
create_index_query = sqlalchemy.text(
"CREATE INDEX IF NOT EXISTS langchain_pg_embedding_idx "
"ON langchain_pg_embedding USING hnsw (embedding) "
"WITH ("
"maxelements = {}, "
"dims = {}, "
"m = {}, "
"efconstruction = {}, "
"efsearch = {}"
");".format(max_elements, dims, m, ef_construction, ef_search)
)
# Execute the queries
try:
with Session(self._conn) as session:
# Create the HNSW index
session.execute(create_index_query)
session.commit()
print("HNSW extension and index created successfully.")
except Exception as e:
print(f"Failed to create HNSW extension or index: {e}")
def delete_collection(self) -> None:
self.logger.debug("Trying to delete collection")
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
self.logger.warning("Collection not found")
return
session.delete(collection)
session.commit()
def get_collection(self, session: Session) -> Optional["CollectionStore"]:
return CollectionStore.get_by_name(session, self.collection_name)
@classmethod
def _initialize_from_embeddings(
cls,
texts: List[str],
embeddings: List[List[float]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGEmbedding:
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
if not metadatas:
metadatas = [{} for _ in texts]
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
pre_delete_collection=pre_delete_collection,
)
store.add_embeddings(
texts=texts, embeddings=embeddings, metadatas=metadatas, ids=ids, **kwargs
)
return store
def add_embeddings(
self,
texts: List[str],
embeddings: List[List[float]],
metadatas: List[dict],
ids: List[str],
**kwargs: Any,
) -> None:
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids):
embedding_store = EmbeddingStore(
embedding=embedding,
document=text,
cmetadata=metadata,
custom_id=id,
)
collection.embeddings.append(embedding_store)
session.add(embedding_store)
session.commit()
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
embeddings = self.embedding_function.embed_documents(list(texts))
if not metadatas:
metadatas = [{} for _ in texts]
with Session(self._conn) as session:
collection = self.get_collection(session)
if not collection:
raise ValueError("Collection not found")
for text, metadata, embedding, id in zip(texts, metadatas, embeddings, ids):
embedding_store = EmbeddingStore(
embedding=embedding,
document=text,
cmetadata=metadata,
custom_id=id,
)
collection.embeddings.append(embedding_store)
session.add(embedding_store)
session.commit()
return ids
def similarity_search(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
embedding = self.embedding_function.embed_query(text=query)
return self.similarity_search_by_vector(
embedding=embedding,
k=k,
filter=filter,
)
def similarity_search_with_score(
self,
query: str,
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
embedding = self.embedding_function.embed_query(query)
docs = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return docs
def similarity_search_with_score_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
) -> List[Tuple[Document, float]]:
with Session(self._conn) as session:
collection = self.get_collection(session)
set_enable_seqscan_stmt = sqlalchemy.text("SET enable_seqscan = off")
session.execute(set_enable_seqscan_stmt)
if not collection:
raise ValueError("Collection not found")
filter_by = EmbeddingStore.collection_id == collection.uuid
if filter is not None:
filter_clauses = []
for key, value in filter.items():
IN = "in"
if isinstance(value, dict) and IN in map(str.lower, value):
value_case_insensitive = {
k.lower(): v for k, v in value.items()
}
filter_by_metadata = EmbeddingStore.cmetadata[key].astext.in_(
value_case_insensitive[IN]
)
filter_clauses.append(filter_by_metadata)
else:
filter_by_metadata = EmbeddingStore.cmetadata[
key
].astext == str(value)
filter_clauses.append(filter_by_metadata)
filter_by = sqlalchemy.and_(filter_by, *filter_clauses)
results: List[QueryResult] = (
session.query(
EmbeddingStore,
func.abs(EmbeddingStore.embedding.op("<->")(embedding)).label(
"distance"
),
) # Specify the columns you need here, e.g., EmbeddingStore.embedding
.filter(filter_by)
.order_by(
func.abs(EmbeddingStore.embedding.op("<->")(embedding)).asc()
) # Using PostgreSQL specific operator with the correct column name
.limit(k)
.all()
)
docs = [
(
Document(
page_content=result.EmbeddingStore.document,
metadata=result.EmbeddingStore.cmetadata,
),
result.distance if self.embedding_function is not None else None,
)
for result in results
]
return docs
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = 4,
filter: Optional[dict] = None,
**kwargs: Any,
) -> List[Document]:
docs_and_scores = self.similarity_search_with_score_by_vector(
embedding=embedding, k=k, filter=filter
)
return [doc for doc, _ in docs_and_scores]
@classmethod
def from_texts(
cls: Type[PGEmbedding],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGEmbedding:
embeddings = embedding.embed_documents(list(texts))
return cls._initialize_from_embeddings(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_embeddings(
cls,
text_embeddings: List[Tuple[str, List[float]]],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGEmbedding:
texts = [t[0] for t in text_embeddings]
embeddings = [t[1] for t in text_embeddings]
return cls._initialize_from_embeddings(
texts,
embeddings,
embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
pre_delete_collection=pre_delete_collection,
**kwargs,
)
@classmethod
def from_existing_index(
cls: Type[PGEmbedding],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGEmbedding:
connection_string = cls.get_connection_string(kwargs)
store = cls(
connection_string=connection_string,
collection_name=collection_name,
embedding_function=embedding,
pre_delete_collection=pre_delete_collection,
)
return store
@classmethod
def get_connection_string(cls, kwargs: Dict[str, Any]) -> str:
connection_string: str = get_from_dict_or_env(
data=kwargs,
key="connection_string",
env_key="POSTGRES_CONNECTION_STRING",
)
if not connection_string:
raise ValueError(
"Postgres connection string is required"
"Either pass it as a parameter"
"or set the POSTGRES_CONNECTION_STRING environment variable."
)
return connection_string
@classmethod
def from_documents(
cls: Type[PGEmbedding],
documents: List[Document],
embedding: Embeddings,
collection_name: str = _LANGCHAIN_DEFAULT_COLLECTION_NAME,
ids: Optional[List[str]] = None,
pre_delete_collection: bool = False,
**kwargs: Any,
) -> PGEmbedding:
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
connection_string = cls.get_connection_string(kwargs)
kwargs["connection_string"] = connection_string
return cls.from_texts(
texts=texts,
pre_delete_collection=pre_delete_collection,
embedding=embedding,
metadatas=metadatas,
ids=ids,
collection_name=collection_name,
**kwargs,
)
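if __name__ == "__main__":
    # Minimal usage sketch, not part of the library: assumes a reachable Postgres
    # instance with the pg_embedding extension available, an OPENAI_API_KEY in the
    # environment, and a placeholder connection string.
    from langchain.embeddings.openai import OpenAIEmbeddings
    store = PGEmbedding.from_texts(
        texts=["foo", "bar", "baz"],
        embedding=OpenAIEmbeddings(),
        collection_name="demo",
        connection_string="postgresql+psycopg2://user:pass@localhost:5432/postgres",
    )
    store.create_hnsw_index(dims=ADA_TOKEN_COUNT)
    print(store.similarity_search("foo", k=1))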
| [] |
2024-01-10 | mgoin/langchain | libs~langchain~tests~integration_tests~llms~test_minimax.py | """Test Minimax API wrapper."""
from langchain.llms.minimax import Minimax
def test_minimax_call() -> None:
"""Test valid call to minimax."""
llm = Minimax(max_tokens=10)
output = llm("Hello world!")
assert isinstance(output, str)
| [] |
2024-01-10 | mgoin/langchain | libs~langchain~tests~integration_tests~vectorstores~test_xata.py | """Test Xata vector store functionality.
Before running this test, please create a Xata database by following
the instructions from:
https://python.langchain.com/docs/integrations/vectorstores/xata
"""
import os
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.xata import XataVectorStore
class TestXata:
@classmethod
def setup_class(cls) -> None:
assert os.getenv("XATA_API_KEY"), "XATA_API_KEY environment variable is not set"
assert os.getenv("XATA_DB_URL"), "XATA_DB_URL environment variable is not set"
def test_similarity_search_without_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end constructions and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
docsearch.delete(delete_all=True)
def test_similarity_search_with_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with a metadata filter.
This test requires a column named "a" of type integer to be present
in the Xata table."""
texts = ["foo", "foo", "foo"]
metadatas = [{"a": i} for i in range(len(texts))]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
metadatas=metadatas,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1, filter={"a": 1})
assert output == [Document(page_content="foo", metadata={"a": 1})]
docsearch.delete(delete_all=True)
| [] |
2024-01-10 | mgoin/langchain | libs~langchain~tests~integration_tests~document_loaders~test_recursive_url_loader.py | from langchain.document_loaders.recursive_url_loader import RecursiveUrlLoader
def test_async_recursive_url_loader() -> None:
url = "https://docs.python.org/3.9/"
loader = RecursiveUrlLoader(
url=url, extractor=lambda _: "placeholder", use_async=True, max_depth=1
)
docs = loader.load()
assert len(docs) == 24
assert docs[0].page_content == "placeholder"
def test_sync_recursive_url_loader() -> None:
url = "https://docs.python.org/3.9/"
loader = RecursiveUrlLoader(
url=url, extractor=lambda _: "placeholder", use_async=False, max_depth=1
)
docs = loader.load()
assert len(docs) == 24
assert docs[0].page_content == "placeholder"
def test_loading_invalid_url() -> None:
url = "https://this.url.is.invalid/this/is/a/test"
loader = RecursiveUrlLoader(
url=url, max_depth=1, extractor=lambda _: "placeholder", use_async=False
)
docs = loader.load()
assert len(docs) == 0
| [] |
2024-01-10 | mgoin/langchain | libs~experimental~langchain_experimental~comprehend_moderation~toxicity.py | import asyncio
import importlib
from typing import Any, List, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationToxicityError,
)
class ComprehendToxicity:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "Toxicity",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def _toxicity_init_validate(self, max_size: int) -> Any:
"""
Validate and initialize toxicity processing configuration.
Args:
max_size (int): Maximum sentence size defined in the
configuration object.
Raises:
Exception: If the maximum sentence size exceeds the 5KB limit.
Note:
This function ensures that the NLTK punkt tokenizer is downloaded
if not already present.
        Returns:
            The imported nltk module, with the punkt tokenizer available.
"""
if max_size > 1024 * 5:
raise Exception("The sentence length should not exceed 5KB.")
try:
nltk = importlib.import_module("nltk")
nltk.data.find("tokenizers/punkt")
return nltk
except ImportError:
raise ModuleNotFoundError(
"Could not import nltk python package. "
"Please install it with `pip install nltk`."
)
        except LookupError:
            nltk.download("punkt")
            return nltk
def _split_paragraph(
self, prompt_value: str, max_size: int = 1024 * 4
) -> List[List[str]]:
"""
Split a paragraph into chunks of sentences, respecting the maximum size limit.
Args:
            prompt_value (str): The input paragraph to be split into chunks.
            max_size (int, optional): The maximum size limit in bytes for
                each chunk. Defaults to 1024 * 4.
Returns:
List[List[str]]: A list of chunks, where each chunk is a list
of sentences.
Note:
This function validates the maximum sentence size based on service
limits using the 'toxicity_init_validate' function. It uses the NLTK
sentence tokenizer to split the paragraph into sentences.
Example:
paragraph = "This is a sample paragraph. It
contains multiple sentences. ..."
chunks = split_paragraph(paragraph, max_size=2048)
"""
# validate max. sentence size based on Service limits
nltk = self._toxicity_init_validate(max_size)
sentences = nltk.sent_tokenize(prompt_value)
chunks = list() # type: ignore
current_chunk = list() # type: ignore
current_size = 0
for sentence in sentences:
sentence_size = len(sentence.encode("utf-8"))
# If adding a new sentence exceeds max_size
# or current_chunk has 10 sentences, start a new chunk
if (current_size + sentence_size > max_size) or (len(current_chunk) >= 10):
if current_chunk: # Avoid appending empty chunks
chunks.append(current_chunk)
current_chunk = []
current_size = 0
current_chunk.append(sentence)
current_size += sentence_size
# Add any remaining sentences
if current_chunk:
chunks.append(current_chunk)
return chunks
def validate(self, prompt_value: str, config: Any = None) -> str:
"""
Check the toxicity of a given text prompt using AWS
Comprehend service and apply actions based on configuration.
Args:
prompt_value (str): The text content to be checked for toxicity.
config (Dict[str, Any]): Configuration for toxicity checks and actions.
Returns:
str: The original prompt_value if allowed or no toxicity found.
Raises:
ValueError: If the prompt contains toxic labels and cannot be
processed based on the configuration.
"""
chunks = self._split_paragraph(prompt_value=prompt_value)
for sentence_list in chunks:
segments = [{"Text": sentence} for sentence in sentence_list]
response = self.client.detect_toxic_content(
TextSegments=segments, LanguageCode="en"
)
if self.callback and self.callback.toxicity_callback:
self.moderation_beacon["moderation_input"] = segments # type: ignore
self.moderation_beacon["moderation_output"] = response
toxicity_found = False
threshold = config.get("threshold")
toxicity_labels = config.get("labels")
if not toxicity_labels:
for item in response["ResultList"]:
for label in item["Labels"]:
if label["Score"] >= threshold:
toxicity_found = True
break
else:
for item in response["ResultList"]:
for label in item["Labels"]:
if (
label["Name"] in toxicity_labels
and label["Score"] >= threshold
):
toxicity_found = True
break
if self.callback and self.callback.toxicity_callback:
if toxicity_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_toxicity(
self.moderation_beacon, self.unique_id
)
)
if toxicity_found:
raise ModerationToxicityError
return prompt_value
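if __name__ == "__main__":
    # Minimal usage sketch, not part of the module: assumes valid AWS credentials,
    # a recent boto3, and the nltk package; the threshold and labels are examples.
    import boto3
    comprehend_client = boto3.client("comprehend", region_name="us-east-1")
    moderator = ComprehendToxicity(client=comprehend_client)
    config = {"threshold": 0.5, "labels": []}
    # Raises ModerationToxicityError if any segment scores above the threshold.
    print(moderator.validate("This is a perfectly pleasant sentence.", config=config))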
| [] |
2024-01-10 | mgoin/langchain | libs~langchain~langchain~vectorstores~vearch.py | from __future__ import annotations
import os
import time
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type
import numpy as np
from langchain.docstore.document import Document
from langchain.schema.embeddings import Embeddings
from langchain.vectorstores.base import VectorStore
if TYPE_CHECKING:
import vearch
DEFAULT_TOPN = 4
class VearchDb(VectorStore):
_DEFAULT_TABLE_NAME = "langchain_vearch"
def __init__(
self,
embedding_function: Embeddings,
table_name: str = _DEFAULT_TABLE_NAME,
metadata_path: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Initialize vearch vector store"""
try:
import vearch
except ImportError:
raise ValueError(
"Could not import vearch python package. "
"Please install it with `pip install vearch`."
)
if metadata_path is None:
metadata_path = os.getcwd().replace("\\", "/")
if not os.path.isdir(metadata_path):
os.makedirs(metadata_path)
log_path = os.path.join(metadata_path, "log")
if not os.path.isdir(log_path):
os.makedirs(log_path)
self.vearch_engine = vearch.Engine(metadata_path, log_path)
if not table_name:
table_name = self._DEFAULT_TABLE_NAME
table_name += "_"
table_name += str(uuid.uuid4()).split("-")[-1]
self.using_table_name = table_name
self.using_metapath = metadata_path
self.embedding_func = embedding_function
@property
def embeddings(self) -> Optional[Embeddings]:
return self.embedding_func
@classmethod
def from_documents(
cls: Type[VearchDb],
documents: List[Document],
embedding: Embeddings,
table_name: str = "langchain_vearch",
metadata_path: Optional[str] = None,
**kwargs: Any,
) -> VearchDb:
"""Return Vearch VectorStore"""
texts = [d.page_content for d in documents]
metadatas = [d.metadata for d in documents]
return cls.from_texts(
texts=texts,
embedding=embedding,
metadatas=metadatas,
table_name=table_name,
metadata_path=metadata_path,
**kwargs,
)
@classmethod
def from_texts(
cls: Type[VearchDb],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
table_name: str = _DEFAULT_TABLE_NAME,
metadata_path: Optional[str] = None,
**kwargs: Any,
) -> VearchDb:
"""Return Vearch VectorStore"""
vearch_db = cls(
embedding_function=embedding,
table_name=table_name,
metadata_path=metadata_path,
)
vearch_db.add_texts(texts=texts, metadatas=metadatas)
return vearch_db
def _create_table(
self,
dim: int = 1024,
filed_list: List[dict] = [
{"filed": "text", "type": "str"},
{"filed": "metadata", "type": "str"},
],
) -> int:
"""
Create VectorStore Table
Args:
dim:dimension of vector
fileds_list: the filed you want to store
Return:
code,0 for success,1 for failed
"""
type_dict = {"int": vearch.dataType.INT, "str": vearch.dataType.STRING}
engine_info = {
"index_size": 10000,
"retrieval_type": "IVFPQ",
"retrieval_param": {"ncentroids": 2048, "nsubvector": 32},
}
fields = [
vearch.GammaFieldInfo(fi["filed"], type_dict[fi["type"]])
for fi in filed_list
]
vector_field = vearch.GammaVectorInfo(
name="text_embedding",
type=vearch.dataType.VECTOR,
is_index=True,
dimension=dim,
model_id="",
store_type="MemoryOnly",
store_param={"cache_size": 10000},
has_source=False,
)
response_code = self.vearch_engine.create_table(
engine_info,
name=self.using_table_name,
fields=fields,
vector_field=vector_field,
)
return response_code
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""
Returns:
List of ids from adding the texts into the vectorstore.
"""
embeddings = None
if self.embedding_func is not None:
embeddings = self.embedding_func.embed_documents(list(texts))
table_path = os.path.join(
self.using_metapath, self.using_table_name + ".schema"
)
if not os.path.exists(table_path):
if embeddings is None:
raise ValueError("embeddings is None")
dim = len(embeddings[0])
response_code = self._create_table(dim)
if response_code:
raise ValueError("create table failed!!!")
if embeddings is not None and metadatas is not None:
doc_items = []
for text, metadata, embed in zip(texts, metadatas, embeddings):
profiles: dict[str, Any] = {}
profiles["text"] = text
profiles["metadata"] = metadata["source"]
profiles["text_embedding"] = embed
doc_items.append(profiles)
docid = self.vearch_engine.add(doc_items)
t_time = 0
while len(docid) != len(embeddings):
time.sleep(0.5)
if t_time > 6:
break
t_time += 1
self.vearch_engine.dump()
return docid
def _load(self) -> None:
"""
load vearch engine
"""
self.vearch_engine.load()
@classmethod
def load_local(
cls,
embedding: Embeddings,
table_name: str = _DEFAULT_TABLE_NAME,
metadata_path: Optional[str] = None,
**kwargs: Any,
) -> VearchDb:
"""Load the local specified table.
Returns:
Success or failure of loading the local specified table
"""
if not metadata_path:
raise ValueError("No metadata path!!!")
if not table_name:
raise ValueError("No table name!!!")
table_path = os.path.join(metadata_path, table_name + ".schema")
if not os.path.exists(table_path):
raise ValueError("vearch vectorbase table not exist!!!")
vearch_db = cls(
embedding_function=embedding,
table_name=table_name,
metadata_path=metadata_path,
)
vearch_db._load()
return vearch_db
def similarity_search(
self,
query: str,
k: int = DEFAULT_TOPN,
**kwargs: Any,
) -> List[Document]:
"""
Return docs most similar to query.
"""
if self.vearch_engine is None:
raise ValueError("Vearch engine is None!!!")
if self.embedding_func is None:
raise ValueError("embedding_func is None!!!")
embeddings = self.embedding_func.embed_query(query)
docs = self.similarity_search_by_vector(embeddings, k)
return docs
def similarity_search_by_vector(
self,
embedding: List[float],
k: int = DEFAULT_TOPN,
**kwargs: Any,
) -> List[Document]:
"""The most k similar documents and scores of the specified query.
Args:
embeddings: embedding vector of the query.
k: The k most similar documents to the text query.
min_score: the score of similar documents to the text query
Returns:
The k most similar documents to the specified text query.
0 is dissimilar, 1 is the most similar.
"""
query_data = {
"vector": [
{
"field": "text_embedding",
"feature": np.array(embedding),
}
],
"fields": [],
"is_brute_search": 1,
"retrieval_param": {"metric_type": "InnerProduct", "nprobe": 20},
"topn": k,
}
query_result = self.vearch_engine.search(query_data)
docs = []
for item in query_result[0]["result_items"]:
content = ""
meta_data = {}
for item_key in item:
if item_key == "text":
content = item[item_key]
continue
if item_key == "metadata":
meta_data["source"] = item[item_key]
continue
docs.append(Document(page_content=content, metadata=meta_data))
return docs
def similarity_search_with_score(
self,
query: str,
k: int = DEFAULT_TOPN,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
"""The most k similar documents and scores of the specified query.
Args:
embeddings: embedding vector of the query.
k: The k most similar documents to the text query.
min_score: the score of similar documents to the text query
Returns:
The k most similar documents to the specified text query.
0 is dissimilar, 1 is the most similar.
"""
if self.embedding_func is None:
raise ValueError("embedding_func is None!!!")
embeddings = self.embedding_func.embed_query(query)
query_data = {
"vector": [
{
"field": "text_embedding",
"feature": np.array(embeddings),
}
],
"fields": [],
"is_brute_search": 1,
"retrieval_param": {"metric_type": "InnerProduct", "nprobe": 20},
"topn": k,
}
query_result = self.vearch_engine.search(query_data)
results: List[Tuple[Document, float]] = []
for item in query_result[0]["result_items"]:
content = ""
meta_data = {}
for item_key in item:
if item_key == "text":
content = item[item_key]
continue
if item_key == "metadata":
meta_data["source"] = item[item_key]
continue
if item_key == "score":
score = item[item_key]
continue
tmp_res = (Document(page_content=content, metadata=meta_data), score)
results.append(tmp_res)
return results
def _similarity_search_with_relevance_scores(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> List[Tuple[Document, float]]:
return self.similarity_search_with_score(query, k, **kwargs)
def delete(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Optional[bool]:
"""Delete the documents which have the specified ids.
Args:
ids: The ids of the embedding vectors.
**kwargs: Other keyword arguments that subclasses might use.
Returns:
Optional[bool]: True if deletion is successful.
False otherwise, None if not implemented.
"""
if self.vearch_engine is None:
raise ValueError("Verach Engine is None!!!")
ret: Optional[bool] = None
tmp_res = []
        if not ids:
return ret
for _id in ids:
ret = self.vearch_engine.del_doc(_id)
tmp_res.append(ret)
ret = all(i == 0 for i in tmp_res)
return ret
def get(
self,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> Dict[str, Document]:
"""Return docs according ids.
Args:
ids: The ids of the embedding vectors.
Returns:
Documents which satisfy the input conditions.
"""
if self.vearch_engine is None:
raise ValueError("vearch engine is None!!!")
results: Dict[str, Document] = {}
        if not ids:
return results
for id in ids:
docs_detail = self.vearch_engine.get_doc_by_id(id)
if docs_detail == {}:
continue
content = ""
meta_info = {}
for field in docs_detail:
if field == "text":
content = docs_detail[field]
continue
elif field == "metadata":
meta_info["source"] = docs_detail[field]
continue
results[docs_detail["_id"]] = Document(
page_content=content, metadata=meta_info
)
return results
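if __name__ == "__main__":
    # Minimal usage sketch, not part of the module: assumes the `vearch` package and
    # an OPENAI_API_KEY in the environment. Note that add_texts reads a "source" key
    # from each metadata dict, so one is provided here.
    from langchain.embeddings.openai import OpenAIEmbeddings
    db = VearchDb.from_texts(
        texts=["hello vearch"],
        embedding=OpenAIEmbeddings(),
        metadatas=[{"source": "demo.txt"}],
        metadata_path="/tmp/vearch_demo",
    )
    print(db.similarity_search("hello", k=1))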
| [] |
2024-01-10 | mgoin/langchain | libs~langchain~langchain~memory~readonly.py | from typing import Any, Dict, List
from langchain.schema import BaseMemory
class ReadOnlySharedMemory(BaseMemory):
"""A memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return self.memory.memory_variables
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory variables from memory."""
return self.memory.load_memory_variables(inputs)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
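if __name__ == "__main__":
    # Minimal usage sketch: wrap an existing memory so downstream chains can read it
    # without mutating it (ConversationBufferMemory is just one possible backend).
    from langchain.memory import ConversationBufferMemory
    base_memory = ConversationBufferMemory()
    base_memory.save_context({"input": "hi"}, {"output": "hello"})
    shared = ReadOnlySharedMemory(memory=base_memory)
    print(shared.load_memory_variables({}))  # reads the shared history
    shared.save_context({"input": "x"}, {"output": "y"})  # intentionally a no-op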
| [] |
2024-01-10 | kumar045/anthropic-sdk-python | tests~test_api.py | import pytest
import anthropic
from anthropic import api, ApiException
def test_prompt_validator():
# No exceptions expected
api._validate_request({"max_tokens_to_sample": 1, "prompt": f"{anthropic.HUMAN_PROMPT} Hello{anthropic.AI_PROMPT}"})
api._validate_request({"max_tokens_to_sample": 1, "prompt": f"{anthropic.HUMAN_PROMPT} Hello{anthropic.AI_PROMPT} First answer{anthropic.HUMAN_PROMPT} Try again{anthropic.AI_PROMPT}"})
with pytest.raises(ApiException):
api._validate_request({"max_tokens_to_sample": 1, "prompt": f"{anthropic.HUMAN_PROMPT} Hello"})
with pytest.raises(ApiException):
api._validate_request({"max_tokens_to_sample": 1, "prompt": f"{anthropic.AI_PROMPT} "})
with pytest.raises(ApiException):
api._validate_request({"max_tokens_to_sample": 1, "prompt": f"Human: Hello{anthropic.AI_PROMPT}"})
| [] |
2024-01-10 | AICardioCare/prompt-csv | src~pages~2_CSV%20Agent.py | import re
import sys
from io import StringIO
import pandas as pd
import streamlit as st
from langchain.agents import create_pandas_dataframe_agent
from langchain.llms import OpenAI
from modules.history import ChatHistory
from modules.layout import Layout
from modules.sidebar import Sidebar
from modules.utils import Utilities
# To be able to update the changes made to modules in localhost (press r)
def reload_module(module_name):
import importlib
import sys
if module_name in sys.modules:
importlib.reload(sys.modules[module_name])
return sys.modules[module_name]
layout_module = reload_module("modules.layout")
sidebar_module = reload_module("modules.sidebar")
utils_module = reload_module("modules.utils")
Sidebar = sidebar_module.Sidebar
st.set_page_config(layout="wide", page_icon="❤️", page_title="AI Cardio Care | Speak to your Heart💓")
# Instantiate the main components
layout, sidebar, utils = Layout(), Sidebar(), Utilities()
layout.show_header()
user_api_key = utils.load_api_key()
uploaded_file = utils.handle_upload(["csv"])
if not user_api_key:
layout.show_api_key_missing()
# Configure the sidebar
sidebar.show_options()
sidebar.about()
if user_api_key and uploaded_file:
uploaded_file.seek(0)
# Read Data as Pandas
data = pd.read_csv(uploaded_file)
# Define pandas df agent - 0 ~ no creativity vs 1 ~ very creative
chatbot = create_pandas_dataframe_agent(OpenAI(temperature=0, openai_api_key=user_api_key), data, verbose=True)
# Initialize chat history
history = ChatHistory()
try:
st.session_state["chatbot"] = chatbot
# Create containers for chat responses and user prompts
response_container, prompt_container = st.container(), st.container()
with prompt_container:
# Display the prompt form
is_ready, user_input = layout.prompt_form()
# Initialize the chat history
history.initialize(uploaded_file)
# Reset the chat history if button clicked
if st.session_state["reset_chat"]:
history.reset(uploaded_file)
if is_ready:
# Update the chat history and display the chat messages
history.append("user", user_input)
old_stdout = sys.stdout
sys.stdout = captured_output = StringIO()
output = st.session_state["chatbot"].run(user_input)
sys.stdout = old_stdout
history.append("assistant", output)
# Clean up the agent's thoughts to remove unwanted characters
thoughts = captured_output.getvalue()
cleaned_thoughts = re.sub(r"\x1b\[[0-9;]*[a-zA-Z]", "", thoughts)
cleaned_thoughts = re.sub(r"\[1m>", "", cleaned_thoughts)
# Display the agent's thoughts
with st.expander("Display the agent's thoughts"):
st.write(cleaned_thoughts)
history.generate_messages(response_container)
except Exception as e:
st.error(f"Error: {str(e)}")
| [] |
2024-01-10 | seshakiran/langchain-agent-production-starter | src~agent~tools~video_message.py | """Tool for generating images."""
import logging
from langchain.agents import Tool
from steamship import Steamship, Block, SteamshipError
NAME = "VideoMessage"
DESCRIPTION = """
Useful for when you want to send a video message.
Input: The message you want to say in a video.
Output: the UUID of the generated video.
"""
PLUGIN_HANDLE = "did-video-generator"
class VideoMessageTool(Tool):
"""Tool used to generate images from a text-prompt."""
client: Steamship
def __init__(self, client: Steamship):
super().__init__(
name=NAME, func=self.run, description=DESCRIPTION, client=client
)
@property
def is_single_input(self) -> bool:
"""Whether the tool only accepts a single input."""
return True
def run(self, prompt: str, **kwargs) -> str:
"""Generate a video."""
video_generator = self.client.use_plugin(PLUGIN_HANDLE)
print("Video generator")
task = video_generator.generate(
text=prompt,
append_output_to_file=True,
options={
"source_url": "https://www.steamship.com/images/agents/man-in-suit-midjourney.png",
"stitch": True,
"provider": {
"type": "microsoft",
"voice_id": "en-US-AshleyNeural",
"voice_config": {"style": "Default"},
"expressions": [
{"start_frame": 0, "expression": "surprise", "intensity": 1.0},
{"start_frame": 50, "expression": "happy", "intensity": 1.0},
{"start_frame": 100, "expression": "serious", "intensity": 0.6},
{"start_frame": 150, "expression": "neutral", "intensity": 1.0},
],
},
"transition_frames": 20,
},
)
task.wait(retry_delay_s=3)
blocks = task.output.blocks
logging.info(f"[{self.name}] got back {len(blocks)} blocks")
if len(blocks) > 0:
logging.info(f"[{self.name}] image size: {len(blocks[0].raw())}")
return blocks[0].id
raise SteamshipError(f"[{self.name}] Tool unable to generate image!")
if __name__ == "__main__":
with Steamship.temporary_workspace() as client:
tool = VideoMessageTool(client=client)
id = tool.run(
"Unlike anything you experienced before"
)
b = Block.get(client=client, _id=id)
b.set_public_data(True)
print(b.raw_data_url)
| [] |
2024-01-10 | seshakiran/langchain-agent-production-starter | src~agent~tools~album_art.py | """Tool for generating album art.
The purpose of this tool is to illustrate how to wrap the GenerateImageTool
with a custom tool description & some prompt engineering to steer the image
one way or another.
The GenerateImageTool leaves the user + LLM in complete control of the image
generation prompt... but what if you wanted to make sure the prompt was:
- A particular style?
- A particular mood?
- Something else entirely, involving web scraping and other operations?
You can do that by wrapping the GenerateImageTool, as you see here, and then
sending in your own custom prompt.
"""
import json
import logging
from langchain.agents import Tool
from steamship import Steamship
from steamship.base.error import SteamshipError
from steamship.data.plugin.plugin_instance import PluginInstance
from .image import GenerateImageTool
NAME = "GenerateAlbumArt"
DESCRIPTION = """
Useful for when you need to generate album art.
Input: A description of the album that needs art
Output: the UUID of a generated image
"""
class GenerateAlbumArtTool(Tool):
"""Tool used to generate album art from a album description."""
client: Steamship
tool: GenerateImageTool
def __init__(self, client: Steamship):
super().__init__(
name=NAME,
func=self.run,
description=DESCRIPTION,
client=client,
tool=GenerateImageTool(client),
)
@property
def is_single_input(self) -> bool:
"""Whether the tool only accepts a single input."""
return True
def run(self, prompt: str, **kwargs) -> str:
"""Respond to LLM prompt."""
# Here we create a NEW prompt, which is based on the prompt provided
# to this tool, but including extra terms.
image_gen_prompt = f"album art, 4k, high def, pop art, professional, high quality, award winning, grammy, platinum, {prompt}"
# Then we just return the results of the wrapped GenerateImageTool,
# passing it the new prompt that we created.
return self.tool.run(image_gen_prompt)
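if __name__ == "__main__":
    # Minimal local sketch mirroring the VideoMessage tool's demo elsewhere in this
    # repo (assumes a Steamship API key is configured in the environment). Because of
    # the relative import of GenerateImageTool above, run this as a module from the
    # package root rather than as a standalone script.
    with Steamship.temporary_workspace() as client:
        tool = GenerateAlbumArtTool(client=client)
        block_id = tool.run("A synthwave album about late-night coding")
        print(f"Generated album art block: {block_id}")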
| [
"album art, 4k, high def, pop art, professional, high quality, award winning, grammy, platinum, PLACEHOLDER"
] |
2024-01-10 | taochenshh/exp4nav | src~utils~logger.py | '''
This file is modified from openai/baselines
'''
import datetime
import json
import os
import os.path as osp
import shutil
import sys
import tempfile
from datetime import datetime
from numbers import Number
LOG_OUTPUT_FORMATS = ['stdout', 'log', 'csv']
# Also valid: json, tensorboard
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, 'wt')
self.own_file = True
else:
assert hasattr(filename_or_file, 'read'), \
'expected file or str, got %s' % filename_or_file
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
kvs = {k: v for k, v in kvs.items() if isinstance(v, Number)}
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if isinstance(val, float):
valstr = '%-8.3g' % (val,)
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print('WARNING: tried to write empty key-value dict')
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = '-' * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items()):
lines.append('| %s%s | %s%s |' % (
key,
' ' * (keywidth - len(key)),
val,
' ' * (valwidth - len(val)),
))
lines.append(dashes)
self.file.write('\n'.join(lines) + '\n')
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
return s[:20] + '...' if len(s) > 23 else s
def writeseq(self, seq):
for arg in seq:
self.file.write(arg)
self.file.write('\n')
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'wt')
def writekvs(self, kvs):
kvs = {k: v for k, v in kvs.items() if isinstance(v, Number)}
for k, v in sorted(kvs.items()):
if hasattr(v, 'dtype'):
v = v.tolist()
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + '\n')
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'a+t')
self.keys = []
self.sep = ','
def writekvs(self, kvs):
kvs = {k: v for k, v in kvs.items() if isinstance(v, Number)}
# Add our current row to the history
extra_keys = kvs.keys() - self.keys
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
self.file.write(k)
self.file.write('\n')
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write('\n')
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
v = kvs.get(k)
if v:
self.file.write(str(v))
self.file.write('\n')
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
path = osp.join(osp.abspath(dir), datetime.now().strftime('%b%d_%H-%M-%S'))
from tensorboardX import SummaryWriter
self.writer = SummaryWriter(log_dir=path)
def writekvs(self, kvs):
assert 'iter' in kvs.keys()
step = kvs['iter']
scalar_kvs = {}
array_kvs = {}
for k, v in kvs.items():
if isinstance(v, Number):
scalar_kvs[k] = v
else:
array_kvs[k] = v
for k, v in scalar_kvs.items():
self.writer.add_scalar(k, float(v), step)
for k, v in array_kvs.items():
self.writer.add_histogram(k, v, step)
def close(self):
if self.writer:
self.writer.close()
self.writer = None
def make_output_format(format, ev_dir):
os.makedirs(ev_dir, exist_ok=True)
if format == 'stdout':
return HumanOutputFormat(sys.stdout)
elif format == 'log':
suffix = ""
return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % suffix))
elif format == 'json':
return JSONOutputFormat(osp.join(ev_dir, 'progress.json'))
elif format == 'csv':
return CSVOutputFormat(osp.join(ev_dir, 'progress.csv'))
elif format == 'tensorboard':
return TensorBoardOutputFormat(osp.join(ev_dir, 'tb'))
else:
raise ValueError('Unknown format specified: %s' % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
"""
Logger.CURRENT.logkv(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
level: int. (see logger.py docs) If the global logger level is higher than
the level argument here, don't print to stdout.
"""
Logger.CURRENT.dumpkvs()
def getkvs():
return Logger.CURRENT.name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the
console and output files (if you've configured an output file).
"""
Logger.CURRENT.log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
Logger.CURRENT.set_level(level)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return Logger.CURRENT.get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
# ================================================================
# Backend
# ================================================================
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats):
self.name2val = {} # values this iteration
self.level = INFO
self.dir = dir
self.output_formats = output_formats
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def dumpkvs(self):
if self.level == DISABLED: return
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(self.name2val)
self.name2val.clear()
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
Logger.DEFAULT = Logger.CURRENT = Logger(dir=None, output_formats=[HumanOutputFormat(sys.stdout)])
def configure(dir=None, format_strs=None):
if dir is None:
dir = os.getenv('OPENAI_LOGDIR')
if dir is None:
dir = osp.join(tempfile.gettempdir(),
                           datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
assert isinstance(dir, str)
os.makedirs(dir, exist_ok=True)
if format_strs is None:
strs = os.getenv('OPENAI_LOG_FORMAT')
format_strs = strs.split(',') if strs else LOG_OUTPUT_FORMATS
output_formats = [make_output_format(f, dir) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats)
log('Logging to %s' % dir)
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
class scoped_configure(object):
def __init__(self, dir=None, format_strs=None):
self.dir = dir
self.format_strs = format_strs
self.prevlogger = None
def __enter__(self):
self.prevlogger = Logger.CURRENT
configure(dir=self.dir, format_strs=self.format_strs)
def __exit__(self, *args):
Logger.CURRENT.close()
Logger.CURRENT = self.prevlogger
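# Example (a sketch; the directory and key names are placeholders): temporarily
# redirect logging for a sub-run, then restore the previous logger on exit.
#
#   with scoped_configure(dir="/tmp/eval-logs", format_strs=["stdout", "csv"]):
#       logkv("eval_return", 1.0)
#       dumpkvs()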
# ================================================================
def _demo():
info("hi")
debug("shouldn't appear")
set_level(DEBUG)
debug("should appear")
dir = "/tmp/testlogging"
if os.path.exists(dir):
shutil.rmtree(dir)
configure(dir=dir)
logkv("a", 3)
logkv("b", 2.5)
dumpkvs()
logkv("b", -2.5)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see a = 5.5")
logkv("b", -2.5)
dumpkvs()
logkv("a", "longasslongasslongasslongasslongasslongassvalue")
dumpkvs()
# ================================================================
# Readers
# ================================================================
def read_json(fname):
import pandas
ds = []
with open(fname, 'rt') as fh:
for line in fh:
ds.append(json.loads(line))
return pandas.DataFrame(ds)
def read_csv(fname):
import pandas
return pandas.read_csv(fname, index_col=None, comment='#')
def read_tb(path):
"""
path : a tensorboard file OR a directory, where we will find all TB files
of the form events.*
"""
import pandas
import numpy as np
from glob import glob
from collections import defaultdict
import tensorflow as tf
if osp.isdir(path):
fnames = glob(osp.join(path, "events.*"))
elif osp.basename(path).startswith("events."):
fnames = [path]
else:
raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s" % path)
tag2pairs = defaultdict(list)
maxstep = 0
for fname in fnames:
for summary in tf.train.summary_iterator(fname):
if summary.step > 0:
for v in summary.summary.value:
pair = (summary.step, v.simple_value)
tag2pairs[v.tag].append(pair)
maxstep = max(summary.step, maxstep)
data = np.empty((maxstep, len(tag2pairs)))
data[:] = np.nan
tags = sorted(tag2pairs.keys())
for (colidx, tag) in enumerate(tags):
pairs = tag2pairs[tag]
for (step, value) in pairs:
data[step - 1, colidx] = value
return pandas.DataFrame(data, columns=tags)
if __name__ == "__main__":
_demo()
| [] |
2024-01-10 | yachty66/EconomicAgents | economic_agents~horton.py | import matplotlib.pyplot as plt
import logging
from .open_ai import OpenAI
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
class Horton:
def __init__(self, api_key, model, image_path, logging):
self.openai_api_key = api_key
self.model = OpenAI(key=api_key, model=model)
self.image_path = image_path
self.logging = logging
def play(self):
wage_asks = [13, 14, 15, 16, 17, 18, 19, 20]
min_wages = [13, 15]
results = {}
for i in min_wages:
for j in wage_asks:
prompt = f"""
You are hiring for the role "Dishwasher."
The typical hourly rate for this role is $12/hour.
You have 2 candidates.
Person 1: Has 1 year(s) of experience in this role. Requests ${j}/hour.
Person 2: Has 0 year(s) of experience in this role. Requests ${i}/hour.
Who would you hire? You have to pick one. Its important that you only answer with the number and nothing else:
"""
solution = self.model.generate(prompt)
scenario = f"Min wage: {i}, Wage ask: {j}"
results[scenario] = solution
if self.logging:
logger.info(f"Prompt: {prompt}")
logger.info(f"Model response: {solution}")
return results
def create_plot(self, results):
scenarios = list(results.keys())
choices = [int(choice) for choice in results.values()]
plt.figure(figsize=(12, 6))
plt.bar(scenarios, choices, color='blue')
plt.xlabel('Scenarios')
plt.ylabel('Choices (1 for Person 1, 2 for Person 2)')
plt.title('Hiring choice for each scenario')
plt.xticks(rotation=90)
plt.savefig(self.image_path)
def __call__(self):
results = self.play()
self.create_plot(results)
return results
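if __name__ == "__main__":
    # Minimal usage sketch: the API key, model name and output path are placeholders,
    # and the OpenAI wrapper in .open_ai is assumed to accept them as above. Run as a
    # module (e.g. python -m economic_agents.horton) so the relative import resolves.
    agent = Horton(
        api_key="sk-...",
        model="gpt-4",
        image_path="horton_results.png",
        logging=True,
    )
    print(agent())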
| [
"\n You are hiring for the role \"Dishwasher.\"\n\n The typical hourly rate for this role is $12/hour.\n\n You have 2 candidates.\n\n Person 1: Has 1 year(s) of experience in this role. Requests $PLACEHOLDER/hour.\n Person 2: Has 0 year(s) of experience in this role. Requests $PLACEHOLDER/hour.\n\n Who would you hire? You have to pick one. Its important that you only answer with the number and nothing else:\n "
] |
2024-01-10 | yachty66/EconomicAgents | economic_agents~charness_rabin.py | import matplotlib.pyplot as plt
import logging
from .open_ai import OpenAI
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class CharnessRabin:
def __init__(self, api_key, model, personality, image_path, logging):
        self.openai_api_key = api_key
self.model = OpenAI(key=api_key, model=model)
self.image_path = image_path
self.logging = logging
self.personalities = [
"You only care about fairness between players",
"You only care about your own pay-off",
"You only care about the total pay-off of both players",
" "
]
self.personality_choice = personality
self.scenarios = {
"B29\n[400,400],[750,400]": ((400, 400), (750, 400)),
"B2\n[400,400],[750,375]": ((400, 400), (750, 375)),
"B23\n[800,200],[0,0]": ((800, 200), (0, 0)),
"B8\n[300,600],[700,500]": ((300, 600), (700, 500)),
"B15\n[200,700],[600,600]": ((200, 700), (600, 600)),
"B26\n[0,800],[400,400]": ((0, 800), (400, 400))
}
def play(self):
results = {}
for scenario, allocations in self.scenarios.items():
left_a, left_b = allocations[0]
right_a, right_b = allocations[1]
prompt = f"""You are deciding on allocation for yourself and another person, Person A.
{self.personalities[self.personality_choice]}
Option Left: You get ${left_b}, Person A gets ${left_a}
Option Right: You get ${right_b}, Person A gets ${right_a}
What do you choose, with one word [Left, Right]?"""
solution = self.model.generate(prompt)
results[scenario] = solution
            if self.logging:
logger.info(f"Prompt: {prompt}")
logger.info(f"Model response: {solution}")
return results
def create_plot(self, results):
scenarios = list(results.keys())
choices = [1 if choice == 'Right' else -1 for choice in results.values()]
plt.figure(figsize=(12, 6))
plt.scatter(choices, scenarios, color='blue')
plt.xlabel('Choices (Right on 1, Left on -1)')
plt.ylabel('Scenarios')
plt.title('Choices for each scenario')
plt.xticks([-1, 1], ['Left', 'Right'])
plt.savefig(self.image_path)
def __call__(self):
results = self.play()
self.create_plot(results)
return results
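if __name__ == "__main__":
    # Minimal usage sketch: placeholders for the key, model and output path;
    # `personality` indexes into self.personalities above (0-3). Run as a module
    # (e.g. python -m economic_agents.charness_rabin) so the relative import resolves.
    agent = CharnessRabin(
        api_key="sk-...",
        model="gpt-4",
        personality=0,
        image_path="charness_rabin_results.png",
        logging=True,
    )
    print(agent())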
| [] |
2024-01-10 | yachty66/EconomicAgents | economic_agents~kahneman.py | import matplotlib.pyplot as plt
import numpy as np
import logging
from .open_ai import OpenAI
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class Kahneman:
def __init__(self, api_key, model, image_path, logging):
        self.openai_api_key = api_key
self.model = OpenAI(key=api_key, model=model)
self.image_path = image_path
self.logging = logging
self.politics_view = [
"socialist",
"leftist",
"liberal",
"moderate",
"liberterian",
"conservative",
]
self.store_actions = ["raises the price to", "changes the price to"]
self.prices = [16, 20, 40, 100]
def play(self):
results = {}
for politics in self.politics_view:
for store_action in self.store_actions:
for price in self.prices:
prompt = f"""A hardware store has been selling snow shovels for $15. The morning after a large snowstorm, the store {store_action} ${price}.
Please rate this action as:
1) Completely Fair
2) Acceptable
3) Unfair
4) Very Unfair
You are a {politics}.
What is your choice [1, 2, 3, or 4]. Its important that you only answer with the number and nothing else:"""
solution = self.model.generate(prompt)
results[(politics, store_action, price)] = solution
if self.logging:
logger.info(f"Prompt: {prompt}")
logger.info(f"Model response: {solution}")
return results
def create_plot(self, results):
plot_data = {}
for (politics, store_action, price), judgment in results.items():
judgment = int(judgment) - 1
if politics not in plot_data:
plot_data[politics] = {}
if price not in plot_data[politics]:
plot_data[politics][price] = {}
if store_action not in plot_data[politics][price]:
                plot_data[politics][price][store_action] = [0, 0, 0, 0]
plot_data[politics][price][store_action][judgment] += 1
fig, axs = plt.subplots(len(self.politics_view), len(self.prices), figsize=(13, 13))
plt.subplots_adjust(wspace=0.5, hspace=0.5)
for i, politics in enumerate(self.politics_view):
for j, price in enumerate(self.prices):
ax = axs[i, j]
width = 0.35
                x = np.arange(4)
                for k, store_action in enumerate(self.store_actions):
                    view_data = [plot_data[politics][price][store_action][l] for l in range(4)]
ax.bar(x + k*width/2, view_data, width/2, color=['red', 'grey'][k], label=store_action)
ax.set_xlabel('Moral Judgments')
ax.set_ylabel('Count')
ax.set_title(f'{politics} for price {price}')
ax.set_xticks(x + width/4)
ax.set_xticklabels(["Acceptable", "Unfair", "Very Unfair"])
ax.legend()
fig.tight_layout()
plt.savefig(self.image_path)
plt.show()
def __call__(self):
results = self.play()
self.create_plot(results)
        return results
| [
"A hardware store has been selling snow shovels for $15. The morning after a large snowstorm, the store PLACEHOLDER $PLACEHOLDER. \n\n Please rate this action as:\n 1) Completely Fair \n 2) Acceptable\n 3) Unfair \n 4) Very Unfair\n\n You are a PLACEHOLDER.\n What is your choice [1, 2, 3, or 4]. Its important that you only answer with the number and nothing else:"
] |
2024-01-10 | yachty66/EconomicAgents | economic_agents~zeckhauser.py | from collections import Counter
import logging
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from .open_ai import OpenAI
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
class Zeckhauser:
def __init__(self, api_key, model, image_path, logging):
self.model = OpenAI(key=api_key, model=model)
self.image_path = image_path
self.logging = logging
self.views = [
'car safety is the most important thing.',
'car safety is a terrible waste of money; we should only fund highway safety.',
'car safety is all that matters. We should not fund highway safety.',
'car safety and highway safety are equally important',
'car safety is slightly more important than highway safety',
"I don't really care about car safety or highway safety",
'highway safety is the most important thing.',
'highway safety is a terrible waste of money; we should only fund car safety.',
'highway safety is all that matters. We should not fund car safety.',
'highway safety and car safety are equally important',
'highway safety is slightly more important than car safety',
"I don't really care about highway safety or car safety"
]
self.status_quo = [30, 50, 60, 70, " "]
self.options = ['a', 'b', 'c', 'd']
def play(self):
generate_status_quo = [
"The current budget allocation is 30% to auto safety and 70% to highway safety.",
"The current budget allocation is 50% to auto safety and 50% to highway safety.",
"The current budget allocation is 60% to auto safety and 40% to highway safety.",
"The current budget allocation is 70% to auto safety and 30% to highway safety.",
" "
]
results = {}
for element in generate_status_quo:
for view in self.views:
prompt = f"""The National Highway Safety Commission is deciding how to allocate its budget between two safety research programs:
i) improving automobile safety (bumpers, body, gas tank configurations, seatbelts) and
ii) improving the safety of interstate highways (guard rails, grading, highway interchanges, and implementing selective reduced speed limits).
{element}
They are considering the following options:
a) Allocate 70% to auto safety and 30% to highway safety
b) Allocate 40% to auto safety and 60% to highway safety
c) Allocate 30% to auto safety and 70% to highway safety
d) Allocate 50% to auto safety and 50% to highway safety
Your own views: {view}
Please choose your most preferred option in light of your views a, b, c or d. Its important that you respond only with one letter and not more!:"""
solution = self.model.generate(prompt)
results[(view, element)] = solution
if self.logging:
logger.info(f"Prompt: {prompt}")
logger.info(f"Model response: {solution}")
return results
def calculate_percentages(self,counter):
total = sum(counter.values())
return {key: (value / total) * 100 for key, value in counter.items()}
def create_plot(self, results):
status_quo_30 = []
status_quo_50 = []
status_quo_60 = []
status_quo_70 = []
neutral_status_quo = []
for key, value in results.items():
second_element = key[1]
first_digit = next((char for char in second_element if char.isdigit()), None)
if first_digit == '3':
status_quo_30.append({key: value})
elif first_digit == '5':
status_quo_50.append({key: value})
elif first_digit == '6':
status_quo_60.append({key: value})
elif first_digit == '7':
status_quo_70.append({key: value})
else:
neutral_status_quo.append({key: value})
status_quo_30_data = self.calculate_percentages(Counter([list(d.values())[0] for d in status_quo_30]))
status_quo_50_data = self.calculate_percentages(Counter([list(d.values())[0] for d in status_quo_50]))
status_quo_60_data = self.calculate_percentages(Counter([list(d.values())[0] for d in status_quo_60]))
status_quo_70_data = self.calculate_percentages(Counter([list(d.values())[0] for d in status_quo_70]))
neutral_status_quo_data = self.calculate_percentages(Counter([list(d.values())[0] for d in neutral_status_quo]))
views = ["30% auto \n framed as Status Quo", "50% auto \n framed as Status Quo", "60% auto \n framed as Status Quo", "70% auto \n framed as Status Quo", "Neutral framing"]
choices = ["70% car, 30% hwy", "40% car, 60% hwy", "30% car, 70% hwy", "50% car, 50% hwy"]
fig, axs = plt.subplots(1, 5, figsize=(20, 3.5), sharey=True)
data_list = [status_quo_30_data, status_quo_50_data, status_quo_60_data, status_quo_70_data, neutral_status_quo_data]
for i, ax in enumerate(axs):
keys = list(data_list[i].keys())
values = list(data_list[i].values())
ax.bar(np.arange(len(keys)), values)
ax.set_title(views[i])
ax.set_xticks(np.arange(len(choices)))
ax.set_xticklabels(choices, rotation='vertical')
ax.set_ylim(0, 100)
ax.set_yticks([0, 20, 40, 60, 80])
ax.set_yticklabels(['0%', '20%', '40%', '60%', '80%'])
plt.tight_layout()
plt.savefig(self.image_path)
if self.logging:
logger.info(f"Plot saved at: {self.image_path}")
plt.show()
def __call__(self):
results = self.play()
self.create_plot(results)
return results | [
"The National Highway Safety Commission is deciding how to allocate its budget between two safety research programs: \n\n i) improving automobile safety (bumpers, body, gas tank configurations, seatbelts) and \n ii) improving the safety of interstate highways (guard rails, grading, highway interchanges, and implementing selective reduced speed limits).\n\n PLACEHOLDER\n\n They are considering the following options:\n\n a) Allocate 70% to auto safety and 30% to highway safety\n b) Allocate 40% to auto safety and 60% to highway safety\n c) Allocate 30% to auto safety and 70% to highway safety\n d) Allocate 50% to auto safety and 50% to highway safety\n\n Your own views: PLACEHOLDER\n\n Please choose your most preferred option in light of your views a, b, c or d. Its important that you respond only with one letter and not more!:"
] |
2024-01-10 | Gabyperin/Python | API%20KEY%20IA%20Gaby~apiiagaby.py | import openai as ia
# Go to this site (https://platform.openai.com/account/api-keys) and create your secret key,
# then copy and paste it below.
ia.api_key = ''
request = input('Descreva a imagem a ser gerada: ')
text = "Mulher/Homem mais lindo do mundo"
# put the photo link below:
link = ""
if request == text:
print(f'URL da imagem gerada: \n{link}')
else:
response = ia.Image.create(
prompt = request,
n=1,
size="1024x1024"
)
imagem_url = response['data'][0]['url']
print(f'URL da imagem gerada: \n{imagem_url}')
| [] |
2024-01-10 | Gabyperin/Python | API%20KEY%20IA~apiia.py | import openai as ia
# Go to this site (https://platform.openai.com/account/api-keys) and create a secret key,
# then copy and paste it where it is requested.
ia.api_key = 'paste your api key here'
request = input('Descreva a imagem a ser gerada: ')
response = ia.Image.create(
prompt = request,
n=1,
size="1024x1024"
)
imagem_url = response['data'][0]['url']
print(f'URL da imagem gerada: \n{imagem_url}') | [] |
2024-01-10 | Cisco-Kosha/rag-connector | app~schemas~ingestion.py | from typing import Optional, Union
from langchain.schema import Document
from pydantic import BaseModel
class DocumentChunker(BaseModel):
chunk_size: Optional[Union[int, None]]
chunk_overlap: Optional[Union[int, None]]
documents: Union[str, list[Document]]
class Config:
json_schema_extra = {
"example": {
"chunk_size": 1000,
"chunk_overlap": 100,
"documents": [
{
"page_content": "{\"userId\": 1, \"id\": 1, \"title\": \"delectus aut autem\", \"completed\": "
"false}",
"metadata": {
"source": "/private/var/folders/pm/5v47_r592xz7d9rm1jb53tyh0000gn/T/tmprszk62ep",
"seq_num": 1
}
}
]
}
}
class VectorStore(BaseModel):
host: str
class Config:
json_schema_extra = {
"example": {
"host": "localhost"
}
}
class StoreInVectoDB(BaseModel):
vectorstore: Optional[str] = "chromadb"
embedding_model: Optional[str] = "openai"
documents: list[Document]
class Config:
json_schema_extra = {
"example": {
"vectorstore": "chromadb",
"embedding_model": "openai",
"documents": [
{
"page_content": "Some payload that is chunked into multiple smaller documents",
"metadata": {
"source": "/private/var/folders/pm/5v47_r592xz7d9rm1jb53tyh0000gn/T/tmphr547jgv",
"seq_num": 1
}
}
]
}
}
| [
"{\"userId\": 1, \"id\": 1, \"title\": \"delectus aut autem\", \"completed\": false}",
"Some payload that is chunked into multiple smaller documents"
] |
2024-01-10 | Cisco-Kosha/rag-connector | app~helper~utility.py | import openai
def text_embedding(text):
response = openai.Embedding.create(model="text-embedding-ada-002", input=text)
return response["data"][0]["embedding"]
| [] |
2024-01-10 | Cisco-Kosha/rag-connector | app~api~api_v1~endpoints~retrieval.py |
import os
import traceback
from typing import Any, List
import openai
from chromadb.utils import embedding_functions
from fastapi import APIRouter
import chromadb
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores.chroma import Chroma
from app.helper.utility import text_embedding
from starlette.responses import Response
from app.connectors.chroma import ChromaDBConnector
from app.core.config import settings, logger
from app.schemas.retrieval import ChatBotParameters
router = APIRouter()
DEFAULT_K = 4
# default_collection_name: str = "default_collection"
@router.post("/chatbot", status_code=200)
def chatbot(properties: ChatBotParameters) -> Any:
"""
    This endpoint is used to fetch the top K documents from a vectorstore based on a query, and then send them as context to the LLM model
"""
try:
if properties.embedding_model is None:
return Response(status_code=400, content="Embedding model is empty")
if properties.embedding_model == "openai":
chroma_connector = ChromaDBConnector(host_url=settings.CHROMADB_CONNECTOR_SERVER_URL, jwt_token=settings.JWT_TOKEN)
collection = chroma_connector.get_or_create_collection(settings.DEFAULT_COLLECTION_NAME)
# print(collection)
# openai_ef = embedding_functions.OpenAIEmbeddingFunction(
# api_key=os.environ["OPENAI_API_KEY"],
# model_name="text-embedding-ada-002"
# )
# vector = text_embedding(properties.prompt)
#
# print(vector)
# results = chroma_connector.query(name=str(collection['id']), vector=[vector], include=["documents"],
# n_results=10)
# res = "\n".join(str(item) for item in results['documents'][0])
documents = chroma_connector.get_collection(collection['id'])
# print(documents['documents'])
vector = text_embedding(documents['documents'])
results = chroma_connector.query(name=str(collection['id']), vector=[vector], include=["documents"],
n_results=15)
embeddings = OpenAIEmbeddings()
chromadb_client = chromadb.HttpClient(
host=settings.CHROMADB_SERVER_URL, port="80", headers={"Authorization": "Bearer " + settings.CHROMADB_SERVER_API_KEY})
# chat = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", verbose=True,
# openai_api_base=settings.OPENAI_CONNECTOR_SERVER_URL, openai_api_type=settings.JWT_TOKEN)
chat = ChatOpenAI(temperature=properties.temperature, model_name="gpt-3.5-turbo", verbose=True)
db = Chroma(embedding_function=embeddings,
collection_name=settings.DEFAULT_COLLECTION_NAME, client=chromadb_client)
qa = RetrievalQA.from_chain_type(llm=chat, chain_type="stuff", retriever=db.as_retriever())
# res = "\n".join(str(item) for item in results['documents'][0])
# prompt = f'```{res}```'
#
# messages = [
# {"role": "system", "content": "You are an API Expert. You are helping a customer with an API issue. Do not worry about missing parts and formatting issues. Do your best to help the customer."},
# {"role": "user", "content": prompt}
# ]
# response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
# messages=messages,
# temperature=0
# )
# response_message = response["choices"][0]["message"]["content"]
#
# print(response_message)
# return response_message
return qa.run(properties.prompt)
except Exception as e:
logger.error(e)
traceback.print_exc()
return Response(status_code=400, content=str(e))
return "Success"
| [] |
2024-01-10 | Cisco-Kosha/rag-connector | app~vectorstore~chromadb_db.py | import uuid
from typing import Any, Optional, List, Callable, Iterable
from app.embeddings.openai import OpenAI
from app.connectors.chroma import ChromaDBConnector
from app.core.config import settings, logger
default_collection_name: str = "default_collection"
class Chroma(object):
def __init__(
self,
collection_name: str = settings.DEFAULT_COLLECTION_NAME,
embedding_function: Optional[str] = None,
persist_directory: Optional[str] = None,
relevance_score_fn: Optional[Callable[[float], float]] = None,
) -> None:
"""Initialize with a Chroma client."""
self._collection_name = collection_name
self._embedding_function = embedding_function
self._persist_directory = persist_directory
self._relevance_score_fn = relevance_score_fn
self.chroma_connector = ChromaDBConnector(host_url=settings.CHROMADB_CONNECTOR_SERVER_URL, jwt_token=settings.JWT_TOKEN)
self._collection = None
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
ids: Optional[List[str]] = None,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts (Iterable[str]): Texts to add to the vectorstore.
metadatas (Optional[List[dict]], optional): Optional list of metadatas.
ids (Optional[List[str]], optional): Optional list of IDs.
Returns:
List[str]: List of IDs of the added texts.
"""
if ids is None:
ids = [str(uuid.uuid1()) for _ in texts]
embeddings = None
texts = list(texts)
if self._embedding_function is not None:
if self._embedding_function == "openai":
openai_obj = OpenAI()
embeddings = openai_obj.get_len_safe_embeddings(texts)
# chromadb_client = chromadb.HttpClient(
# host="localhost", port="8211", headers={"Authorization": "Bearer test-token"})
# self._collection = self.chroma_connector.get_or_create_collection(self._collection_name)
# os.environ["OPENAI_API_KEY"] = ""
# openai_embeddings = OpenAIEmbeddings()
# self._collection = chromadb_client.get_or_create_collection(self._collection_name,
# embedding_function=openai_embeddings.embed_documents)
self._collection = self.chroma_connector.get_or_create_collection(self._collection_name)
print(self._collection)
if metadatas:
# fill metadatas with empty dicts if somebody
# did not specify metadata for all texts
length_diff = len(texts) - len(metadatas)
if length_diff:
metadatas = metadatas + [{}] * length_diff
empty_ids = []
non_empty_ids = []
for idx, m in enumerate(metadatas):
if m:
non_empty_ids.append(idx)
else:
empty_ids.append(idx)
if non_empty_ids:
metadatas = [metadatas[idx] for idx in non_empty_ids]
texts_with_metadatas = [texts[idx] for idx in non_empty_ids]
embeddings_with_metadatas = (
[embeddings[idx] for idx in non_empty_ids] if embeddings else None
)
ids_with_metadata = [ids[idx] for idx in non_empty_ids]
try:
print(ids_with_metadata)
print(self._collection['id'])
self.chroma_connector.upsert_documents(collection_id=str(self._collection['id']),
ids=ids_with_metadata,
embeddings=embeddings_with_metadatas,
metadatas=metadatas,
documents=texts_with_metadatas)
except ValueError as e:
if "Expected metadata value to be" in str(e):
msg = (
"Try filtering complex metadata from the document using "
"langchain.vectorstores.utils.filter_complex_metadata."
)
raise ValueError(e.args[0] + "\n\n" + msg)
else:
raise e
if empty_ids:
texts_without_metadatas = [texts[j] for j in empty_ids]
embeddings_without_metadatas = (
[embeddings[j] for j in empty_ids] if embeddings else None
)
ids_without_metadatas = [ids[j] for j in empty_ids]
self.chroma_connector.upsert_documents(collection_id=str(self._collection['id']),
ids=ids_without_metadatas,
embeddings=embeddings_without_metadatas,
documents=texts_without_metadatas)
else:
self.chroma_connector.upsert_documents(collection_id=str(self._collection['id']), ids=ids,
embeddings=embeddings,
documents=texts)
return ids
| [] |
2024-01-10 | pabloDeputter/tcell-topic-modelling | src~tpm.py | import utils
import pandas as pd
import matplotlib.pyplot as plt
from gensim import corpora, models
from gensim.models import CoherenceModel
from scipy.sparse import csr_matrix
@utils.timer_decorator
def create_corpus(dtm: pd.DataFrame, dictionary: corpora.Dictionary, filename: str):
corpus = []
# Iterate over rows.
for idx, row in dtm.iterrows():
doc = []
# Iterate over index.
for seq, count in row.items():
# Add each sequence to the document * count times.
doc += [str(seq)] * int(count)
corpus.append(dictionary.doc2bow(doc))
# Write Corpus to disk.
corpora.MmCorpus.serialize(filename, corpus)
@utils.timer_decorator
def create_corpus_(td_matrix: csr_matrix, dictionary: corpora.Dictionary, filename: str):
corpus = []
# Iterate over rows (documents).
for idx in range(td_matrix.shape[0]):
doc = [(seq_idx, int(count)) for seq_idx, count in enumerate(td_matrix[idx, :].toarray().flatten()) if
count > 0]
# Use the dictionary to convert document to bag of words format
corpus.append(doc)
# Write Corpus to disk.
corpora.MmCorpus.serialize(filename, corpus)
@utils.timer_decorator
def create_dictionary(dtm: pd.DataFrame, filename: str):
# Create a dictionary where the keys are sequences and the values are unique integer IDs.
dictionary = corpora.Dictionary([list(map(str, dtm.columns))])
# TODO - Filter out words based on document frequency or other criteria
# dictionary.filter_extremes(no_below=5, no_above=0.5)
# Write Dictionary to disk.
dictionary.save(filename)
@utils.timer_decorator
def create_dictionary_(seq_encoder, filename: str):
# Create a dictionary where the keys are sequences and the values are unique integer IDs.
dictionary = corpora.Dictionary([seq_encoder.classes_])
# TODO - Filter out words based on document frequency or other criteria
# dictionary.filter_extremes(no_below=5, no_above=0.5)
# Write Dictionary to disk.
dictionary.save(filename)
@utils.timer_decorator
def train_model(corpus: corpora.MmCorpus, dictionary: corpora.Dictionary, filename: str = '', save: bool = True,
num_topics: int = 10, random_state: int = 42, passes: int = 10, iterations: int = 200,
chunksize: int = 20000, eval_every: int = 10):
# Train LDA model on the corpus.
model = models.LdaMulticore(corpus=corpus, id2word=dictionary, num_topics=num_topics, workers=7,
random_state=random_state,
# Number of passes through the corpus during training, higher is more accurate but slower.
passes=passes,
# Number of iterations over the corpus, controls how much model is updated.
iterations=iterations,
# Number of documents to be used in each training iteration, higher is faster but uses more memory.
chunksize=chunksize,
eval_every=eval_every)
# Doesn't work with LdaMulticore.
# alpha='auto', eta='auto')
# Write model to disk.
if save:
model.save(filename)
return model
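# Illustrative pipeline sketch (added for clarity, not part of the original module):
# shows how the helpers above are typically chained together. The file names and the
# `dtm` document-term DataFrame argument are assumptions for this example only.
def example_lda_pipeline(dtm: pd.DataFrame):
    create_dictionary(dtm, "dictionary.dict")
    dictionary = corpora.Dictionary.load("dictionary.dict")
    create_corpus(dtm, dictionary, "corpus.mm")
    corpus = corpora.MmCorpus("corpus.mm")
    return train_model(corpus, dictionary, filename="lda.model", num_topics=10)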
@utils.timer_decorator
def optimize_parameters(dtm: pd.DataFrame, corpus: corpora.MmCorpus, dictionary: corpora.Dictionary,
min_topics: int = 5,
max_topics: int = 50, step_size: int = 5):
# Range of number of topics to try.
topics_range = range(min_topics, max_topics, step_size)
# Transform tdm into list of lists, where each list contains the sequences in the document * count times.
    texts = [[str(seq) for seq, count in doc.items() for _ in range(int(count))] for idx, doc in dtm.iterrows()]
model_list = []
coh_perp_values = []
# Try each number of topics.
for num_topics in topics_range:
print("Training model with", num_topics, "topics.")
model = train_model(corpus, dictionary, save=False, num_topics=num_topics)
model_list.append(model)
# Calculate perplexity score, the lower, the better.
perplexity = model.log_perplexity(corpus)
# Calculate coherence score, the higher, the better.
coherence_model = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
coh_perp_values.append((coherence_model.get_coherence(), perplexity))
print("Num Topics =", num_topics, " has Coherence Value of", coherence_model.get_coherence(),
" and Perplexity Value of", perplexity)
# Unzip coherence and perplexity values.
coherence_values, perplexity_values = zip(*coh_perp_values)
# Plotting.
fig, ax1 = plt.subplots()
color = 'tab:red'
ax1.set_xlabel("Num Topics")
ax1.set_ylabel("Coherence score", color=color)
ax1.plot(topics_range, coherence_values, color=color)
ax1.tick_params(axis='y', labelcolor=color)
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.set_ylabel('Perplexity score', color=color)
ax2.plot(topics_range, perplexity_values, color=color)
ax2.tick_params(axis='y', labelcolor=color)
fig.tight_layout()
plt.show()
| [] |
2024-01-10 | softwaredoug/vmware | vmware~search~gpt_query.py | from dotenv import load_dotenv
import json
import os
import openai
import csv
from sys import argv
load_dotenv()
openai.api_key = os.getenv('OPEN_AI_KEY')
model = "gpt-3.5-turbo"
def prompt1():
prompt = """
Within VMWare technical documentation or blog posts, generate an article body for the query "{query}". Include the title and body of the article.
"""
return prompt
def prompt2():
prompt = """
Given VMWare question answering forum, etc - please generate a forum question (ie stackoverflow) that solves the problem behind the question "{query}". Please feel free to liberally misspell and give common, alternate spellings of the topics in the question (ie vmware -> VM Ware, elasticsearch -> elastic search, etc)
Please don't include code in the response.
"""
return prompt
def prompt3():
prompt = """
In search relevance, users type in search queries that don't match the vocabulary used in the underlying corpus. This is known as the 'vocabulary problem'. Some important reasons for the vocabulary problem - the author of the article in the corpus does not use the same terminology as the searcher.
Given a corpus of VMWare technical documentation, blog posts, and forum posts, please propose some alternate, equivalent synonyms, alternate forms, similar terms, tags, and other possible expansions for the search query below. Note it's very important to come up with synonyms that expand one word to multiple words or vice-versa.
Please list the response as a numbered list with no explanation. Here's the query:
> {query}
"""
return prompt
def prompt4():
return """
Here are some basic definitions I want you to understand about interpreting search queries:
* Phrase searches - searches wrapped in quotes that search for that exact phrase to increase precision of the query. IE instead of searching just `doug turnbull` searching `"doug turnbull"` ensures the words occur together
* Collocations - two word phrases that frequently occur together in an English corpus. For example Palo Alto - these two words rarely occur by themselves in an English corpus. Instead they are more commonly encountered as a single concept
I'm going to give you a search query, please return alternate search queries as a list of strings with two-word collocations quoted
For example, if I asked for the query
network companies
You might respond with the following.
* networking startups
* networking companies
* "silicon valley" internet corporations
* company that does "software engineering" for Ethernet
...
* "palo alto" networks
Notice how only two word phrases are quoted, and these quoted phrases are common concepts in an American English corpus likely to improve the precision of the original search query.
Please do only include a bulleted list with *'s for bullets and no natural language explanation. Please include 10 responses total.
Here is the query:
> {query}
"""
def understand_nl_query(prompt, query):
prompt = prompt.format(query=query)
completion = openai.ChatCompletion.create(
model=model,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
]
)
response = completion.choices[0].message.content
return response
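# Hypothetical helper (not in the original script): prompt4 above asks the model for a
# "*"-bulleted list, so a caller would typically split the completion back into
# individual query strings along those bullets, roughly like this.
def parse_bulleted_response(response: str) -> list:
    return [line.strip().lstrip("* ").strip() for line in response.splitlines()
            if line.strip().startswith("*")]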
def queries_to_articles(prompt, reader, cache):
for row in reader:
query = row['Query']
query_id = int(row['QueryId'])
if query in cache.keys():
print(f"Found {query} in cache")
yield query_id, query, cache[query]['article']
else:
yield query_id, query, understand_nl_query(prompt, query)
def load_query_database(filename='query_database.1.json'):
try:
return json.load(open(filename, 'r'))
except FileNotFoundError:
return {}
def articles_for_queries(filename: str):
query_database = load_query_database(filename)
if 'prompt' not in query_database.keys():
query_database['prompt'] = prompt4()
prompt = query_database['prompt']
if prompt != prompt4():
print("Prompt has changed. Should you reprocess all queries?")
if 'questions' not in query_database.keys():
query_database['questions'] = {}
for key in query_database.keys():
if key not in ['questions', 'prompt']:
raise ValueError(f"Key {key} is not a valid key")
with open('data/test.csv', 'r') as f:
reader = csv.DictReader(f)
for query_id, query, article in queries_to_articles(prompt, reader, query_database['questions']):
print("----")
print(query)
print(article)
if query not in query_database['questions'].keys():
query_database['questions'][query] = {'article': article,
'query_id': query_id}
if query_id % 10 == 0:
print(f"Completed {query_id} queries")
json.dump(query_database, open(filename, 'w'))
json.dump(query_database, open(filename, 'w'))
if __name__ == '__main__':
articles_for_queries(argv[1])
| [
"\nIn search relevance, users type in search queries that don't match the vocabulary used in the underlying corpus. This is known as the 'vocabulary problem'. Some important reasons for the vocabulary problem - the author of the article in the corpus does not use the same terminology as the searcher.\n\nGiven a corpus of VMWare technical documentation, blog posts, and forum posts, please propose some alternate, equivalent synonyms, alternate forms, similar terms, tags, and other possible expansions for the search query below. Note it's very important to come up with synonyms that expand one word to multiple words or vice-versa.\n\nPlease list the response as a numbered list with no explanation. Here's the query:\n\n> {query}\n ",
"\n Given VMWare question answering forum, etc - please generate a forum question (ie stackoverflow) that solves the problem behind the question \"{query}\". Please feel free to liberally misspell and give common, alternate spellings of the topics in the question (ie vmware -> VM Ware, elasticsearch -> elastic search, etc)\n\n Please don't include code in the response.\n ",
"You are a helpful assistant.",
"\n Within VMWare technical documentation or blog posts, generate an article body for the query \"{query}\". Include the title and body of the article.\n "
] |
2024-01-10 | chloechsu/revisiting-ppo | src~policy_gradients~torch_utils.py | import torch as ch
from torch.distributions.categorical import Categorical
import numpy as np
'''
Common functions/utilities implemented in PyTorch
Sorted into categories:
- General functions
- Actor-critic helpers
- Policy gradient (PPO/TRPO) helpers
- Normalization helpers
- Neural network helpers
- Initialization helpers
'''
########################
### GENERAL UTILITY FUNCTIONS:
# Parameters, unroll, cu_tensorize, cpu_tensorize, shape_equal_cmp,
# shape_equal, scat, determinant, safe_op_or_neg_one
########################
CKPTS_TABLE = 'checkpoints'
class Parameters():
'''
Parameters class, just a nice way of accessing a dictionary
> ps = Parameters({"a": 1, "b": 3})
> ps.A # returns 1
> ps.B # returns 3
'''
def __init__(self, params):
self.params = params
def __getattr__(self, x):
return self.params[x.lower()]
def unroll(*tensors):
'''
Utility function unrolling a list of tensors
Inputs:
    - tensors; all arguments should be tensors (at least 2D)
Returns:
- The same tensors but with the first two dimensions flattened
'''
rets = []
for t in tensors:
assert len(t.shape) >= 2
new_shape = [t.shape[0]*t.shape[1]] + list(t.shape[2:])
rets.append(t.contiguous().view(new_shape))
return rets
def cu_tensorize(t):
'''
Utility function for turning arrays into cuda tensors
Inputs:
- t, list
Returns:
- Tensor version of t
'''
return ch.tensor(t).float().cuda()
def cpu_tensorize(t):
'''
Utility function for turning arrays into cpu tensors
Inputs:
- t, list
Returns:
- Tensor version of t
'''
return ch.tensor(t).float()
def gpu_mapper(cpu=False):
    return ch.device('cuda:0') if not cpu else ch.device('cpu')
def shape_equal_cmp(*args):
'''
Checks that the shapes of the passed arguments are equal
Inputs:
- All arguments should be tensors
Returns:
- True if all arguments have the same shape, else ValueError
'''
for i in range(len(args)-1):
if args[i].shape != args[i+1].shape:
s = "\n".join([str(x.shape) for x in args])
raise ValueError("Expected equal shapes. Got:\n%s" % s)
return True
def shape_equal(a, *args):
'''
Checks that a group of tensors has a required shape
Inputs:
- a, required shape for all the tensors
- Rest of the arguments are tensors
Returns:
- True if all tensors are of shape a, otherwise ValueError
'''
for arg in args:
if list(arg.shape) != list(a):
if len(arg.shape) != len(a):
raise ValueError("Expected shape: %s, Got shape %s" \
% (str(a), str(arg.shape)))
for i in range(len(arg.shape)):
if a[i] == -1 or a[i] == arg.shape[i]:
continue
raise ValueError("Expected shape: %s, Got shape %s" \
% (str(a), str(arg.shape)))
return shape_equal_cmp(*args)
def scat(a, b, axis):
'''
Set-or-Cat (scat)
Circumventing a PyTorch bug that auto-squeezes empty tensors.
Inputs:
a - A torch tensor, or None
b - A torch tensor, can not be None
axis - Axis to concat with
Returns:
- b if a is None, otherwise b concatted to a
'''
if a is None:
return b
return ch.cat((a, b), axis)
def determinant(mat):
'''
Returns the determinant of a diagonal matrix
Inputs:
- mat, a diagonal matrix
Returns:
- The determinant of mat, aka product of the diagonal
'''
return ch.exp(ch.log(mat).sum())
def log_determinant(mat):
'''
Returns the log determinant of a diagonal matrix
Inputs:
- mat, a diagonal matrix
Returns:
- The log determinant of mat, aka sum of the log diagonal
'''
return ch.log(mat).sum()
def safe_op_or_neg_one(maybe_empty, op):
'''
Performs an operation on a tensor which may be empty.
Returns -1 if the tensor is empty, and returns the result
of the op otherwise.
Inputs:
- maybe_empty, tensor which may be empty
- op, an operation (tensor) -> (object) to perform
Returns:
- -1 if tensor is empty otherwise op(maybe_empty)
'''
if maybe_empty.nelement() == 0:
return -1.
else:
return op(maybe_empty)
########################
### ACTOR-CRITIC HELPERS:
# discount_path, get_path_indices, select_prob_dists
########################
# Can be used to convert rewards into discounted returns:
# ret[i] = sum of t = i to T of gamma^(t-i) * rew[t]
def discount_path(path, h):
'''
Given a "path" of items x_1, x_2, ... x_n, return the discounted
path, i.e.
X_1 = x_1 + h*x_2 + h^2 x_3 + h^3 x_4
X_2 = x_2 + h*x_3 + h^2 x_4 + h^3 x_5
etc.
    Can be done (more efficiently?) with SciPy; written in Python here for readability
Inputs:
- path, list/tensor of floats
- h, discount rate
Outputs:
- Discounted path, as above
'''
curr = 0
rets = []
for i in range(len(path)):
curr = curr*h + path[-1-i]
rets.append(curr)
rets = ch.stack(list(reversed(rets)), 0)
return rets
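# Brief usage sketch (added for illustration, not part of the original file): with three
# unit rewards and discount h = 0.5, the discounted path is [1.75, 1.5, 1.0],
# i.e. 1 + 0.5 + 0.25 for the first step.
def _discount_path_example():
    rewards = ch.tensor([1.0, 1.0, 1.0])
    return discount_path(rewards, 0.5)  # tensor([1.7500, 1.5000, 1.0000])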
def get_path_indices(not_dones):
"""
Returns list of tuples of the form:
(agent index, time index start, time index end + 1)
For each path seen in the not_dones array of shape (# agents, # time steps)
    E.g. if we have a not_dones array of the form:
tensor([[1, 1, 0, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 1, 1, 0, 1, 1, 0, 1]], dtype=torch.uint8)
Then we would return:
[(0, 0, 3), (0, 3, 10), (1, 0, 3), (1, 3, 5), (1, 5, 9), (1, 9, 10)]
"""
indices = []
num_timesteps = not_dones.shape[1]
for actor in range(not_dones.shape[0]):
last_index = 0
for i in range(num_timesteps):
if not_dones[actor, i] == 0.:
indices.append((actor, last_index, i + 1))
last_index = i + 1
if last_index != num_timesteps:
indices.append((actor, last_index, num_timesteps))
return indices
def select_prob_dists(pds, selected=None, detach=True):
'''
    Given a tensor/tuple of probability distributions, and
some indices, select a subset of the distributions
`pds`s according to the indices `selected`.
Inputs:
    - pds: the probability distributions, as a tensor or a tuple of tensors
'''
if type(pds) is tuple:
if selected is not None:
if pds[1].shape[0] == pds[0].shape[0]:
tup = (pds[0][selected], pds[1][selected])
else:
tup = (pds[0][selected], pds[1])
else:
tup = pds
return tuple(x.detach() if detach else x for x in tup)
out = pds[selected] if selected is not None else pds
return out.detach() if detach else out
########################
### POLICY GRADIENT HELPERS:
# vjp, jvp, cg_solve, backtracking_line_search
########################
def vjp(f_x, theta, v, create=True):
'''
Vector-jacobian product
Calculates v^TJ, or J^T v, using standard backprop
Input:
- f_x, function of which we want the Jacobian
- theta, variable with respect to which we want Jacobian
- v, vector that we want multiplied by the Jacobian
Returns:
- J^T @ v, without using n^2 space
'''
grad_list = ch.autograd.grad(f_x, theta, v, retain_graph=True, create_graph=create)
return ch.nn.utils.parameters_to_vector(grad_list)
def jvp(f_x, theta, v):
'''
Jacobian-vector product
Calculate the Jacobian-vector product, see
https://j-towns.github.io/2017/06/12/A-new-trick.html for math
Input:
- f_x, function of which we want the Jacobian
- theta, variable with respect to which we want Jacobian
- v, vector that we want multiplied by the Jacobian
Returns:
- J @ v, without using n^2 space
'''
w = ch.ones_like(f_x, requires_grad=True)
JTw = vjp(f_x, theta, w)
return vjp(JTw, w, v)
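# Sanity-check sketch (illustrative only): for the elementwise function
# f(theta) = theta ** 2 the Jacobian is diag(2 * theta), so J @ v with v = [1, 0]
# should come out as [2 * theta_0, 0].
def _jvp_example():
    theta = ch.tensor([1.0, 2.0], requires_grad=True)
    f_x = theta ** 2
    v = ch.tensor([1.0, 0.0])
    return jvp(f_x, theta, v)  # expected: tensor([2., 0.])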
def cg_solve(fvp_func, b, nsteps):
'''
Conjugate Gradients Algorithm
Solves Hx = b, where H is the Fisher matrix and b is known
Input:
- fvp_func, a callable function returning Fisher-vector product
- b, the RHS of the above
- nsteps, the number of steps on CG to take
Returns:
- An approximate solution x of Hx = b
'''
# Initialize the solution, residual, direction vectors
x = ch.zeros(b.size())
r = b.clone()
p = b.clone()
new_rnorm = ch.dot(r,r)
for _ in range(nsteps):
rnorm = new_rnorm
fvp = fvp_func(p)
alpha = rnorm / ch.dot(p, fvp)
x += alpha * p
r -= alpha * fvp
new_rnorm = ch.dot(r, r)
ratio = new_rnorm / rnorm
p = r + ratio * p
return x
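# Worked example (illustrative only): solve A x = b for a small symmetric
# positive-definite A by passing a plain matrix-vector product as `fvp_func`.
# CG converges in at most 2 iterations for a 2x2 system, so nsteps=2 suffices.
def _cg_solve_example():
    A = ch.tensor([[4.0, 1.0], [1.0, 3.0]])
    b = ch.tensor([1.0, 2.0])
    return cg_solve(lambda p: A @ p, b, nsteps=2)  # approx tensor([0.0909, 0.6364])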
def backtracking_line_search(f, x, expected_improve_rate,
num_tries=10, accept_ratio=.1):
'''
Backtracking Line Search
Inputs:
- f, function for improvement of the objective
- x, biggest step to try (successively halved)
- num_tries, number of times to try halving x before giving up
- accept_ratio, how much of the expected improve rate we have to
improve by
'''
# f gives improvement
for i in range(num_tries):
scaling = 2**(-i)
scaled = x * scaling
improve = f(scaled)
expected_improve = expected_improve_rate * scaling
if improve/expected_improve > accept_ratio and improve > 0:
print("We good! %f" % (scaling,))
return scaled
return 0.
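# Minimal sketch (assumptions: the toy objective f(x) = x ** 2 and the oversized step
# are invented for this example). The search halves the step until the observed
# improvement is positive and at least 10% of the linearized estimate.
def _line_search_example():
    x0 = ch.tensor([3.0])
    def improvement(step):
        # decrease in f(x) = x ** 2 when moving from x0 to x0 - step
        return float((x0 ** 2 - (x0 - step) ** 2).sum())
    full_step = ch.tensor([10.0])
    expected_improve_rate = float((2 * x0 * full_step).sum())
    return backtracking_line_search(improvement, full_step, expected_improve_rate)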
########################
### NORMALIZATION HELPERS:
# RunningStat, ZFilter, StateWithTime
########################
class RunningStat(object):
'''
Keeps track of first and second moments (mean and variance)
of a streaming time series.
Taken from https://github.com/joschu/modular_rl
Math in http://www.johndcook.com/blog/standard_deviation/
'''
def __init__(self, shape):
self._n = 0
self._M = np.zeros(shape)
self._S = np.zeros(shape)
def push(self, x):
x = np.asarray(x)
assert x.shape == self._M.shape
self._n += 1
if self._n == 1:
self._M[...] = x
else:
oldM = self._M.copy()
self._M[...] = oldM + (x - oldM) / self._n
self._S[...] = self._S + (x - oldM) * (x - self._M)
@property
def n(self):
return self._n
@property
def mean(self):
return self._M
@property
def var(self):
return self._S / (self._n - 1) if self._n > 1 else np.square(self._M)
@property
def std(self):
return np.sqrt(self.var)
@property
def shape(self):
return self._M.shape
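# Quick usage sketch (illustrative only): pushing 1, 2, 3 gives a running mean of 2.0
# and a sample standard deviation of 1.0 via the Welford-style update above.
def _running_stat_example():
    rs = RunningStat(())
    for x in [1.0, 2.0, 3.0]:
        rs.push(x)
    return rs.mean, rs.std  # (2.0, 1.0)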
class Identity:
'''
A convenience class which simply implements __call__
as the identity function
'''
def __call__(self, x, *args, **kwargs):
return x
def reset(self):
pass
class ConstantFilter:
def __init__(self, prev_filter, constant):
self.prev_filter = prev_filter
self.constant = constant
def __call__(self, x, **kwargs):
x = self.prev_filter(x, **kwargs)
return x * self.constant
def reset(self):
self.prev_filter.reset()
class RewardFilter:
"""
Incorrect reward normalization [copied from OAI code]
update return
divide reward by std(return) without subtracting and adding back mean
"""
def __init__(self, prev_filter, shape, gamma, clip=None):
assert shape is not None
self.gamma = gamma
self.prev_filter = prev_filter
self.rs = RunningStat(shape)
self.ret = np.zeros(shape)
self.clip = clip
def __call__(self, x, **kwargs):
x = self.prev_filter(x, **kwargs)
self.ret = self.ret * self.gamma + x
self.rs.push(self.ret)
x = x / (self.rs.std + 1e-8)
if self.clip:
x = np.clip(x, -self.clip, self.clip)
return x
def reset(self):
self.ret = np.zeros_like(self.ret)
self.prev_filter.reset()
class ZFilter:
"""
y = (x-mean)/std
using running estimates of mean,std
"""
def __init__(self, prev_filter, shape, center=True, scale=True, clip=None):
assert shape is not None
self.center = center
self.scale = scale
self.clip = clip
self.rs = RunningStat(shape)
self.prev_filter = prev_filter
def __call__(self, x, **kwargs):
x = self.prev_filter(x, **kwargs)
self.rs.push(x)
if self.center:
x = x - self.rs.mean
if self.scale:
if self.center:
x = x / (self.rs.std + 1e-8)
else:
diff = x - self.rs.mean
diff = diff/(self.rs.std + 1e-8)
x = diff + self.rs.mean
if self.clip:
x = np.clip(x, -self.clip, self.clip)
return x
def reset(self):
self.prev_filter.reset()
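# Composition sketch (illustrative only): filters wrap a previous filter, so a
# standalone z-scoring filter is built on top of Identity. On the very first
# observation the running mean equals the observation itself, so the output is zero.
def _zfilter_example():
    zf = ZFilter(Identity(), shape=(3,), clip=5.0)
    return zf(np.array([1.0, 2.0, 3.0]))  # array([0., 0., 0.])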
class StateWithTime:
'''
Keeps track of the time t in an environment, and
adds t/T as a dimension to the state, where T is the
time horizon, given at initialization.
'''
def __init__(self, prev_filter, horizon):
self.counter = 0
self.horizon = horizon
self.prev_filter = prev_filter
def __call__(self, x, reset=False, count=True, **kwargs):
x = self.prev_filter(x, **kwargs)
self.counter += 1 if count else 0
self.counter = 0 if reset else self.counter
return np.array(list(x) + [self.counter/self.horizon,])
def reset(self):
self.prev_filter.reset()
# TODO: redo this in a not fucked way (ie using python language features)
class Trajectories:
def __init__(self, states=None, rewards=None, returns=None, not_dones=None,
actions=None, action_log_probs=None, advantages=None,
unrolled=False, values=None):
self.states = states
self.rewards = rewards
self.returns = returns
self.values = values
self.not_dones = not_dones
self.actions = actions
self.action_log_probs = action_log_probs
self.advantages = advantages
self.unrolled = unrolled
# this is disgusting and we should fix it
if states is not None:
num_saps = states.shape[0]
assert states is None or states.shape[0] == num_saps
assert rewards is None or rewards.shape[0] == num_saps
assert returns is None or returns.shape[0] == num_saps
assert values is None or values.shape[0] == num_saps
assert not_dones is None or not_dones.shape[0] == num_saps
assert actions is None or actions.shape[0] == num_saps
assert action_log_probs is None or action_log_probs.shape[0] == num_saps
assert advantages is None or advantages.shape[0] == num_saps
self.size = num_saps
def unroll(self):
assert not self.unrolled
return self.tensor_op(unroll, should_wrap=False)
def tensor_op(self, lam, should_wrap=True):
if should_wrap:
def op(*args):
return [lam(v) for v in args]
else:
op = lam
tt = op(self.states, self.rewards, self.returns, self.not_dones)
tt2 = op(self.actions, self.action_log_probs, self.advantages)
values, = op(self.values)
ts = Trajectories(states=tt[0], rewards=tt[1], returns=tt[2],
not_dones=tt[3], actions=tt2[0],
action_log_probs=tt2[1], advantages=tt2[2],
values=values, unrolled=True)
return ts
########################
### NEURAL NETWORK HELPERS:
# orthogonal_init
########################
def orthogonal_init(tensor, gain=1):
'''
Fills the input `Tensor` using the orthogonal initialization scheme from OpenAI
Args:
tensor: an n-dimensional `torch.Tensor`, where :math:`n \geq 2`
gain: optional scaling factor
Examples:
>>> w = torch.empty(3, 5)
>>> orthogonal_init(w)
'''
if tensor.ndimension() < 2:
raise ValueError("Only tensors with 2 or more dimensions are supported")
rows = tensor.size(0)
cols = tensor[0].numel()
flattened = tensor.new(rows, cols).normal_(0, 1)
if rows < cols:
flattened.t_()
    # Compute an orthonormal basis via the SVD
u, s, v = ch.svd(flattened, some=True)
if rows < cols:
u.t_()
q = u if tuple(u.shape) == (rows, cols) else v
with ch.no_grad():
tensor.view_as(q).copy_(q)
tensor.mul_(gain)
return tensor
def add_gaussian_noise(reward, std):
if std > 0.0:
reward += np.random.normal(loc=0.0, scale=std)
return reward
def add_uniform_noise(reward, p, high=1., low=-1.):
random_mask = np.random.binomial(1, p)
random_reward = np.random.uniform(low, high)
return random_mask * random_reward + (1 - random_mask) * reward
def add_sparsity_noise(reward, p):
random_mask = np.random.binomial(1, p)
return (1 - random_mask) * reward
| [] |
2024-01-10 | lastmile-ai/aiconfig | python~src~aiconfig~schema.py | import warnings
from typing import Any, Dict, List, Literal, Optional, Union
from aiconfig.util.config_utils import extract_override_settings
from pydantic import BaseModel
# Pydantic doesn't handle circular type references very well, TODO: handle this better than defining as type Any
# JSONObject represents a JSON object as a dictionary with string keys and JSONValue values
JSONObject = Dict[str, Any]
# InferenceSettings represents settings for model inference as a JSON object
InferenceSettings = JSONObject
class OutputDataWithStringValue(BaseModel):
"""
This represents the output content that is storied as a string, but we use
both the `kind` field here and the `mime_type` in ExecuteResult to convert
the string into the output format we want.
"""
kind: Literal["file_uri", "base64"]
value: str
class FunctionCallData(BaseModel):
"""
    Function call data representing a single function call
"""
arguments: str
"""
The arguments to call the function with, as generated by the model in JSON
format. Note that the model does not always generate valid JSON, and may
hallucinate parameters not defined by your function schema. Validate the
arguments in your code before calling your function.
"""
name: str
"""The name of the function to call."""
class Config:
extra = "allow"
class ToolCallData(BaseModel):
"""
Generic tool call data
"""
id: Optional[str]
"""
Note: the `id` field is non-optional in OpenAI but we're keeping
it optional for practical purposes. See:
https://github.com/lastmile-ai/aiconfig/pull/636#discussion_r1437087325
"""
function: FunctionCallData
type: Literal["function"]
class OutputDataWithToolCallsValue(BaseModel):
"""
    This is based off of ChatCompletionMessageToolCall from openai.types.chat
and is used for general tool calls.
"""
kind: Literal["tool_calls"]
value: List[ToolCallData]
OutputDataWithValue = Union[
OutputDataWithStringValue,
OutputDataWithToolCallsValue,
]
class ExecuteResult(BaseModel):
"""
ExecuteResult represents the result of executing a prompt.
"""
# Type of output
output_type: Literal["execute_result"]
# nth choice.
execution_count: Union[int, None] = None
# The result of the executing prompt.
data: Union[OutputDataWithValue, str, Any]
# The MIME type of the result. If not specified, the MIME type will be assumed to be plain text.
mime_type: Optional[str] = None
# Output metadata
metadata: Dict[str, Any]
class Error(BaseModel):
"""
Error represents an error that occurred while executing a prompt.
"""
# Type of output
output_type: Literal["error"]
# The name of the error
ename: str
# The value, or message, of the error
evalue: str
# The error's traceback, represented as an array of strings
traceback: List[str]
# Output can be one of ExecuteResult, ExecuteResult, DisplayData, Stream, or Error
Output = Union[ExecuteResult, Error]
class ModelMetadata(BaseModel):
# The ID of the model to use.
name: str
# Model Inference settings that apply to this prompt.
settings: Optional[InferenceSettings] = {}
class PromptMetadata(BaseModel):
# Model name/settings that apply to this prompt
# These settings override any global model settings that may have been defined in the AIConfig metadata.
# If this is a string, it is assumed to be the model name.
    # If this is undefined, the default model specified in the default_model property will be used for this Prompt.
model: Optional[Union[ModelMetadata, str]] = None
# Tags for this prompt. Tags must be unique, and must not contain commas.
tags: Optional[List[str]] = None
# Parameter definitions that are accessible to this prompt
parameters: Optional[JSONObject] = {}
class Config:
extra = "allow"
class Attachment(BaseModel):
"""
Attachment used to pass data in PromptInput for non-text inputs (ex: image, audio)
"""
# The data representing the attachment
data: Any
# The MIME type of the result. If not specified, the MIME type will be assumed to be text/plain
mime_type: Optional[str] = None
# Output metadata
metadata: Optional[Dict[str, Any]] = None
class PromptInput(BaseModel):
# Attachments can be used to pass in non-text inputs (ex: image, audio)
attachments: Optional[List[Attachment]] = None
# Freeform data for the overall prompt input (ex: document answering question
# requires both images (attachments) and question (data))
data: Optional[Any] = None
class Config:
extra = "allow"
class Prompt(BaseModel):
# A unique identifier for the prompt. This is used to reference the prompt in other parts of the AIConfig (such as other prompts)
name: str
# The prompt string, or a more complex prompt object
input: Union[str, PromptInput]
# Metadata for the prompt
metadata: Optional[PromptMetadata] = None
# Execution, display, or stream outputs (currently a work-in-progress)
outputs: Optional[List[Output]] = []
class Config:
extra = "allow"
def add_output(self, output: Output):
self.outputs.append(output)
def get_raw_prompt_from_config(self):
"""Gets raw prompt from config"""
if isinstance(self.input, str):
return self.input
else:
return self.input.prompt
class SchemaVersion(BaseModel):
major: int
minor: int
class ConfigMetadata(BaseModel):
# Parameter definitions that are accessible to all prompts in this AIConfig.
# These parameters can be referenced in the prompts using handlebars syntax.
# For more information, see https://handlebarsjs.com/guide/#basic-usage.
parameters: Optional[JSONObject] = {}
# Globally defined model settings. Any prompts that use these models will have these settings applied by default,
# unless they override them with their own model settings.
models: Optional[Dict[str, InferenceSettings]] = {}
# Default model to use for prompts that do not specify a model.
default_model: Optional[str] = None
# Model ID to ModelParser ID mapping.
# This is useful if you want to use a custom ModelParser for a model, or if a single ModelParser can handle multiple models.
# Key is Model ID , Value is ModelParserID
model_parsers: Optional[Dict[str, str]] = None
class Config:
extra = "allow"
class AIConfig(BaseModel):
"""
AIConfig schema, latest version. For older versions, see AIConfigV*
"""
# Friendly name descriptor for the AIConfig. Could default to the filename if not specified.
name: str
# The version of the AIConfig schema
schema_version: Union[SchemaVersion, Literal["v1", "latest"]] = "latest"
# Root-level metadata that applies to the entire AIConfig
metadata: ConfigMetadata
# Description of the AIConfig. If you have a collection of different AIConfigs, this may be used for dynamic prompt routing.
description: Optional[str] = ""
# An Array of prompts that make up the AIConfig
prompts: List[Prompt] = []
# An index of prompts by name, constructed during post-initialization.
prompt_index: Dict[str, Prompt] = {}
class Config:
extra = "allow"
def model_post_init(self, __context):
"""Post init hook for model"""
self.prompt_index = {prompt.name: prompt for prompt in self.prompts}
def set_name(self, name: str):
"""
Sets the name of the AIConfig
Args:
name (str): The name of the AIConfig
"""
self.name = name
def set_description(self, description: str):
"""
Sets the description of the AIConfig
Args:
description (str): The description of the AIConfig
"""
self.description = description
def add_model(self, model_name: str, model_settings: InferenceSettings):
"""
Adds model settings to config level metadata
"""
if model_name in self.metadata.models:
raise Exception(f"Model '{model_name}' already exists. Use `update_model()`.")
self.metadata.models[model_name] = model_settings
def delete_model(self, model_name: str):
"""
Deletes model settings from config level metadata
"""
if model_name not in self.metadata.models:
raise Exception(f"Model '{model_name}' does not exist.")
del self.metadata.models[model_name]
def get_model_name(self, prompt: Union[str, Prompt]) -> str:
"""
Extracts the model ID from the prompt.
Args:
prompt: Either the name of the prompt or a prompt object.
Returns:
str: Name of the model used by the prompt.
"""
if isinstance(prompt, str):
prompt = self.prompt_index[prompt]
if not prompt:
raise Exception(f"Prompt '{prompt}' not found in config.")
if not prompt.metadata or not prompt.metadata.model:
# If the prompt doesn't have a model, use the default model
default_model = self.metadata.default_model
if not default_model:
raise Exception(f"No model specified in AIConfig metadata, prompt {prompt.name} does not specify a model.")
return default_model
if isinstance(prompt.metadata.model, str):
return prompt.metadata.model
else:
# Expect a ModelMetadata object
return prompt.metadata.model.name
def set_default_model(self, model_name: Union[str, None]):
"""
Sets the model to use for all prompts by default in the AIConfig. Set to None to delete the default model.
Args:
model_name (str): The name of the default model.
"""
self.metadata.default_model = model_name
def get_default_model(self) -> Union[str, None]:
"""
Returns the default model for the AIConfig.
"""
return self.metadata.default_model
def set_model_parser(self, model_name: str, model_parser_id: Union[str, None]):
"""
Adds a model name : model parser ID mapping to the AIConfig metadata. This model parser will be used to parse Promps in the AIConfig that use the given model.
Args:
model_name (str): The name of the model to set the parser.
model_parser_id (str): The ID of the model parser to use for the mode. If None, the model parser for the model will be removed.
"""
if not self.metadata.model_parsers:
self.metadata.model_parsers = {}
self.metadata.model_parsers[model_name] = model_parser_id
def get_metadata(self, prompt_name: Optional[str] = None):
"""
Gets the metadata for a prompt. If no prompt is specified, gets the global metadata.
Args:
prompt_name (str, optional): The name of the prompt. Defaults to None.
Returns:
PromptMetadata: The metadata for the prompt.
"""
if prompt_name:
if prompt_name not in self.prompt_index:
raise IndexError(f"Prompt '{prompt_name}' not found in config.")
return self.prompt_index[prompt_name].metadata
else:
return self.metadata
def get_parameters(
self,
prompt_or_prompt_name: Optional[str | Prompt] = None,
) -> JSONObject:
"""
Get the parameters for a prompt, using the global parameters if
needed.
Args:
prompt_or_prompt_name Optional[str | Prompt]: The name of the
prompt or the prompt object. If not specified, use the
global parameters.
"""
prompt = prompt_or_prompt_name
if isinstance(prompt_or_prompt_name, str):
if prompt_or_prompt_name not in self.prompt_index:
raise IndexError(f"Prompt '{prompt_or_prompt_name}' not found in config, available prompts are:\n {list(self.prompt_index.keys())}")
prompt = self.prompt_index[prompt_or_prompt_name]
assert prompt is None or isinstance(prompt, Prompt)
if prompt is None or not prompt.metadata or not prompt.metadata.parameters:
return self.get_global_parameters()
return self.get_prompt_parameters(prompt)
# pylint: disable=W0102
def get_global_parameters(
self,
default_return_value: JSONObject = {},
) -> JSONObject:
"""
Get the global parameters for the AIConfig. If they're not defined,
return a default value ({} unless overridden)
Args:
default_return_value JSONObject - Default value to return if
global parameters are not defined.
"""
return self._get_global_parameters_exact() or default_return_value
# pylint: enable=W0102
def _get_global_parameters_exact(self) -> JSONObject | None:
"""
Get the global parameters for the AIConfig. This should be the
the explicit value (ie: if parameters is None, return None, not {})
"""
return self.metadata.parameters
# pylint: disable=W0102
def get_prompt_parameters(
self,
prompt: Prompt,
default_return_value: JSONObject = {},
) -> JSONObject:
"""
Get the prompt's local parameters. If they're not defined,
return a default value ({} unless overridden)
Args:
default_return_value JSONObject - Default value to return if
prompt parameters are not defined.
"""
return self._get_prompt_parameters_exact(prompt) or default_return_value
# pylint: enable=W0102
def _get_prompt_parameters_exact(
self,
prompt: Prompt,
) -> JSONObject | None:
"""
Get the global parameters for the AIConfig. This should be the
the explicit value (ie: if parameters is None, return None, not {})
"""
if not prompt.metadata:
return prompt.metadata
return prompt.metadata.parameters
def set_parameter(self, parameter_name: str, parameter_value: Union[str, JSONObject], prompt_name: Optional[str] = None):
"""
Sets a parameter in the AI configuration metadata. If a prompt_name
is specified, it adds the parameter to a specific prompt's metadata
in the AI configuration. Otherwise, it adds the parameter to the
global metadata.
Args:
parameter_name (str): The name of the parameter.
parameter_value: The value of the parameter. It can be more than
just a string. It can be a string or a JSON object. For
example:
{
person: {
firstname: "john",
lastname: "smith",
},
}
Using the parameter in a prompt with handlebars syntax would
look like this:
"{{person.firstname}} {{person.lastname}}"
prompt_name (str, optional): The name of the prompt to add the
parameter to. Defaults to None.
"""
target_metadata = self.get_metadata(prompt_name)
if not target_metadata:
# Technically this check is not needed since the metadata is a
# required field in Config while it is not required in Prompt.
# Therefore, if it's not defined, we can infer that it should
# be a PromptMetadata type, but this is just good robustness
# in case we ever change our schema in the future
if prompt_name:
prompt = self.get_prompt(prompt_name)
# check next line not needed since it's already assumed
# we got here because target_metadata is None, just being
# extra safe
if not prompt.metadata:
target_metadata = PromptMetadata(parameters={})
prompt.metadata = target_metadata
else:
if not self.metadata:
target_metadata = ConfigMetadata()
self.metadata = target_metadata
assert target_metadata is not None
if target_metadata.parameters is None:
target_metadata.parameters = {}
target_metadata.parameters[parameter_name] = parameter_value
def set_parameters(self, parameters: JSONObject, prompt_name: Optional[str] = None) -> None:
"""
Set the entire parameters dict for either a prompt (if specified)
or the AIConfig (if prompt is not specified). It overwrites whatever
was previously stored as parameters for the prompt or AIConfig.
Args:
parameters (JSONObject): The entire set of parameters. Ex:
{
"city": "New York",
"sort_by": "geographical location",
}
In this example, we call `set_parameter` twice:
1) set_parameter("city", "New York", prompt_name)
2) set_parameter("sort_by", "geographical location", prompt_name)
prompt_name (str, optional): The name of the prompt to add the
parameters dict to. If none is provided, we update the
AIConfig-level parameters instead
"""
# Clear all existing parameters before setting new ones
parameter_names_to_delete = []
if prompt_name:
prompt = self.get_prompt(prompt_name)
parameter_names_to_delete = list(self.get_prompt_parameters(prompt).keys())
else:
parameter_names_to_delete = list(self.get_global_parameters().keys())
for parameter_name in parameter_names_to_delete:
self.delete_parameter(parameter_name, prompt_name)
for parameter_name, parameter_value in parameters.items():
self.set_parameter(parameter_name, parameter_value, prompt_name)
def update_parameter(
self,
parameter_name: str,
parameter_value: str,
prompt_name: Optional[str] = None,
):
"""
Updates a parameter in the AI configuration metadata. If a prompt_name is specified, it updates the parameter
in a specific prompt's metadata in the AI configuration. Otherwise, it updates the parameter in the global
metadata. If the parameter doesn't exist, it adds the parameter.
Args:
parameter_name (str): The name of the parameter.
parameter_value (str): The value of the parameter.
prompt_name (str, optional): The name of the prompt (if applicable). Defaults to None.
"""
target_metadata = self.get_metadata(prompt_name)
target_metadata.parameters[parameter_name] = parameter_value
def delete_parameter(self, parameter_name, prompt_name: Optional[str] = None):
"""
Removes a parameter from the AI configuration metadata. If a prompt_name is specified, it removes the parameter
from that prompt's metadata in the AI configuration. Otherwise, it removes the parameter from the global
metadata. If the parameter doesn't exist, a KeyError is raised.
Args:
parameter_name (str): The name of the parameter.
prompt_name (str, optional): The name of the prompt to remove the parameter from. Defaults to None.
"""
target_metadata = self.get_metadata(prompt_name)
# Remove the parameter if it exists
if parameter_name in target_metadata.parameters:
del target_metadata.parameters[parameter_name]
else:
scope_suffix = f"prompt '{prompt_name}'" if prompt_name is not None else "current AIConfig-scoped metadata"
raise KeyError(f"Parameter '{parameter_name}' does not exist for {scope_suffix}.")
def get_prompt(self, prompt_name: str) -> Prompt:
"""
Gets a prompt by name from the aiconfig.
Args:
prompt_name (str): The name of the prompt to get.
Returns:
Prompt: The prompt object.
"""
if prompt_name not in self.prompt_index:
raise IndexError("Prompt '{}' not found in config, available prompts are:\n {}".format(prompt_name, list(self.prompt_index.keys())))
return self.prompt_index[prompt_name]
def add_prompt(self, prompt_name: str, prompt_data: Prompt, index: int | None = None):
"""
Adds a prompt to the .aiconfig.
Args:
prompt_name (str): The name of the prompt to add.
prompt_data (Prompt): The prompt object containing the prompt data.
"""
if prompt_name is None:
prompt_name = prompt_data.name
if prompt_name in self.prompt_index:
raise Exception("Prompt with name {} already exists. Use`update_prompt()`".format(prompt_name))
prompt_data.name = prompt_name
self.prompt_index[prompt_name] = prompt_data
if index is None:
self.prompts.append(prompt_data)
else:
self.prompts.insert(index, prompt_data)
def update_prompt(self, prompt_name: str, prompt_data: Prompt):
"""
Given a prompt name and a prompt object, updates the prompt in the .aiconfig.
Args:
prompt_name (str): The name of the prompt to update.
prompt_data (Prompt): The prompt object containing the updated prompt data.
"""
if prompt_name not in self.prompt_index:
raise IndexError("Prompt '{}' not found in config, available prompts are:\n {}".format(prompt_name, list(self.prompt_index.keys())))
self.prompt_index[prompt_name] = prompt_data
# update prompt list
for i, prompt in enumerate(self.prompts):
if prompt.name == prompt_name:
self.prompts[i] = prompt_data
del self.prompt_index[prompt_name]
self.prompt_index[prompt_data.name] = prompt_data
break
def delete_prompt(self, prompt_name: str):
"""
Given a prompt name, deletes the prompt from the .aiconfig.
Args:
prompt_name (str): The name of the prompt to delete.
"""
if prompt_name not in self.prompt_index:
raise IndexError("Prompt '{}' not found in config, available prompts are:\n {}".format(prompt_name, list(self.prompt_index.keys())))
del self.prompt_index[prompt_name]
# remove from prompt list
self.prompts = [prompt for prompt in self.prompts if prompt.name != prompt_name]
def get_model_metadata(self, inference_settings: InferenceSettings, model_id: str) -> ModelMetadata:
"""
Generate a model metadata object based on the provided inference settings
This function takes the inference settings and the model ID and generates a ModelMetadata object.
Args:
inference_settings (InferenceSettings): The inference settings.
model_id (str): The model id.
Returns:
ModelMetadata: The model metadata.
"""
overridden_settings = extract_override_settings(self, inference_settings, model_id)
if not overridden_settings:
model_metadata = ModelMetadata(**{"name": model_id})
else:
model_metadata = ModelMetadata(**{"name": model_id, "settings": overridden_settings})
return model_metadata
# TODO (rossdan): If we pass in a new model under ModelMetadata, but that model is
# not already registered to a model parser, we should throw an error and instruct
# user how to update this in their code or AIConfig. OR we should allow a
# model_parser_id field to be passed into ModelMetadata and (somehow) find the ID
# that matches this class and do this automatically with the
# `update_model_parser_registry_with_config_runtime`` function
# Tracked in https://github.com/lastmile-ai/aiconfig/issues/503
def update_model(self, model_name: Optional[str] = None, settings: Optional[InferenceSettings] = None, prompt_name: Optional[str] = None):
"""
Updates model name and/or settings at the prompt (if specified) or AIConfig level.
Args:
model_name (str): The model name to update.
- If None: keep existing name for prompt; error for AIConfig.
settings (dict): The model settings to update.
- If None: keep existing settings for prompt; keep existing
for AIConfig (if model name exists) or create empty settings
prompt_name (Optional[str]): If specified, the model update will
only be applied to the prompt with the given prompt_name.
Examples:
update_model("gpt3", None, "my_prompt")
--> updates "my_prompt" to use "gpt3" with existing settings
update_model("gpt3", None)
--> updates aiconfig model key "gpt3" to use existing
settings (empty if model was not previously defined)
update_model(None, {}, "my_prompt")
--> updates "my_prompt" to use same model with empty settings
update_model(None, {})
--> errors because AiConfig needs a name to know which
model to update
update_model(None, None, "my_prompt")
--> errors because no model name or settings provided
"""
if model_name is None and settings is None:
raise ValueError("Cannot update model. Either model name or model settings must be specified.")
if model_name is None and prompt_name is None: # Only settings param is set
raise ValueError(
"""
Cannot update model. There are two things you could be trying to do: \
1) Update the settings of a prompt \
Fix: You must pass in a `prompt_name` argument \
2) Update the settings at the AIConfig-level \
Fix: You must pass in a `name` for the model you wish \
to update. The AIConfig level can have multiple models, \
so without a model name, we don't know which model \
to set the settings for.
"""
)
if prompt_name is not None:
# We first update the model name, then update the model settings
if model_name is not None:
self._update_model_name_for_prompt(model_name, prompt_name)
if settings is not None:
self._update_model_settings_for_prompt(settings, prompt_name)
else:
if model_name is not None:
self._update_model_for_aiconfig(model_name, settings)
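# Illustrative sketch mirroring the docstring examples above (assumes `config` is an
# AIConfig instance that contains a prompt named "my_prompt"):
#   config.update_model("gpt3", None, "my_prompt")    # switch the prompt's model, keep settings
#   config.update_model(None, {}, "my_prompt")        # same model, clear settings
#   config.update_model("gpt3", {"temperature": 0})   # AIConfig-level model settings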
def _update_model_name_for_prompt(self, model_name: str, prompt_name: str):
"""
Updates the model name at the prompt level. To keep things simple, we
only update the model name here, preserving existing settings if they
exist or setting them to an empty dict. Settings are updated in a
follow-up `_update_model_settings_for_prompt()` call; the reason is
to delegate the `settings is None` check to `update_model()`
instead of making this function more complicated.
If a model is not already specified for a prompt, we err on the side of
passing the entire ModelMetadata into the prompt, even if there
are no settings, because this makes it easier to manage
future writes in case we want to add model settings later
(see `_update_model_settings_for_prompt`).
Args:
model_name (str): Model name to set
prompt_name (str): The name of the prompt we want to update
"""
prompt = self.get_prompt(prompt_name)
if not prompt:
raise IndexError(
f"Cannot update model name of '{model_name}' for prompt '{prompt_name}'. Prompt {prompt_name} does not exist in AIConfig."
)
if prompt.metadata is None:
model_metadata = ModelMetadata(name=model_name, settings={})
prompt.metadata = PromptMetadata(model=model_metadata)
elif prompt.metadata.model is None or isinstance(prompt.metadata.model, str):
prompt.metadata.model = ModelMetadata(name=model_name, settings={})
else:
# prompt.metadata.model is a ModelMetadata object
model_settings: InferenceSettings = prompt.metadata.model.settings or {}
prompt.metadata.model = ModelMetadata(name=model_name, settings=model_settings)
def _update_model_settings_for_prompt(self, settings: InferenceSettings, prompt_name: str):
"""
Updates model settings at the prompt level. We do not update at the
AIConfig level because an AIConfig can have multiple models, so
without the model name, we don't know which model to update.
Args:
settings (InferenceSettings): Model settings to set
prompt_name (str): The name of the prompt we want to update.
"""
prompt = self.get_prompt(prompt_name)
if not prompt:
raise IndexError(f"Cannot update model settings for prompt '{prompt_name}'. Prompt '{prompt_name}' does not exist in AIConfig.")
metadata_error_message = f"""
Cannot update model settings for prompt '{prompt_name}' because it does not \
have a model name set for it. Please be sure that a model is set for this \
prompt. You can do this by calling `update_model()` and passing a model name \
as an argument.
"""
if prompt.metadata is None or prompt.metadata.model is None:
raise ValueError(metadata_error_message)
if isinstance(prompt.metadata.model, str):
model_name = prompt.metadata.model
prompt.metadata.model = ModelMetadata(name=model_name, settings=settings)
else:
prompt.metadata.model.settings = settings
def _update_model_for_aiconfig(self, model_name: str, settings: Union[InferenceSettings, None], prompt_name: Optional[str] = None):
"""
Updates model name at the AIConfig level.
Args:
model_name (str): Model name to set
settings (Optional[InferenceSettings]): Model settings to set
For AIConfig-level settings we don't know the old model name,
so we can't grab the older settings. If this is None, we will:
Case 1: Model name already exists at AIConfig level
--> Preserve the existing settings
Case 2: Model name is new at AIConfig level
--> Create an empty dict
"""
warning_message = f"""
No prompt name was given to update the model name to '{model_name}'. We are \
assuming this is intentional and are therefore updating the \
AIConfig-level settings. If this is a mistake, please rerun the \
`update_model` function with a specified `prompt_name` argument.
"""
warnings.warn(warning_message)
if self.metadata.models is None:
model_settings = settings or {}
self.metadata.models = {model_name: model_settings}
else:
# If the model name already exists and settings is None,
# this is essentially a no-op since we are preserving
# existing settings for that model name
model_settings = settings or self.metadata.models.get(model_name, {})
self.metadata.models[model_name] = model_settings
def set_metadata(self, key: str, value: Any, prompt_name: Optional[str] = None):
"""
Sets a metadata property in the AIConfig
Args:
key (str): The Metadata key.
value (Any): Metadata value. Must be a JSON-serializable object (i.e. dict, list, str, etc.).
prompt_name (str, optional): If specified, the metadata will only be updated for the prompt with the given name
"""
if prompt_name:
prompt = self.get_prompt(prompt_name)
if not prompt:
raise IndexError(f"Cannot set metadata property '{key}' for prompt {prompt_name}. Prompt {prompt_name} does not exist in AIConfig.")
setattr(prompt.metadata, key, value)
else:
setattr(self.metadata, key, value)
def delete_metadata(self, key: str, prompt_name: Optional[str] = None):
"""
Removes a metadata property in the AIConfig
Args:
key (str): The Metadata key.
prompt_name (str, optional): If specified, the metadata will only be deleted for the prompt with the given name
"""
if prompt_name:
prompt = self.get_prompt(prompt_name)
if not prompt:
raise IndexError(f"Cannot delete metadata. Prompt '{prompt_name}' not found in config.")
if hasattr(prompt.metadata, key):
delattr(prompt.metadata, key)
else:
raise KeyError(f"Metadata '{key}' does not exist for Prompt {prompt_name}.")
else:
if hasattr(self.metadata, key):
delattr(self.metadata, key)
else:
raise KeyError(f"Metadata '{key}' does not exist in config.")
# TODO: rename _get_metadata to get_metadata
def add_output(self, prompt_name: str, output: Output, overwrite: bool = False):
"""
Add an output to the prompt with the given name in the AIConfig
Args:
prompt_name (str): The name of the prompt to add the output to.
output (Output): The output to add.
overwrite (bool, optional): Overwrites the existing output if True. Otherwise appends the output to the prompt's output list. Defaults to False.
"""
prompt = self.get_prompt(prompt_name)
if not prompt:
raise IndexError(f"Cannot add output. Prompt '{prompt_name}' not found in config.")
if not output:
raise ValueError(f"Cannot add output to prompt '{prompt_name}'. Output is not defined.")
if overwrite:
prompt.outputs = [output]
else:
prompt.outputs.append(output)
def add_outputs(self, prompt_name: str, outputs: List[Output], overwrite: bool = False):
"""
Add multiple outputs to the prompt with the given name in the AIConfig
Args:
prompt_name (str): The name of the prompt to add the outputs to.
outputs (List[Output]): List of outputs to add.
overwrite (bool, optional): Overwrites the existing output if True. Otherwise appends the outputs to the prompt's output list. Defaults to False.
"""
prompt = self.get_prompt(prompt_name)
if not prompt:
raise IndexError(f"Cannot add outputs. Prompt '{prompt_name}' not found in config.")
if not outputs:
raise ValueError(f"Cannot add outputs. No outputs provided for prompt '{prompt_name}'.")
if overwrite:
prompt.outputs = outputs
else:
prompt.outputs.extend(outputs)
def delete_output(self, prompt_name: str):
"""
Deletes the outputs for the prompt with the given prompt_name.
Args:
prompt_name (str): The name of the prompt to delete the outputs for.
Returns:
List[Output]: The outputs that were deleted.
"""
prompt = self.get_prompt(prompt_name)
existing_outputs = prompt.outputs
prompt.outputs = []
return existing_outputs
def get_latest_output(self, prompt: str | Prompt):
"""
Gets the latest output associated with a prompt.
Args:
prompt (str|Prompt): The name of the prompt or the prompt object.
"""
if isinstance(prompt, str):
prompt = self.prompt_index[prompt]
if not prompt.outputs:
return None
return prompt.outputs[-1]
def get_output_text(self, prompt: str | Prompt):
"""
Gets the string representing the output from a prompt.
Args:
prompt (str|Prompt): The name of the prompt or the prompt object.
"""
"""
Library Helpers
"""
def get_global_settings(self, model_name: str):
"""
Gets the global settings for a model.
Args:
model_name (str): The name of the model.
Returns:
dict: The global settings for the model with the given name. Returns an empty dict if no settings are defined.
"""
return self.metadata.models.get(model_name, {})
AIConfigV1 = AIConfig
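# Illustrative sketch (not part of the original file; assumes `config` is an AIConfig
# instance and `Prompt` is the schema class used above) tying the prompt CRUD helpers together:
#   p = Prompt(name="greet", input="Hello {{person.firstname}}!")
#   config.add_prompt("greet", p)
#   config.set_parameter("person", {"firstname": "john"}, "greet")
#   config.update_prompt("greet", Prompt(name="greet", input="Hi {{person.firstname}}!"))
#   config.delete_prompt("greet")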
| [
"{}"
] |
2024-01-10 | lastmile-ai/aiconfig | python~src~aiconfig~eval~metrics.py | import json
import sys
from abc import abstractmethod
from dataclasses import dataclass
from functools import partial, total_ordering
from typing import Any, Callable, Generic, Protocol, Type
import lastmile_utils.lib.core.api as core_utils
import nltk
import pandas as pd
from aiconfig.eval import common
from aiconfig.eval.openai import OpenAIChatCompletionCreate, default_openai_chat_completion_create, make_fn_completion_text_to_serialized_json
from nltk.sentiment.vader import SentimentIntensityAnalyzer as NLTKSentimentIntensityAnalyzer
from result import Err, Ok, Result
@dataclass(frozen=True)
class Metric(Generic[common.T_Evaluable, common.T_MetricValue]):
"""See metrics.py for examples."""
evaluation_fn: common.EvaluationFunction[common.T_Evaluable, common.T_MetricValue]
metric_metadata: common.EvaluationMetricMetadata[common.T_Evaluable, common.T_MetricValue]
async def __call__(self, datum: common.T_Evaluable) -> common.T_MetricValue:
"""
For convenience, make a Metric callable.
Similar to torch Module `forward()`.
"""
return await self.evaluation_fn(datum)
def _check_substring(
output_datum: str,
substring: str,
#
case_sensitive: bool,
) -> bool:
if case_sensitive:
return substring in output_datum
else:
return substring.lower() in output_datum.lower()
async def _calculate_brevity(datum: str) -> int:
if len(datum) == 0:
raise ValueError("Brevity is meaningless for empty string.")
return len(datum)
@dataclass(frozen=True)
class TextSentimentScores(common.CustomMetricValue):
mapping: dict[str, float]
pos: float
neg: float
neu: float
compound: float
highest: str
@total_ordering
@dataclass(frozen=True, eq=False)
class TextOverallPositiveSentiment(common.CustomMetricValue):
"""Compare by total positive sentiment: positive - negative"""
pos: float
neg: float
def __eq__(self, other: Any) -> bool:
"""Overrides the default implementation"""
return isinstance(other, TextOverallPositiveSentiment) and (
self.pos,
self.neg,
) == (other.pos, other.neg)
def __lt__(self, other: Any) -> bool:
if not isinstance(other, TextOverallPositiveSentiment):
raise TypeError(f"Cannot compare TextPositiveSentimentScores with {type(other)}")
return self.pos - self.neg < other.pos - other.neg
class GetPolarityScores(Protocol):
@abstractmethod
def __call__(self, text: str) -> dict[str, float]:
pass
def _get_nltk_polarity_scores(text: str, model: str) -> dict[str, float]:
nltk.download(model, quiet=True) # type: ignore
return NLTKSentimentIntensityAnalyzer().polarity_scores(text) # type: ignore
def _get_sentiment_scores(output_datum: str, get_polarity_scores: GetPolarityScores) -> TextSentimentScores:
mapping: dict[str, float] = get_polarity_scores(output_datum)
highest: str = pd.Series(mapping).idxmax() # type: ignore
return TextSentimentScores(mapping=mapping, **mapping, highest=highest)
def make_get_sentiment_scores(get_polarity_scores: GetPolarityScores) -> common.EvaluationFunction[str, TextSentimentScores]:
async def _f(datum: str) -> TextSentimentScores:
return _get_sentiment_scores(datum, get_polarity_scores)
return _f
def make_get_sentiment_class(get_polarity_scores: GetPolarityScores) -> common.EvaluationFunction[str, str]:
async def _f(datum: str) -> str:
scores = _get_sentiment_scores(datum, get_polarity_scores)
return scores.highest
return _f
def make_get_overall_positive_sentiment(get_polarity_scores: GetPolarityScores) -> common.EvaluationFunction[str, TextOverallPositiveSentiment]:
async def _f(datum: str) -> TextOverallPositiveSentiment:
scores = _get_sentiment_scores(datum, get_polarity_scores)
return TextOverallPositiveSentiment(pos=scores.pos, neg=scores.neg)
return _f
def make_sentiment_scores_metric(
get_polarity_scores: GetPolarityScores,
make_evaluation_fn: Callable[[GetPolarityScores], common.EvaluationFunction[str, common.T_MetricValue]],
name: str,
description: str,
best_value: common.T_MetricValue | None = None,
worst_value: common.T_MetricValue | None = None,
) -> Metric[str, common.T_MetricValue]:
evaluation_fn: common.EvaluationFunction[str, common.T_MetricValue] = make_evaluation_fn(get_polarity_scores)
out: Metric[str, common.T_MetricValue] = Metric(
evaluation_fn=evaluation_fn,
metric_metadata=common.EvaluationMetricMetadata(
#
name=name,
description=description,
#
best_value=best_value,
worst_value=worst_value,
),
)
return out
def make_structured_llm_metric(
chat_completion_create: common.CompletionTextToSerializedJSON,
eval_llm_name: str,
pydantic_basemodel_type: Type[common.T_BaseModel],
metric_name: str,
metric_description: str,
field_descriptions: dict[str, str] = {},
) -> Metric[str, common.CustomMetricPydanticObject[common.T_BaseModel]]:
def _make_evaluation_fn(
basemodel_type: Type[common.T_BaseModel],
) -> common.EvaluationFunction[str, common.CustomMetricPydanticObject[common.T_BaseModel]]:
async def _evaluation_fn(datum: str) -> common.CustomMetricPydanticObject[common.T_BaseModel]:
resp = common.get_llm_structured_response(
input_text=datum,
chat_completion_create=chat_completion_create,
basemodel_type=basemodel_type,
)
# Intentional: unwrap and raise here to conform to the Metric interface.
match resp:
case Err(e):
raise ValueError(f"Error getting structured response: {e}")
case Ok(data):
return common.CustomMetricPydanticObject(data=data)
return _evaluation_fn
return Metric(
evaluation_fn=_make_evaluation_fn(pydantic_basemodel_type),
metric_metadata=common.EvaluationMetricMetadata(
name=metric_name,
description=metric_description,
extra_metadata=dict(
basemodel_type_name=pydantic_basemodel_type.__name__,
eval_llm_name=eval_llm_name,
field_descriptions_json=json.dumps(field_descriptions, sort_keys=True),
),
),
)
def _make_openai_structured_llm_metric_helper(
eval_llm_name: str,
pydantic_basemodel_type: Type[common.T_BaseModel],
metric_name: str,
metric_description: str,
field_descriptions: dict[str, str],
openai_chat_completion_create: OpenAIChatCompletionCreate | None = None,
) -> Result[Metric[str, common.CustomMetricPydanticObject[common.T_BaseModel]], str]:
schema = pydantic_basemodel_type.model_json_schema()
properties = schema["properties"]
required = schema["required"]
if not field_descriptions.keys() <= properties.keys():
return Err(
f"""
The following field_descriptions keys are not in the schema:
{set(field_descriptions.keys()) - set(properties.keys())}
"""
)
def _with_description(key: str, value: dict[str, str]) -> dict[str, str]:
if key in field_descriptions:
return core_utils.dict_union_allow_replace(value, {"description": field_descriptions[key]})
return value
properties = {k: _with_description(k, v) for k, v in properties.items()}
required = required or list(properties.keys())
openai_eval_llm_chat_completion_create: common.CompletionTextToSerializedJSON = make_fn_completion_text_to_serialized_json(
eval_llm_name=eval_llm_name,
properties=properties,
required=required,
openai_chat_completion_create=(openai_chat_completion_create or default_openai_chat_completion_create),
)
return Ok(
make_structured_llm_metric(
openai_eval_llm_chat_completion_create,
eval_llm_name=eval_llm_name,
pydantic_basemodel_type=pydantic_basemodel_type,
metric_name=metric_name,
metric_description=metric_description,
field_descriptions=field_descriptions,
)
)
## User interface
# 1. functions that return metrics intended to be called directly
def make_openai_structured_llm_metric(
eval_llm_name: str,
pydantic_basemodel_type: Type[common.T_BaseModel],
metric_name: str,
metric_description: str,
field_descriptions: dict[str, str] = {},
openai_chat_completion_create: OpenAIChatCompletionCreate | None = None,
) -> Metric[str, common.CustomMetricPydanticObject[common.T_BaseModel]]:
res_metric = _make_openai_structured_llm_metric_helper(
eval_llm_name=eval_llm_name,
pydantic_basemodel_type=pydantic_basemodel_type,
metric_name=metric_name,
metric_description=metric_description,
field_descriptions=field_descriptions,
openai_chat_completion_create=openai_chat_completion_create,
)
# User interface: unwrap and raise
match res_metric:
case Ok(metric):
return metric
case Err(e):
raise ValueError(f"Error making metric: {e}")
def substring_match(substring: str, case_sensitive: bool = True) -> Metric[str, bool]:
async def _fn(datum: str) -> bool:
return _check_substring(
output_datum=datum,
substring=substring,
case_sensitive=case_sensitive,
)
return Metric(
evaluation_fn=_fn,
metric_metadata=common.EvaluationMetricMetadata(
name="substring_match",
description="True (pass) if contains given substring",
best_value=True,
worst_value=False,
extra_metadata=dict(substring=substring, case_sensitive=case_sensitive),
),
)
# 2. literal metrics
brevity: Metric[str, int] = Metric(
evaluation_fn=_calculate_brevity,
metric_metadata=common.EvaluationMetricMetadata(
name="brevity",
description="Absolute text length",
best_value=1,
worst_value=sys.maxsize,
),
)
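# Illustrative sketch (not part of the original file): because Metric instances are
# callable, the metrics above can be awaited directly on a string, e.g. inside an async test:
#   assert await brevity("hello") == 5
#   assert await substring_match("ell")("hello") is True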
gpt3_5_text_ratings = make_openai_structured_llm_metric(
eval_llm_name="gpt-3.5-turbo-0613",
pydantic_basemodel_type=common.TextRatingsData,
metric_name="text_ratings",
metric_description="Text ratings",
field_descriptions=dict(
conciseness_rating="1 to 5 rating of conciseness",
conciseness_confidence="0 to 1.0 rating of confidence in conciseness rating",
conciseness_reasoning="reasoning behind the conciseness rating",
),
)
nltk_sentiment_scores_vader = make_sentiment_scores_metric(
get_polarity_scores=partial(_get_nltk_polarity_scores, model="vader_lexicon"),
make_evaluation_fn=make_get_sentiment_scores,
name="nltk_sentiment_scores_vader",
description="NLTK sentiment scores using Vader",
)
nltk_sentiment_class_vader = make_sentiment_scores_metric(
get_polarity_scores=partial(_get_nltk_polarity_scores, model="vader_lexicon"),
make_evaluation_fn=make_get_sentiment_class,
name="nltk_sentiment_class_vader",
description="Highest-probability NLTK sentiment class using Vader",
)
nltk_sentiment_score_overall_positive = make_sentiment_scores_metric(
get_polarity_scores=partial(_get_nltk_polarity_scores, model="vader_lexicon"),
make_evaluation_fn=make_get_overall_positive_sentiment,
name="nltk_sentiment_score_overall_positive",
description="Positive minus negative",
best_value=TextOverallPositiveSentiment(pos=1.0, neg=0.0),
worst_value=TextOverallPositiveSentiment(pos=0.0, neg=1.0),
)
| [] |
2024-01-10 | lastmile-ai/aiconfig | python~tests~test_run_config.py | import openai
import pytest
from aiconfig.Config import AIConfigRuntime
from mock import patch
from .conftest import mock_openai_chat_completion
from .util.file_path_utils import get_absolute_file_path_from_relative
@pytest.mark.asyncio
async def test_load_parametrized_data_config(set_temporary_env_vars):
"""Test loading a parametrized data config and resolving it
Config has 2 prompts. Prompt2 uses prompt1.output in its input.
"""
with patch.object(openai.chat.completions, "create", side_effect=mock_openai_chat_completion):
config_relative_path = "aiconfigs/parametrized_data_config.json"
config_absolute_path = get_absolute_file_path_from_relative(__file__, config_relative_path)
config = AIConfigRuntime.load(config_absolute_path)
prompt1_params = {
"sql_language": "MySQL",
"output_data": "total revenue from sales for each product category",
"table_relationships": "Employees are related to Departments through the 'DepartmentID' field.",
}
await config.run("prompt1", prompt1_params)
prompt2_resolved = await config.resolve("prompt2")
# assert prompt1_resolved == {'model': 'gpt-3.5-turbo', 'top_p': 1, 'max_tokens': 3000, 'temperature': 1, 'messages': [{'content': 'Write me a MySQL query to get this final output: total revenue from sales for each product category. Use the tables relationships defined here: Employees are related to Departments through the 'DepartmentID' field..', 'role': 'user'}]}
assert prompt2_resolved == {
"model": "gpt-4",
"top_p": 1,
"max_tokens": 3000,
"temperature": 1,
"messages": [
{
"content": "You are an expert at SQL. You will output nicely formatted SQL code with labels on columns. You will provide a short 1-2 sentence summary on the code. Name columns as one word using underscore and lowercase. Format Output in markdown ### SQL Query code block with SQL Query ### Summary short summary on code",
"role": "system",
},
{
"content": "Translate the following into PostgreSQL code:\n To calculate the total revenue from sales for each product category, we need to join multiple tables and perform aggregation. Assuming you have the following tables:\n\n1. Employees (with DepartmentID)\n2. Departments (with DepartmentID)\n3. Products (with ProductID)\n4. Sales (with EmployeeID, ProductID, and Revenue)\n\nHere is the MySQL query to obtain the desired output:\n\n```sql\nSELECT d.Category, SUM(s.Revenue) AS TotalRevenue\nFROM Sales s\nJOIN Employees e ON s.EmployeeID = e.EmployeeID\nJOIN Departments d ON e.DepartmentID = d.DepartmentID\nJOIN Products p ON s.ProductID = p.ProductID\nGROUP BY d.Category;\n```\n\nIn this query, we are joining the `Sales` table with the `Employees` table based on the `EmployeeID` column, then joining that result with the `Departments` table based on the `DepartmentID` column. Finally, we join this intermediate result with the `Products` table based on the `ProductID` column.\n\nThe `SUM(s.Revenue)` function calculates the total revenue for each group. We use the `GROUP BY` clause to group the results by the `Category` column from the `Departments` table.\n\nThis query will provide you with the final output of the total revenue from sales for each product category.",
"role": "user",
},
],
}
| [
"You are an expert at SQL. You will output nicely formatted SQL code with labels on columns. You will provide a short 1-2 sentence summary on the code. Name columns as one word using underscore and lowercase. Format Output in markdown ### SQL Query code block with SQL Query ### Summary short summary on code",
"{'sql_language': 'MySQL', 'output_data': 'total revenue from sales for each product category', 'table_relationships': \"Employees are related to Departments through the 'DepartmentID' field.\"}"
] |
2024-01-10 | lastmile-ai/aiconfig | python~src~aiconfig~ChatCompletion.py | """
Wrapper around openai that serializes prompts and saves them to the config.
Usage: see openai_wrapper.ipynb.
"""
import asyncio
import copy
from types import ModuleType
from typing import Any, Dict, Generator, List, cast
import lastmile_utils.lib.core.api as core_utils
import nest_asyncio
import openai
from aiconfig.Config import AIConfigRuntime
from aiconfig.default_parsers.openai import multi_choice_message_reducer
from aiconfig.schema import ExecuteResult, Output, Prompt
def validate_and_add_prompts_to_config(prompts: List[Prompt], aiconfig) -> None:
"""
Validates and adds new prompts to the AI configuration, ensuring no duplicates and updating outputs if necessary.
Args:
prompts (List[Prompt]): List of prompts to be validated and added.
aiconfig (AIConfigRuntime): Configuration runtime instance to which the prompts are to be added.
"""
for i, new_prompt in enumerate(prompts):
in_config = False
for config_prompt in aiconfig.prompts:
# check for duplicates (same input and settings.)
if config_prompt.input == new_prompt.input and new_prompt.metadata == config_prompt.metadata:
in_config = True
# update outputs if different
if config_prompt.outputs != new_prompt.outputs:
config_prompt.outputs = new_prompt.outputs
break
if not in_config:
new_prompt_name = "prompt_{}".format(str(len(aiconfig.prompts)))
new_prompt.name = new_prompt_name
aiconfig.add_prompt(new_prompt.name, new_prompt)
def extract_outputs_from_response(response) -> List[Output]:
"""
Extracts outputs from the OpenAI ChatCompletion response and transforms them into a structured format.
Args:
response (dict): The response dictionary received from OpenAI's ChatCompletion.
Returns:
List[Output]: A list of outputs extracted and formatted from the response.
"""
outputs = []
response = response.model_dump(exclude_none=True)
response_without_choices = {key: copy.deepcopy(value) for key, value in response.items() if key != "choices"}
for i, choice in enumerate(response.get("choices")):
response_without_choices.update({"finish_reason": choice.get("finish_reason")})
output = ExecuteResult(
**{
"output_type": "execute_result",
"data": choice["message"],
"execution_count": i,
"metadata": response_without_choices,
}
)
outputs.append(output)
return outputs
def async_run_serialize_helper(
aiconfig: AIConfigRuntime,
request_kwargs: Dict,
) -> List[Prompt]:
"""
Helper for calling AIConfig's async serialize() method: if an event loop is already running, apply nest_asyncio and run serialize() inside it; if not, create a new one and await serialize().
"""
in_event_loop = asyncio.get_event_loop().is_running()
serialized_prompts = None
async def run_and_await_serialize():
result = await aiconfig.serialize(request_kwargs.get("model"), request_kwargs, "prompt")
return result
# serialize prompts from ChatCompletion kwargs
if in_event_loop:
nest_asyncio.apply(loop=asyncio.get_event_loop())
serialized_prompts = asyncio.run(run_and_await_serialize())
else:
serialized_prompts = asyncio.run(run_and_await_serialize())
return serialized_prompts
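# Note (added for clarity; an assumption about intent): nest_asyncio.apply() patches the
# already-running loop so that asyncio.run() can be nested here, e.g. when this helper is
# invoked from a notebook cell where an event loop is already active.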
# TODO type this
def create_and_save_to_config(
output_aiconfig_ref: str | AIConfigRuntime,
openai_api: Any | None = None,
aiconfig_settings: dict[str, Any] | None = None,
) -> Any:
"""
Return a drop-in replacement for openai chat completion create
with the side effect of saving an AIConfig to the given aiconfig reference.
output_aiconfig_ref: path to aiconfig json or an AIConfigRuntime object.
openai_api: openai module or instance of openai.Client
"""
if openai_api is None:
openai_api = openai
def _get_aiconfig_runtime(output_aiconfig_path: str) -> AIConfigRuntime:
try:
return AIConfigRuntime.load(output_aiconfig_path)
except IOError:
return AIConfigRuntime.create(**(aiconfig_settings or {}))
output_aiconfig = output_aiconfig_ref if isinstance(output_aiconfig_ref, AIConfigRuntime) else _get_aiconfig_runtime(output_aiconfig_ref)
output_config_file_path = output_aiconfig_ref if isinstance(output_aiconfig_ref, str) else output_aiconfig_ref.file_path
# TODO: openai makes it hard to statically annotate.
def _create_chat_completion_with_config_saving(*args, **kwargs) -> Any: # type: ignore
response = openai_api.chat.completions.create(*args, **kwargs)
serialized_prompts = async_run_serialize_helper(output_aiconfig, kwargs)
# serialize output from response
outputs = []
# Check if response is a stream
stream = kwargs.get("stream", False) is True and isinstance(response, openai.Stream)
# Convert Response to output for last prompt
if not stream:
outputs = extract_outputs_from_response(response)
# Add outputs to last prompt
serialized_prompts[-1].outputs = outputs
validate_and_add_prompts_to_config(serialized_prompts, output_aiconfig)
# Save config to file
output_aiconfig.save(output_config_file_path, include_outputs=True)
# Return original response
return response
else:
# If response is a stream, build the output as the stream is iterated through; generate_streamed_response() below is a generator.
# TODO: type
def generate_streamed_response() -> Generator[Any, None, None]:
stream_outputs = {}
messages = {}
for chunk in response:
chunk_dict = chunk.model_dump(exclude_none=True) # type: ignore [fixme]
# streaming only returns one chunk, one choice at a time. The order in which the choices are returned is not guaranteed.
messages = multi_choice_message_reducer(messages, chunk_dict)
for choice in chunk_dict["choices"]:
index = choice.get("index")
accumulated_message_for_choice = messages.get(index, {})
output = ExecuteResult(
output_type="execute_result",
data=copy.deepcopy(accumulated_message_for_choice),
execution_count=index,
metadata={"finish_reason": choice.get("finish_reason")},
)
stream_outputs[index] = output
yield chunk
stream_outputs = [stream_outputs[i] for i in sorted(list(stream_outputs.keys()))]
# Add outputs to last prompt
serialized_prompts[-1].outputs = stream_outputs
validate_and_add_prompts_to_config(serialized_prompts, output_aiconfig)
# Save config to file
output_aiconfig.save(output_config_file_path, include_outputs=True)
return generate_streamed_response()
return _create_chat_completion_with_config_saving
def get_completion_create_wrapped_openai(
output_aiconfig_ref: str | AIConfigRuntime,
aiconfig_settings: dict[str, Any] | None = None,
) -> ModuleType:
api = openai
new_module = core_utils.make_wrap_object(
api,
"chat.completions.create",
create_and_save_to_config(
output_aiconfig_ref=output_aiconfig_ref,
openai_api=api,
aiconfig_settings=aiconfig_settings,
),
)
return cast(ModuleType, new_module)
def get_completion_create_wrapped_openai_client(
output_aiconfig_ref: str | AIConfigRuntime,
client: openai.OpenAI | None = None,
aiconfig_settings: dict[str, Any] | None = None,
) -> openai.OpenAI:
api = client if client is not None else openai.Client()
wrapped = create_and_save_to_config(
output_aiconfig_ref=output_aiconfig_ref,
openai_api=api,
aiconfig_settings=aiconfig_settings,
)
client_mocked = core_utils.make_wrap_object(api, "chat.completions.create", wrapped)
return cast(openai.OpenAI, client_mocked)
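# Illustrative sketch (assumes a valid OPENAI_API_KEY and an output path of your choosing):
# the wrapped client is a drop-in replacement that also records prompts and outputs to the
# referenced aiconfig file:
#   client = get_completion_create_wrapped_openai_client("my.aiconfig.json")
#   client.chat.completions.create(
#       model="gpt-3.5-turbo",
#       messages=[{"role": "user", "content": "Hello!"}],
#   )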
| [
"None"
] |
2024-01-10 | lastmile-ai/aiconfig | python~tests~conftest.py | import os
import pytest
from openai.types.chat import ChatCompletion
# pytest patch side effect for mocking openai calls. Use as a decorator on a test function. `@patch.object(openai.ChatCompletion, "create", side_effect=mock_openai_chat_completion)`
def mock_openai_chat_completion(**kwargs):
response_map_list = [
# Define the response for each set of input parameters. Format: [[input_params, response]]
# Example 1
[
{
"model": "gpt-3.5-turbo",
"top_p": 1,
"max_tokens": 3000,
"temperature": 1,
"stream": False,
"messages": [
{
"content": "Write me a MySQL query to get this final output: total revenue from sales for each product category. Use the tables relationships defined here: Employees are related to Departments through the 'DepartmentID' field..",
"role": "user",
}
],
},
ChatCompletion(
**{
"id": "chatcmpl-8KZBExTKtsDnJGQjIF8rS9ZggECKn",
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": {
"content": "To calculate the total revenue from sales for each product category, we need to join multiple tables and perform aggregation. Assuming you have the following tables:\n\n1. Employees (with DepartmentID)\n2. Departments (with DepartmentID)\n3. Products (with ProductID)\n4. Sales (with EmployeeID, ProductID, and Revenue)\n\nHere is the MySQL query to obtain the desired output:\n\n```sql\nSELECT d.Category, SUM(s.Revenue) AS TotalRevenue\nFROM Sales s\nJOIN Employees e ON s.EmployeeID = e.EmployeeID\nJOIN Departments d ON e.DepartmentID = d.DepartmentID\nJOIN Products p ON s.ProductID = p.ProductID\nGROUP BY d.Category;\n```\n\nIn this query, we are joining the `Sales` table with the `Employees` table based on the `EmployeeID` column, then joining that result with the `Departments` table based on the `DepartmentID` column. Finally, we join this intermediate result with the `Products` table based on the `ProductID` column.\n\nThe `SUM(s.Revenue)` function calculates the total revenue for each group. We use the `GROUP BY` clause to group the results by the `Category` column from the `Departments` table.\n\nThis query will provide you with the final output of the total revenue from sales for each product category.",
"role": "assistant",
"function_call": None,
"tool_calls": None,
},
}
],
"created": 1699912484,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion",
"system_fingerprint": None,
"usage": {
"completion_tokens": 297,
"prompt_tokens": 53,
"total_tokens": 350,
},
}
),
],
# Example 2
[
{
"model": "gpt-3.5-turbo",
"temperature": 1,
"top_p": 1,
"stream": False,
"messages": [
{
"content": "Hi! Tell me 10 cool things to do in NYC.",
"role": "user",
}
],
},
ChatCompletion(
**{
"id": "chatcmpl-8KZCn0XkSMuHHxhoBik45t32eNbyi",
"choices": [
{
"finish_reason": "stop",
"index": 0,
"message": {
"content": "1. Visit Times Square: Experience the bright lights and bustling atmosphere of this iconic NYC landmark. Enjoy shopping, dining, and various entertainment options.\n\n2. Explore Central Park: Take a leisurely stroll or rent a bike to explore the beautiful landscapes, visit the Central Park Zoo, have a picnic, or even go horseback riding.\n\n3. Walk the High Line: This elevated park built on a historic freight rail line offers stunning views of the city skyline, beautiful gardens, art installations, and a unique perspective of NYC.\n\n4. Take a ferry to the Statue of Liberty: Visit the iconic Statue of Liberty on Liberty Island and enjoy breathtaking views of the city from the Crown or the pedestal. You can also explore Ellis Island's immigration museum nearby.\n\n5. Visit the Metropolitan Museum of Art: Explore the vast collections of art and artifacts from around the world at the Met and immerse yourself in the rich cultural history.\n\n6. Discover the vibrant neighborhoods: Explore the diverse neighborhoods of NYC, such as Chinatown, Little Italy, Greenwich Village, and Williamsburg. Enjoy authentic cuisine, unique shops, and immerse yourself in different cultures.\n\n7. Catch a Broadway show: Experience the magic of Broadway by watching a world-class performance at one of the many theaters in the Theater District.\n\n8. Walk across the Brooklyn Bridge: Enjoy panoramic views of the city as you walk or bike across the iconic Brooklyn Bridge, connecting Manhattan and Brooklyn.\n\n9. Explore the Museum of Modern Art (MoMA): Discover modern and contemporary art at MoMA, featuring masterpieces by artists like Van Gogh, Picasso, Warhol, and many more.\n\n10. Enjoy the food scene: NYC is a food lover's paradise. Indulge in diverse culinary experiences, from street food to Michelin-starred restaurants. Don't forget to try New York-style pizza, bagels, and the famous cronut.",
"role": "assistant",
"function_call": None,
"tool_calls": None,
},
}
],
"created": 1699912581,
"model": "gpt-3.5-turbo-0613",
"object": "chat.completion",
"system_fingerprint": None,
"usage": {
"prompt_tokens": 20,
"completion_tokens": 379,
"total_tokens": 399,
},
}
),
],
]
for input_params, response in response_map_list:
if kwargs == input_params:
return response
raise Exception("Unexpected arguments:\n {}".format(kwargs))
@pytest.fixture
def set_temporary_env_vars():
"""
A pytest fixture to set fake values for specific environment variables
and restore their original values after the test.
Usage:
Use this fixture by adding it as an argument to your test function.
Example:
def test_something(set_temporary_env_vars):
# Your test code here
This fixture sets fake values for the following environment variables:
- OPENAI_API_KEY
It restores the original values (if they existed) after the test is complete.
"""
# Define the environment variables to be manipulated
env_vars_to_set = {"OPENAI_API_KEY"}
# Store the original values of the environment variables
original_env_values = {}
for env_var in env_vars_to_set:
original_env_values[env_var] = os.environ.get(env_var)
try:
# Set fake values for the environment variables
for env_var in env_vars_to_set:
os.environ[env_var] = "fakekey"
# Yield control to the test function
yield
finally:
# Clean up: Restore the original values of the environment variables
for env_var in env_vars_to_set:
original_value = original_env_values.get(env_var)
if original_value is not None:
os.environ[env_var] = original_value
else:
del os.environ[env_var]
| [
"Write me a MySQL query to get this final output: total revenue from sales for each product category. Use the tables relationships defined here: Employees are related to Departments through the 'DepartmentID' field..",
"Hi! Tell me 10 cool things to do in NYC.",
"1. Visit Times Square: Experience the bright lights and bustling atmosphere of this iconic NYC landmark. Enjoy shopping, dining, and various entertainment options.\n\n2. Explore Central Park: Take a leisurely stroll or rent a bike to explore the beautiful landscapes, visit the Central Park Zoo, have a picnic, or even go horseback riding.\n\n3. Walk the High Line: This elevated park built on a historic freight rail line offers stunning views of the city skyline, beautiful gardens, art installations, and a unique perspective of NYC.\n\n4. Take a ferry to the Statue of Liberty: Visit the iconic Statue of Liberty on Liberty Island and enjoy breathtaking views of the city from the Crown or the pedestal. You can also explore Ellis Island's immigration museum nearby.\n\n5. Visit the Metropolitan Museum of Art: Explore the vast collections of art and artifacts from around the world at the Met and immerse yourself in the rich cultural history.\n\n6. Discover the vibrant neighborhoods: Explore the diverse neighborhoods of NYC, such as Chinatown, Little Italy, Greenwich Village, and Williamsburg. Enjoy authentic cuisine, unique shops, and immerse yourself in different cultures.\n\n7. Catch a Broadway show: Experience the magic of Broadway by watching a world-class performance at one of the many theaters in the Theater District.\n\n8. Walk across the Brooklyn Bridge: Enjoy panoramic views of the city as you walk or bike across the iconic Brooklyn Bridge, connecting Manhattan and Brooklyn.\n\n9. Explore the Museum of Modern Art (MoMA): Discover modern and contemporary art at MoMA, featuring masterpieces by artists like Van Gogh, Picasso, Warhol, and many more.\n\n10. Enjoy the food scene: NYC is a food lover's paradise. Indulge in diverse culinary experiences, from street food to Michelin-starred restaurants. Don't forget to try New York-style pizza, bagels, and the famous cronut."
] |
2024-01-10 | lastmile-ai/aiconfig | python~tests~test_eval_model_graded_openai.py | from typing import cast
import aiconfig.eval.openai as lib_openai
import openai.types.chat as openai_chat_types
import openai.types.chat.chat_completion as openai_chat_completion_types
import openai.types.chat.chat_completion_message_tool_call as openai_tool_call_types
import pytest
from aiconfig.eval import common
from aiconfig.eval.api import metrics, run_test_suite_outputs_only
from result import Ok, Result
def _mock_response(function_args: common.SerializedJSON) -> openai_chat_types.ChatCompletion:
return openai_chat_types.ChatCompletion(
id="123",
choices=[
openai_chat_completion_types.Choice(
index=0,
message=openai_chat_types.ChatCompletionMessage(
content=None,
role="assistant",
tool_calls=[
openai_chat_types.ChatCompletionMessageToolCall(
id="cm-tk-1",
type="function",
function=openai_tool_call_types.Function(
name="dummy",
arguments=function_args,
),
)
],
),
finish_reason="stop",
)
],
created=0,
model="",
object="chat.completion",
)
def _make_mock_openai_chat_completion_create(function_arguments_return: common.SerializedJSON) -> lib_openai.OpenAIChatCompletionCreate:
def _mock_openai_chat_completion_create(
completion_params: lib_openai.OpenAIChatCompletionParams,
) -> Result[openai_chat_types.ChatCompletion, str]:
return Ok(
_mock_response(
function_arguments_return,
)
)
return _mock_openai_chat_completion_create
@pytest.mark.asyncio
async def test_openai_structured_eval():
_mock_create = _make_mock_openai_chat_completion_create(
common.SerializedJSON('{"conciseness_rating": 5, "conciseness_confidence": 0.9, "conciseness_reasoning": "I think it\'s pretty concise."}')
)
mock_metric = metrics.make_openai_structured_llm_metric(
eval_llm_name="gpt-3.5-turbo-0613",
pydantic_basemodel_type=common.TextRatingsData,
metric_name="text_ratings",
metric_description="Text ratings",
field_descriptions=dict(
conciseness_rating="1 to 5 rating of conciseness",
conciseness_confidence="0 to 1.0 rating of confidence in conciseness rating",
conciseness_reasoning="reasoning behind the conciseness rating",
),
openai_chat_completion_create=_mock_create,
)
user_test_suite_outputs_only = [
("one two three", mock_metric),
]
df = await run_test_suite_outputs_only(user_test_suite_outputs_only)
metric_data = cast(common.CustomMetricPydanticObject[common.TextRatingsData], df.loc[0, "value"]).data
assert isinstance(metric_data, common.TextRatingsData)
metric_json = metric_data.to_dict()
assert metric_json == {"conciseness_rating": 5, "conciseness_confidence": 0.9, "conciseness_reasoning": "I think it's pretty concise."}
@pytest.mark.asyncio
async def test_bad_structured_eval_metric():
_mock_create = _make_mock_openai_chat_completion_create(
common.SerializedJSON('{"conciseness_rating": 5, "conciseness_confidence": 0.9, "conciseness_reasoning": "I think it\'s pretty concise."}')
)
with pytest.raises(ValueError) as exc:
_ = metrics.make_openai_structured_llm_metric(
eval_llm_name="gpt-3.5-turbo-0613",
pydantic_basemodel_type=common.TextRatingsData,
metric_name="text_ratings",
metric_description="Text ratings",
field_descriptions=dict(
fake_field="123",
conciseness_rating="1 to 5 rating of conciseness",
conciseness_confidence="0 to 1.0 rating of confidence in conciseness rating",
conciseness_reasoning="reasoning behind the conciseness rating",
),
openai_chat_completion_create=_mock_create,
)
assert "The following field_descriptions keys are not in the schema" in str(exc)
| [] |
2024-01-10 | lastmile-ai/aiconfig | python~src~aiconfig~default_parsers~anyscale_endpoint.py | import copy
import os
from typing import TYPE_CHECKING, Dict, List, Optional, Union
from aiconfig.callback import CallbackEvent
from aiconfig.model_parser import InferenceOptions
from openai import OpenAI
from openai.types.chat import ChatCompletionMessage
from aiconfig.schema import ExecuteResult, FunctionCallData, Output, OutputDataWithToolCallsValue, OutputDataWithValue, Prompt, ToolCallData
from .openai import OpenAIInference
if TYPE_CHECKING:
from aiconfig.Config import AIConfigRuntime
class AnyscaleEndpoint(OpenAIInference):
async def run_inference(
self,
prompt: Prompt,
aiconfig: "AIConfigRuntime",
options: InferenceOptions,
parameters: Optional[Dict],
) -> List[Output]:
"""
Invoked to run a prompt in the .aiconfig. This method should perform
the actual model inference based on the provided prompt and inference settings.
Args:
prompt (str): The input prompt.
inference_settings (dict): Model-specific inference settings.
Returns:
ExecuteResult: The response from the model.
"""
await aiconfig.callback_manager.run_callbacks(
CallbackEvent(
"on_run_start",
__name__,
{"prompt": prompt, "options": options, "parameters": parameters},
)
)
anyscale_api_key_name = "ANYSCALE_ENDPOINT_API_KEY"
openai_api_key_name = "OPENAI_API_KEY"
if anyscale_api_key_name not in os.environ:
if openai_api_key_name not in os.environ:
raise Exception(
f"Missing API keys '{anyscale_api_key_name}' and '{openai_api_key_name}' in environment. Expected one of them to be specified"
)
else:
api_key = os.environ[openai_api_key_name]
else:
api_key = os.environ[anyscale_api_key_name]
client = OpenAI(api_key=api_key, base_url="https://api.endpoints.anyscale.com/v1")
completion_data = await self.deserialize(prompt, aiconfig, parameters)
# if stream enabled in runtime options and config, then stream. Otherwise don't stream.
# const stream = options?.stream ?? completionParams.stream ?? true;
stream = True # Default value
if options is not None and options.stream is not None:
stream = options.stream
elif "stream" in completion_data:
stream = completion_data["stream"]
completion_data["stream"] = stream
response = client.chat.completions.create(**completion_data)
outputs = []
if not stream:
# # OpenAI>1.0.0 uses pydantic models for response
response = response.model_dump(exclude_none=True)
response_without_choices = {key: copy.deepcopy(value) for key, value in response.items() if key != "choices"}
for i, choice in enumerate(response.get("choices")):
output_message = choice["message"]
output_data = build_output_data(output_message)
response_without_choices.update({"finish_reason": choice.get("finish_reason")})
metadata = {"raw_response": output_message, **response_without_choices}
if output_message.get("role", None) is not None:
metadata["role"] = output_message.get("role")
output = ExecuteResult(
**{
"output_type": "execute_result",
"data": output_data,
"execution_count": i,
"metadata": metadata,
}
)
outputs.append(output)
else:
outputs = {}
messages = {}
for chunk in response:
# OpenAI>1.0.0 uses pydantic models. Chunk is of type ChatCompletionChunk; type is not directly importable from openai Library, will require some diffing
chunk = chunk.model_dump(exclude_none=True)
chunk_without_choices = {key: copy.deepcopy(value) for key, value in chunk.items() if key != "choices"}
# streaming only returns one chunk, one choice at a time (before 1.0.0). The order in which the choices are returned is not guaranteed.
messages = multi_choice_message_reducer(messages, chunk)
for i, choice in enumerate(chunk["choices"]):
index = choice.get("index")
accumulated_message_for_choice = messages.get(index, "")
delta = choice.get("delta")
if options and options.stream_callback:
options.stream_callback(delta, accumulated_message_for_choice, index)
output = ExecuteResult(
**{
"output_type": "execute_result",
"data": accumulated_message_for_choice,
"execution_count": index,
"metadata": chunk_without_choices,
}
)
outputs[index] = output
outputs = [outputs[i] for i in sorted(list(outputs.keys()))]
# Now that we have the complete outputs, we can parse it into our object model properly
for output in outputs:
output_message = output.data
output_data = build_output_data(output.data)
metadata = {"raw_response": output_message}
if output_message.get("role", None) is not None:
metadata["role"] = output_message.get("role")
output.data = output_data
output.metadata = {**output.metadata, **metadata}
# rewrite or extend list of outputs?
prompt.outputs = outputs
await aiconfig.callback_manager.run_callbacks(CallbackEvent("on_run_complete", __name__, {"result": prompt.outputs}))
return prompt.outputs
class DefaultAnyscaleEndpointParser(AnyscaleEndpoint):
def __init__(self, model_id: str):
super().__init__()
self.model_id = model_id
def id(self) -> str:
return self.model_id
class LLaMA2_7B_Chat(DefaultAnyscaleEndpointParser):
def __init__(self):
model_id = "meta-llama/Llama-2-7b-chat-hf"
super().__init__(model_id)
class LLaMA2_13B_Chat(DefaultAnyscaleEndpointParser):
def __init__(self):
model_id = "meta-llama/Llama-2-13b-chat-hf"
super().__init__(model_id)
class LLaMA2_70B_Chat(DefaultAnyscaleEndpointParser):
def __init__(self):
model_id = "meta-llama/Llama-2-70b-chat-hf"
super().__init__(model_id)
class LLaMAGuard_7B(DefaultAnyscaleEndpointParser):
def __init__(self):
model_id = "Meta-Llama/Llama-Guard-7b"
super().__init__(model_id)
class Mistral_7B_OpenOrca(DefaultAnyscaleEndpointParser):
def __init__(self):
model_id = "Open-Orca/Mistral-7B-OpenOrca"
super().__init__(model_id)
class CodeLLaMA_34B(DefaultAnyscaleEndpointParser):
def __init__(self):
model_id = "codellama/CodeLlama-34b-Instruct-hf"
super().__init__(model_id)
class Zephyr_7B(DefaultAnyscaleEndpointParser):
def __init__(self):
model_id = "HuggingFaceH4/zephyr-7b-beta"
super().__init__(model_id)
class Mistral_7B(DefaultAnyscaleEndpointParser):
def __init__(self):
model_id = "mistralai/Mistral-7B-Instruct-v0.1"
super().__init__(model_id)
class Mixtral_8x7B(DefaultAnyscaleEndpointParser):
def __init__(self):
model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
super().__init__(model_id)
def reduce(acc, delta):
acc = copy.deepcopy(acc)
for key, value in delta.items():
if key not in acc:
# If the key doesn't exist in 'acc', add it with the 'value'
acc[key] = value
elif isinstance(acc[key], str) and isinstance(value, str):
# If both 'acc[key]' and 'value' are strings, concatenate them
acc[key] += value
elif isinstance(acc[key], dict) and not isinstance(acc[key], list):
# If 'acc[key]' is a dictionary (not a list), recursively merge it with 'value'
acc[key] = reduce(acc[key], value)
return acc
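# Illustrative sketch of how `reduce` accumulates streaming deltas (not part of the original file):
#   acc = {"role": "assistant", "content": "Hel"}
#   delta = {"content": "lo"}
#   reduce(acc, delta)  # -> {"role": "assistant", "content": "Hello"}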
def multi_choice_message_reducer(messages: Union[Dict[int, dict], None], chunk: dict) -> Dict[int, dict]:
if messages is None:
messages = {}
# elif len(messages) != len(chunk["choices"]):
# raise ValueError("Invalid number of previous choices -- it should match the incoming number of choices")
for choice in chunk["choices"]:
index = choice["index"]
previous_message = messages.get(index, {})
updated_message = reduce(previous_message, choice["delta"])
messages[index] = updated_message
return messages
def build_output_data(
message: Union[ChatCompletionMessage, None],
) -> Union[OutputDataWithValue, str, None]:
if message is None:
return None
output_data: Union[OutputDataWithValue, str, None] = None
if message.get("content") is not None and message.get("content") != "":
output_data = message.get("content") # string
elif message.get("tool_calls") is not None:
tool_calls = []
for item in message.get("tool_calls"):
tool_call_type = item.get("type")
if tool_call_type != "function":
# It's possible that ChatCompletionMessageToolCall may
# support more than just function calls in the future
# so filter out other types of tool calls for now
continue
function = item.get("function")
tool_calls.append(
ToolCallData(
id=item.get("id"),
function=FunctionCallData(
arguments=function.get("arguments"),
name=function.get("name"),
),
type=tool_call_type,
)
)
output_data = OutputDataWithToolCallsValue(
kind="tool_calls",
value=tool_calls,
)
# Deprecated, use tool_calls instead
elif message.get("function_call") is not None:
function_call = message.get("function_call")
tool_calls = [
ToolCallData(
id="function_call_data", # value here does not matter
function=FunctionCallData(
arguments=function_call["arguments"],
name=function_call["name"],
),
type="function",
)
]
output_data = OutputDataWithToolCallsValue(
kind="tool_calls",
value=tool_calls,
)
return output_data
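# Illustrative sketch (not part of the original file): a plain text message maps to a string,
# while a tool-call message maps to an OutputDataWithToolCallsValue, e.g.:
#   build_output_data({"content": "hi"})  # -> "hi"
#   build_output_data({"content": None, "tool_calls": [{"type": "function", "id": "1",
#       "function": {"name": "f", "arguments": "{}"}}]})  # -> OutputDataWithToolCallsValue(...)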
| [] |
2024-01-10 | lastmile-ai/aiconfig | cookbooks~Wizard-GPT~wizard-gpt.py | import asyncio
import os
import dotenv
# Create ~/.env file with this line: export OPENAI_API_KEY=<your key here>
# You can get your key from https://platform.openai.com/api-keys
import openai
from aiconfig import AIConfigRuntime, InferenceOptions, Prompt
dotenv.load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
async def main():
while True:
user_input = input("\nUser: ")
if user_input == "quit":
break
# Dynamically generate the prompt name and prompt object
new_prompt_name = f"prompt{len(config.prompts)+1}" # Prompt{number of prompts}
new_prompt = Prompt(name=new_prompt_name, input=user_input)
# Add the new prompt and run the model
config.add_prompt(new_prompt.name, new_prompt)
await config.run(new_prompt_name, options=inference_options)
# Persist the conversation into the aiconfig file
config.save()
if __name__ == "__main__":
inference_options = InferenceOptions()
config = AIConfigRuntime.load("wizard.aiconfig.json")
asyncio.run(main())
| [] |
2024-01-10 | lastmile-ai/aiconfig | cookbooks~Cli-Mate~cli-mate.py | import warnings
warnings.filterwarnings("ignore")
import argparse
import asyncio
import os
import signal
import sys
from types import FrameType
from typing import Any
import openai
from aiconfig.model_parser import InferenceOptions
from dotenv import load_dotenv
from prompt_toolkit import PromptSession
from aiconfig import AIConfigRuntime
from aiconfig.schema import ExecuteResult, Prompt
def deprefix(s: str, pfx: str) -> str:
if s.startswith(pfx): # Checks if the string starts with the given prefix
return s[len(pfx) :] # If true, returns the string without the prefix
else:
return s
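# For example (illustrative): deprefix("reload main.py", "reload") returns " main.py",
# while deprefix("main.py", "reload") returns the string unchanged.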
class InterruptException(Exception):
pass
state = {"interrupt": False}
async def run_query(aiconfig_path: str, question: str) -> int:
answer = await query(aiconfig_path=aiconfig_path, question=question)
print(answer)
return 0
async def query(aiconfig_path: str, question: str) -> list[ExecuteResult]:
runtime = AIConfigRuntime.load(aiconfig_path)
inference_options = InferenceOptions(stream=True)
params = {
"the_input": question,
}
result = await runtime.run("query", params, inference_options)
print(f"result:\n{result}")
return result
async def get_mod_result(aiconfig_path: str, source_code: str, question: str) -> list[ExecuteResult]:
question_about_code = f"QUERY ABOUT SOURCE CODE:\n{question}\nSOURCE CODE:\n```{source_code}\n```"
return await query(aiconfig_path, question_about_code)
async def mod_code(aiconfig_path: str, source_code_file: str, question: str, update_file: bool = False):
# read source code from file
with open(source_code_file, "r", encoding="utf8") as file:
source_code = file.read()
answer = await get_mod_result(aiconfig_path, source_code, question)
# TODO
s_answer = str(answer)
if update_file:
# Here you would add your logic related to how the original code needs to be modified based on the answer
with open(source_code_file, "w", encoding="utf8") as file:
file.write(s_answer)
return 0
async def loop(aiconfig_path: str, source_code_file: str | None):
runtime = AIConfigRuntime.load(aiconfig_path)
event_loop = asyncio.get_event_loop()
session = PromptSession()
state["interrupt"] = False
def signal_handler(_: int, __: FrameType | None):
state["interrupt"] = True
print("\nStopping", flush=True)
signal.signal(signal.SIGINT, signal_handler)
i = 0
while True:
try:
user_input = await event_loop.run_in_executor(None, session.prompt, "Query: [ctrl-D to exit] ")
except KeyboardInterrupt:
continue
except EOFError:
print("Exiting")
break
if user_input.strip() == "":
continue
should_reload = user_input.strip().startswith("reload") or i == 0
if should_reload and source_code_file is not None:
user_input = deprefix(user_input.strip(), "reload")
with open(source_code_file.strip(), "r", encoding="utf8") as file:
source_code = file.read()
prompt = f"QUERY ABOUT SOURCE CODE:\n{user_input}\nSOURCE CODE:\n```{source_code}\n```"
else:
prompt = user_input
# Dynamically generate the prompt name and prompt object
new_prompt_name = f"prompt{len(runtime.prompts)+1}" # Prompt{number of prompts}
new_prompt = Prompt(name=new_prompt_name, input=prompt)
# Add the new prompt and run the model
runtime.add_prompt(new_prompt.name, new_prompt)
def callback(delta: Any, _: Any, __: int):
if state["interrupt"]:
raise InterruptException()
print(delta.get("content", ""), end="", flush=True)
options = InferenceOptions(stream=True, stream_callback=callback)
state["interrupt"] = False
try:
result = await runtime.run(new_prompt_name, {}, options=options)
# print(f"{result=}")
print(flush=True)
i += 1
except InterruptException:
continue
async def main():
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--aiconfig-path", required=True)
subparsers = parser.add_subparsers(dest="command")
loop_parser = subparsers.add_parser("loop")
loop_parser.add_argument("-scf", "--source-code-file", help="Specify a source code file.")
args = parser.parse_args()
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
if args.command == "loop":
return await loop(args.aiconfig_path, args.source_code_file)
if __name__ == "__main__":
res = asyncio.run(main())
sys.exit(res)
| [
"QUERY ABOUT SOURCE CODE:\nPLACEHOLDER\nSOURCE CODE:\n```PLACEHOLDER\n```"
] |
2024-01-10 | rawcsav/Hinterview | src~gui_util.py | import os
from colorama import Fore, Style
from art import *
from config import configure_settings,get_config
from openai_util import embed_documents
def clear_screen():
os.system('cls' if os.name == 'nt' else 'clear')
def display_intro():
clear_screen()
# Generate the ASCII art text with 'slant' font
ascii_art = text2art("Hinterview", "slant")
# Print the ANSI escape codes for bright cyan color
print(Style.BRIGHT + Fore.CYAN, end="")
# Replace both the '/' and '_' characters with the desired colors
colored_ascii_art = ascii_art.replace("/", Fore.GREEN + "/" + Fore.CYAN)
colored_ascii_art = colored_ascii_art.replace("_", Fore.GREEN + "_" + Fore.CYAN)
# Print the generated ASCII art with the desired colors
print(colored_ascii_art)
print(Fore.CYAN + "──────────────────────────────────────────────────────────────────────────")
configure_settings()
folder_path = get_config('folder_path')
print("\nCurrent directory path:" + Fore.LIGHTGREEN_EX + Style.BRIGHT + f"{folder_path}\n")
def display_initial_menu():
print(Fore.YELLOW + "1. Continue to Program")
print(Fore.YELLOW + "2. Open Settings Menu")
choice = input(Fore.GREEN + "Please select an option (1-2): ")
return choice
def display_settings_menu():
clear_screen()
print(Fore.CYAN + "──────────────────────────────────────────────────────────────────────────")
print(Style.BRIGHT + Fore.GREEN + " SETTINGS")
print(Fore.YELLOW + "1. Folder Path")
print(Fore.YELLOW + "2. OpenAI API Key")
print(Fore.YELLOW + "3. Hotkey")
print(Fore.YELLOW + "4. Interview Mode")
print(Fore.YELLOW + "5. GPT Model")
print(Fore.YELLOW + "6. System Prompt")
print(Fore.YELLOW + "7. Temperature")
print(Fore.YELLOW + "8. Max Tokens")
print(Fore.YELLOW + "9. Resume Title")
print(Fore.YELLOW + "10. Job Description Title")
print(Fore.CYAN + "──────────────────────────────────────────────────────────────────────────")
print(Fore.GREEN + "0. Return to Main Menu")
choice = input(Fore.LIGHTGREEN_EX + "Please select an option (0-10): ")
return choice
def handle_settings_menu():
while True:
choice = display_settings_menu()
if choice == '0':
display_intro()
break
elif choice in ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10'):
settings_options = {
'1': ('Enter the new folder path: ', 'folder_path'),
'2': ('Enter the new OpenAI API Key: ', 'openai_api_key'),
'3': ('Enter the new hotkey: ', 'hotkey'),
'4': ('Enter the new special option value: ', 'special_option'),
'5': ('Enter the new GPT model: ', 'gpt_model'),
'6': ('Enter the new system prompt: ', 'system_prompt'),
'7': ('Enter the new temperature value: ', 'temperature'),
'8': ('Enter the new max tokens value: ', 'max_tokens'),
'9': ('Enter the new resume title: ', 'resume_title'),
'10': ('Enter the new job description title: ', 'job_description_title'),
}
prompt, setting_name = settings_options[choice]
new_value = input(Fore.GREEN + prompt)
configure_settings(**{setting_name: new_value})
print(Fore.GREEN + "Setting updated successfully!")
clear_screen()
else:
print(Fore.RED + "Invalid choice. Please try again.")
def display_instructions():
print("\nPress and hold the hotkey (default: Option) to record a segment of your interview.")
print("Release the key to stop recording and get insights.")
def display_recording():
print(Fore.CYAN + "\n──────────────────────────────────────────────────────────────────────────")
print(Fore.YELLOW + "\n[STATUS] Recording...")
def display_transcribing():
print(Fore.BLUE + "[STATUS] Transcribing...")
def display_processing():
print(Fore.MAGENTA + "[STATUS] Fetching AI Response...")
def display_error(error_message):
print(Fore.CYAN + "\n──────────────────────────────────────────────────────────────────────────")
print(Fore.RED + "\nError:", error_message)
def primary_gui():
display_intro()
while True:
choice = display_initial_menu()
if choice == '1':
print(Fore.GREEN + "Continuing to the Program...\n")
break
elif choice == '2':
handle_settings_menu()
else:
print(Fore.RED + "Invalid choice. Please try again.")
FOLDER_PATH = get_config("folder_path")
df = embed_documents(FOLDER_PATH)
display_instructions()
return df
| [] |
2024-01-10 | satvik314/journal_x | chat_ui.py | import streamlit as st
from streamlit_chat import message
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain
from utils import insert_into_db, embed_text, create_prompt
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate
)
def chat_interface():
# Initialize session state variables
if 'responses' not in st.session_state:
st.session_state['responses'] = ["I am Journal X! You can chat with me."]
if 'requests' not in st.session_state:
st.session_state['requests'] = []
if 'buffer_memory' not in st.session_state:
st.session_state.buffer_memory = ConversationBufferWindowMemory(k=3, return_messages=True)
# Initialize ChatOpenAI and ConversationChain
llm = ChatOpenAI(model_name="gpt-3.5-turbo")
system_prompt = """You are a therapist which has the knowledge base of daily journals. You give short and conversational answers."""
prompt = ChatPromptTemplate.from_messages([
SystemMessagePromptTemplate.from_template(system_prompt),
MessagesPlaceholder(variable_name = "history"),
HumanMessagePromptTemplate.from_template("{input}")
])
conversation = ConversationChain(prompt=prompt, llm=llm, memory=st.session_state.buffer_memory)
response_container = st.container()
spinner_container = st.container()
text_container = st.container()
with text_container:
query = st.text_input("Query: ", key="input")
with spinner_container:
if query:
with st.spinner("typing..."):
# response = conversation.predict(input=query)
response = conversation.predict(input=create_prompt(query))
print(create_prompt(query))
st.session_state.requests.append(query)
st.session_state.responses.append(response)
with response_container:
if st.session_state['responses']:
for i in range(len(st.session_state['responses'])):
message(st.session_state['responses'][i], key=str(i))
if i < len(st.session_state['requests']):
message(st.session_state["requests"][i], is_user=True, key=str(i) + '_user') | [
"{input}",
"You are a therapist which has the knowledge base of daily journals. You give short and conversational answers."
] |
2024-01-10 | TsinghuaDatabaseGroup/DB-GPT | multiagents~message.py | from pydantic import BaseModel, Field
from typing import List, Tuple, Set, Union, Any
# from langchain.schema import AgentAction, ChatMessage
from utils.utils import AgentAction
class Message(BaseModel):
content: dict = Field(default={"diagnose": "", "solution": [], "knowledge": ""})
sender: str = Field(default="")
receiver: Set[str] = Field(default=set({"all"}))
tool_response: List[Tuple[AgentAction, str]] = Field(default=[])
class SolverMessage(Message):
pass
class CriticMessage(Message):
review: str
class ExecutorMessage(Message):
pass
class EvaluatorMessage(Message):
score: Union[bool, List[bool], int, List[int]]
advice: str = Field(default="")
class RoleAssignerMessage(Message):
pass
| [] |
2024-01-10 | TsinghuaDatabaseGroup/DB-GPT | multiagents~knowledge~info_retrieval_algorithm.py | import numpy as np
from typing import List
import heapq
import openai
# import editdistance
# from rank_bm25 import BM25Okapi
import math
import numpy as np
from multiprocessing import Pool, cpu_count
import json
import nltk
nltk.data.path.append('./nltk_data')
from nltk import pos_tag
from nltk.corpus import wordnet
# from nltk.stem import WordNetLemmatizer
# from nltk.corpus import wordnet, stopwords
# from nltk.tokenize import word_tokenize
# nltk.download('stopwords')
# nltk.download('punkt')
# nltk.download('averaged_perceptron_tagger')
# nltk.download('wordnet')
# wnl = WordNetLemmatizer()
# corpus = []
# with open("/Users/4paradigm/Desktop/work/2023_05_22/root_causes_dbmind.jsonl", 'r') as f:
# data = json.load(f)
# corpus = [example["desc"] for example in data]
# metrics = [example["metrics"] for example in data]
# stop_words = set(stopwords.words('english'))
# preprocessed_corpus = []
# for c in corpus:
# word_tokens = word_tokenize(c)
# preprocessed_corpus.append([wnl.lemmatize(w,pos='n') for w in word_tokens if not w in stop_words])
# def embedding(input:str):
# response = openai.Embedding.create(
# input=input,
# model="text-embedding-ada-002"
# )
# embeddings = response['data'][0]['embedding']
# # print("\n-----\ntext:{}\n embeddings:{}\n-----\n".format(input, embeddings))
# return embeddings
# def euclidean_distance(target:List[float], sample:List[float]):
# """
# return the euclidean distance of two vectors
# """
# return np.sqrt(np.sum(np.square(np.asarray(target) - np.asarray(sample))))
# def cosine_distance(target:List[float], sample:List[float]):
# """
# return the euclidean distance of two vectors
# """
# return 1 - np.dot(target,sample)/(np.linalg.norm(target)*np.linalg.norm(sample))
# def linear_search(k:int, target:List[float], samples:List[List[float]]):
# """
# k: the top-k examples
# target: incoming metrics
# samples: examples
# """
# func_distance = cosine_distance
# # func_distance = cosine_distance
# dist = []
# for s in samples:
# dist.append(func_distance(target, s))
# index = heapq.nlargest(k, range(len(dist)), dist.__getitem__)
# return index
# THRESHOLD = 0.5
# def editdis_linear(k:int, target:List[str], samples:List[List[str]]):
# dist = []
# for sample in samples:
# dis = len(target)
# for t in target:
# dis_samples = [editdistance.eval(t, s)/max(len(t), len(s)) for s in sample]
# if min(dis_samples) < THRESHOLD:
# dis -= 1
# dist.append(dis)
# index = heapq.nsmallest(k, range(len(dist)), dist.__getitem__)
# return index
# def get_wordnet_pos(tag):
# if tag.startswith('J'):
# return wordnet.ADJ
# elif tag.startswith('V'):
# return wordnet.VERB
# elif tag.startswith('N'):
# return wordnet.NOUN
# elif tag.startswith('R'):
# return wordnet.ADV
# else:
# return None
'''
Inherited from the BM25Okapi API of rank_bm25 package
'''
class BM25:
def __init__(self, corpus, tokenizer=None):
self.corpus_size = 0
self.avgdl = 0
self.doc_freqs = []
self.idf = {}
self.doc_len = []
self.tokenizer = tokenizer
if tokenizer:
corpus = self._tokenize_corpus(corpus)
nd = self._initialize(corpus)
self._calc_idf(nd)
def _initialize(self, corpus):
nd = {} # word -> number of documents with word
num_doc = 0
for document in corpus: # each document is a knowledge chunk
self.doc_len.append(len(document))
num_doc += len(document)
frequencies = {}
for word in document:
if word not in frequencies:
frequencies[word] = 0
frequencies[word] += 1
self.doc_freqs.append(frequencies)
for word, freq in frequencies.items():
try:
nd[word]+=1
except KeyError:
nd[word] = 1
self.corpus_size += 1
self.avgdl = num_doc / self.corpus_size
return nd
def _tokenize_corpus(self, corpus):
pool = Pool(cpu_count())
tokenized_corpus = pool.map(self.tokenizer, corpus)
return tokenized_corpus
def _calc_idf(self, nd):
raise NotImplementedError()
def get_scores(self, query):
raise NotImplementedError()
def get_batch_scores(self, query, doc_ids):
raise NotImplementedError()
def get_top_n(self, query, documents, n=5):
assert self.corpus_size == len(documents), "The documents given don't match the index corpus!"
scores = self.get_scores(query)
top_n = np.argsort(scores)[::-1][:n]
return [documents[i] for i in top_n]
class BM25_call(BM25):
def __init__(self, corpus, tokenizer=None, k1=1.5, b=0.75, epsilon=0.25):
self.k1 = k1
self.b = b
self.epsilon = epsilon
super().__init__(corpus, tokenizer)
def _calc_idf(self, nd):
"""
Calculates frequencies of terms in documents and in corpus.
This algorithm sets a floor on the idf values to eps * average_idf
"""
# collect idf sum to calculate an average idf for epsilon value
idf_sum = 0
# collect words with negative idf to set them a special epsilon value.
# idf can be negative if word is contained in more than half of documents
negative_idfs = []
for word, freq in nd.items():
idf = math.log(self.corpus_size - freq + 0.5) - math.log(freq + 0.5)
self.idf[word] = idf
idf_sum += idf
if idf < 0:
negative_idfs.append(word)
self.average_idf = idf_sum / len(self.idf)
eps = self.epsilon * self.average_idf
for word in negative_idfs:
self.idf[word] = eps
def get_scores(self, query):
"""
The ATIRE BM25 variant uses an idf function which uses a log(idf) score. To prevent negative idf scores,
this algorithm also adds a floor to the idf value of epsilon.
See [Trotman, A., X. Jia, M. Crane, Towards an Efficient and Effective Search Engine] for more info
:param query:
:return:
"""
score = np.zeros(self.corpus_size)
doc_len = np.array(self.doc_len)
for q in query:
q_freq = np.zeros(self.corpus_size)
synonymous_q = None
for i, doc in enumerate(self.doc_freqs):
for w in doc:
if are_synonyms(q, w):
q_freq[i] += doc[w]
synonymous_q = w
score += (self.idf.get(synonymous_q) or 0) * (q_freq * (self.k1 + 1) /
(q_freq + self.k1 * (1 - self.b + self.b * doc_len / self.avgdl)))
return score
def get_batch_scores(self, query, doc_ids):
"""
Calculate bm25 scores between query and subset of all docs
"""
assert all(di < len(self.doc_freqs) for di in doc_ids)
score = np.zeros(len(doc_ids))
doc_len = np.array(self.doc_len)[doc_ids]
for q in query:
q_freq = np.array([(self.doc_freqs[di].get(q) or 0) for di in doc_ids])
score += (self.idf.get(q) or 0) * (q_freq * (self.k1 + 1) /
(q_freq + self.k1 * (1 - self.b + self.b * doc_len / self.avgdl)))
return score.tolist()
def are_synonyms(word1, word2):
"""Check if two words are synonyms using WordNet."""
synonyms1 = {lemma.name() for synset in wordnet.synsets(word1) for lemma in synset.lemmas()}
synonyms2 = {lemma.name() for synset in wordnet.synsets(word2) for lemma in synset.lemmas()}
return not synonyms1.isdisjoint(synonyms2)
def simple_tok(sent:str):
return sent.split()
def bm25(k, target:List[str], sample:List[List[str]]):
tok_corpus = sample
bm25 = BM25_call(tok_corpus)
query = target
scores = bm25.get_scores(query)
best_docs = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)[:k]
best_docs_none_zero = []
for d in best_docs:
if scores[d] != 0:
best_docs_none_zero.append(d)
return best_docs_none_zero
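# Illustrative sketch (not part of the original module) of how the helpers above
# are meant to be combined; the query and corpus are hypothetical, and WordNet
# data must be available for the synonym-aware scoring:
#
#   corpus = [simple_tok("high memory usage on the node"),
#             simple_tok("slow queries increase cpu load")]
#   top_ids = bm25(1, simple_tok("memory usage is high"), corpus)
#   # top_ids -> indices of the best-matching corpus chunks (zero-score chunks dropped)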
| [] |
2024-01-10 | TsinghuaDatabaseGroup/DB-GPT | multiagents~response_formalize_scripts~combine_similar_answer.py | import requests
import os
import numpy as np
import re
from scipy import spatial # for calculating vector similarities for search
import pdb
import time
import logging
from openai import OpenAI
from multiagents.llms.sentence_embedding import sentence_embedding
similarity = lambda x, y: 1 - spatial.distance.cosine(x, y)
def combine_similar_answers(text, output_format='str'):
if output_format == 'str':
text = text.strip()
tmp_sentences = re.split(r'(?<=[^.])\.(?:\s|\n|$)|\n', text)
else:
tmp_sentences = text
# compute text embedding (1536 dimen) for each sentence
sentences = []
for sentence in tmp_sentences:
sentence = sentence.strip()
if sentence != '':
embedding = sentence_embedding(sentence)
sentences.append({"text": sentence, "embedding": embedding})
combined_sentences = []
checked_indices = []
for i in range(len(sentences)):
if i in checked_indices:
continue
similar_sentences = [sentences[i]["text"]]
for j in range(i + 1, len(sentences)):
score = similarity(sentences[i]["embedding"], sentences[j]["embedding"])
if score > 0.8:
similar_sentences.append(sentences[j]["text"])
checked_indices.append(j)
if len(similar_sentences) > 1:
# for s in similar_sentences:
# print(s+'\n'+'====')
pass
if similar_sentences[0][-1] != '.':
similar_sentences[0] = similar_sentences[0] + '.'
combined_sentences.append(similar_sentences[0])
if output_format == 'str':
updated_response = '\n'.join(combined_sentences)
else:
updated_response = combined_sentences
return updated_response
if __name__ == '__main__':
text = """
[Chief DBA]: The identified root causes of the anomaly are high CPU usage due to the high number of running processes and high load averages, slow queries, and high memory usage due to complex queries, insufficient memory allocation, and the possibility of disk spill.
Optimize the slow queries by analyzing their execution plans, identifying any missing indexes, and tuning the queries accordingly
Polisih the slow queries by checking their execution plans, identifying any missing indexes, and tuning the queries accordingly
Monitor and manage the number of running processes on the node, ensuring that it remains within acceptable limits
Investigate and address any system or infrastructure issues that may be contributing to the high load averages
Consider scaling up or adding more resources to the system if the current CPU usage is consistently high and impacting performance
Add the recommended indexes: ['movie_info#movie_id', 'movie_companies#movie_id', 'keyword#keyword', 'aka_name#person_id'] to reduce cost
Further investigate and optimize the slow queries for better memory utilization
Monitor and adjust the memory allocation parameters like 'work_mem' and 'shared_buffers' to optimize memory usage
Analyze the query plans and consider adjustments to parameters like 'work_mem' and 'shared_buffers' to help reduce or eliminate disk spilling
"""
print(combine_similar_answers(text))
| [] |
2024-01-10 | TsinghuaDatabaseGroup/DB-GPT | multiagents~llms~sentence_embedding.py | import logging
import os
from typing import Dict, List, Optional, Union
from pydantic import BaseModel, Field
from multiagents.llms.base import LLMResult
from . import llm_registry
import requests
import json
import aiohttp
import asyncio
import time
import random
import re
from termcolor import colored
from tqdm import tqdm
from openai import OpenAI
import pickle
from sentence_transformers import SentenceTransformer
#def sentence_embedding(sentence: str, model: str = "text-embedding-ada-002"):
def sentence_embedding(sentence: str, model: str = "sentence-transformer"):
if model == "sentence-transformer":
model_path = './localized_llms/sentence_embedding/sentence-transformer/'
try:
embedder = SentenceTransformer(model_path)
except FileNotFoundError:
# load from remote
embedder = SentenceTransformer('sentence-transformers/all-mpnet-base-v2')
embedding = embedder.encode([sentence], convert_to_tensor=True)[0]
elif model == "text-embedding-ada-002":
api_key = os.environ.get("OPENAI_API_KEY")
client = OpenAI(api_key=api_key)
timeout=10
ok = 0
while timeout>0:
try:
response = client.embeddings.create(input=[sentence], model=model)
ok = 1
break
except Exception as e:
time.sleep(.01)
timeout -= 1
if ok == 0:
raise Exception("Failed to get response from API!")
embedding = response.data[0].embedding
# payload = {
# "input": [sentence],
# "model": model
# }
# url = "https://api.aiaiapi.com/v1/embeddings"
# headers = {
# "Content-Type": "application/json",
# "Authorization": "Bearer " + api_key
# }
# timeout=10
# ok = 0
# while timeout>0:
# try:
# response = requests.post(url, json=payload, headers=headers)
# ok = 1
# break
# except Exception as e:
# time.sleep(.01)
# timeout -= 1
# if ok == 0:
# raise Exception("Failed to get response from openai API!")
# embedding = json.loads(response.text)['data'][0]['embedding']
return embedding
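# Hedged usage sketch (not part of the original module); the sentences are
# illustrative and the comparison assumes embeddings that can be materialized
# as CPU NumPy arrays:
#
#   import numpy as np
#   emb_a = np.asarray(sentence_embedding("high cpu usage on the database node"))
#   emb_b = np.asarray(sentence_embedding("the database server shows heavy cpu load"))
#   cosine = np.dot(emb_a, emb_b) / (np.linalg.norm(emb_a) * np.linalg.norm(emb_b))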
| [] |
2024-01-10 | TsinghuaDatabaseGroup/DB-GPT | doc2knowledge~utilss.py | from pyheaven import *
from prompts import *
import time
from openai import OpenAI
import itertools
# config utils
def get_config(key):
return LoadJson("config.json")[key]
def get_cache(key):
key = key.lower().strip()
if not ExistFile("cache.json"):
SaveJson(dict(), "cache.json")
cache = LoadJson("cache.json")
if key in cache:
return cache[key]
# crea
return None
# cache utils
def update_cache(key, value, update=False):
key = key.lower().strip()
cache = LoadJson("cache.json")
if update or (key not in cache):
cache[key] = value
SaveJson(cache, "cache.json")
def clear_cache():
SaveJson(dict(), "cache.json")
# file utils
def str2id(id_str):
return tuple(int(j) for j in id_str.split('.'))
def parse_id(file_name):
return str2id(file_name.split(' ')[0])
def parse_depth(file_name):
return len(parse_id(file_name))
def is_father(file_name1, file_name2):
return parse_id(file_name1) == parse_id(file_name2)[:-1]
def id_sort(nodes, reverse=False):
return sorted(nodes, key=lambda x: x['id'], reverse=reverse)
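# Examples of the id helpers above (hypothetical file names, shown for clarity):
#   str2id("2.1.3")                                         -> (2, 1, 3)
#   parse_id("2.1.3 Memory Tuning.txt")                     -> (2, 1, 3)
#   parse_depth("2.1.3 Memory Tuning.txt")                  -> 3
#   is_father("2.1 Memory.txt", "2.1.3 Memory Tuning.txt")  -> True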
def topo_sort(nodes):
nodes = id_sort(nodes)
for i, node in enumerate(nodes):
nodes[i]['book'] = len(node['children'])
nodes = {node['id_str']: node for node in nodes}
sorted_nodes = [nodes[key] for key in nodes if nodes[key]['book']==0]; head = 0
while head < len(sorted_nodes):
v = sorted_nodes[head]; head += 1
if v['father']:
nodes[v['father']]['book'] -= 1
if not nodes[v['father']]['book']:
sorted_nodes.append(nodes[v['father']])
return [{k:v for k,v in node.items() if k!='book'} for node in sorted_nodes]
def read_txt(file_path):
assert ExistFile(file_path), f"File not found: {file_path}"
with open(file_path, "r") as f:
return f.read().strip()
# openai utils
class LLMCore(object):
def __init__(self, backend="openai_gpt-3.5-turbo"):
self.backend = backend
if self.backend.startswith("openai_"):
self.config = get_config('openai-api')
self.client = OpenAI(
api_key=self.config['api_key'],
organization = self.config['organization']
)
self.model = backend.split('_')[-1]
else:
pass
def Query(self, messages, temperature=0, functions=list(), retry_gap=0.1, timeout=3):
identifier = "|".join([self.backend, str(messages)] + ([str(functions)] if functions else []))
#response = get_cache(identifier)
response = None
if response is not None:
return response
while timeout>0:
try:
if functions:
assert (self.model=='gpt-4'), f"Functions are only supported in 'gpt-4'!"
response = self.client.chat.completions.create(
model = self.model,
messages = messages,
functions = functions,
temperature = temperature,
)
else:
response = self.client.chat.completions.create(
model = self.model,
messages = messages,
temperature = temperature,
)
response = response.choices[0].message
# update_cache(identifier, response)
return response
except KeyboardInterrupt as e:
exit(0)
except NotImplementedError as e:
exit(0)
except Exception as e:
print(ERROR(e))
time.sleep(retry_gap)
print(ERROR(f"Retrying..."))
timeout -= 1
return None | [] |
2024-01-10 | TsinghuaDatabaseGroup/DB-GPT | doc2knowledge~knowledge_clustering.py | from multiagents.llms.sentence_embedding import sentence_embedding
from openai import OpenAI
import numpy as np
import json
from sklearn.cluster import DBSCAN
from sklearn.decomposition import PCA
from sklearn.decomposition import FastICA
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
import matplotlib.pyplot as plt
import os
import requests
import time
import ast
existing_knowledge_dir = './docs/extracted_knowledge_chunks'
embedding_file_name = './embeddings_array_v2.npy'
file_names = os.listdir(existing_knowledge_dir)
texts = []
labels = []
for i,file_name in enumerate(file_names):
print(file_name)
if "jsonl" in file_name:
# read content split by '\n\n'
with open(existing_knowledge_dir+f'/{file_name}', 'r') as rf:
prefix= file_name.split('.')[0]
content = rf.read()
content = content.split('\n\n')
for text in content:
if text == '':
continue
labels.append(i)
text = text.strip()
# json.loads(data_string.replace("'", "\"").replace("\\\"", "'"))
try:
text = ast.literal_eval(text)
except:
print(f"[invalid chunk] ({prefix})", text)
if prefix not in text['name']:
text['name'] = prefix + '_' + text['name']
texts.append(text)
if not os.path.exists(embedding_file_name):
# Get embeddings for each text
embeddings = []
for i,text in enumerate(texts):
embedding = sentence_embedding(text["name"])
embeddings.append(embedding)
print(f"embedded {i} text")
# Convert embeddings list to a NumPy array
embeddings_array = np.array(embeddings)
np.save(embedding_file_name, embeddings_array)
else:
# reload embeddings_array from file
embeddings_array = np.load(embedding_file_name)
svd = PCA(n_components=3)
reduced_embeddings = svd.fit_transform(embeddings_array)
# Plotting in 3-D
fig = plt.figure(figsize=(10, 6))
fig.patch.set_facecolor('none')
ax = fig.add_subplot(projection='3d')
ax.xaxis.pane.fill = False
ax.yaxis.pane.fill = False
ax.zaxis.pane.fill = False
ax.grid(True, linestyle='dotted', linewidth=0.5, color='black')
scatter = ax.scatter(reduced_embeddings[:, 0], reduced_embeddings[:, 1], reduced_embeddings[:, 2], c=labels, cmap='viridis')
# plt.title("Knowledge Clustering (Ada-002)")
cbar = fig.colorbar(scatter, ax=ax, shrink=0.7)
#plt.colorbar(scatter)
ax.set_xlabel("PCA 1")
ax.set_ylabel("PCA 2")
ax.set_zlabel("PCA 3")
unique_labels = np.unique(labels)
label_names = ["index", "", "workload", "I/O", "writes", "memory", "", "CPU", "query", ""]
ax.text(-0.12357282, -0.02821038, -0.08682948, "index", fontsize=12, weight='bold', ha='center', va='center')
ax.text(0.24026489, -0.00548978, 0.10369949, "workload", fontsize=12, weight='bold', ha='center', va='center')
ax.text(-0.16701542, -0.0196591 , 0.22820786, "I/O", fontsize=12, weight='bold', ha='center', va='center')
ax.text(-0.14342373, -0.06689665, 0.00210631, "writes", fontsize=12, weight='bold', ha='center', va='center')
ax.text(-0.15936546, 0.1986972 , -0.06664728, "memory", fontsize=12, weight='bold', ha='center', va='center')
ax.text(-0.11849676, 0.17963724, -0.004809, "CPU", fontsize=12, weight='bold', ha='center', va='center')
ax.text(-0.18277633, -0.22516701, -0.21521835, "query", fontsize=12, weight='bold', ha='center', va='center')
ax.set_xlim(-0.3, 0.2)
ax.set_ylim(-0.2, 0.45)
ax.set_zlim(-0.3, 0.2)
# for label in unique_labels:
# centroid = np.mean(reduced_embeddings[labels == label], axis=0)
# ax.text(centroid[0], centroid[1], centroid[2], str(label_names[int(label)]), fontsize=12, weight='bold', ha='center', va='center')
plt.savefig('./knowledge_clustering_3d.png')
| [] |
2024-01-10 | TsinghuaDatabaseGroup/DB-GPT | prompt_template_scripts~index_tuning~run_index_tuning.py | import random
import openai
import os
import re
import numpy as np
import pickle
from data.load_data import load_data
import logging
import sys
import json
import copy
sys.path.append('..')
from automatic_prompt_engineer import ape, config, template, utils
from evaluation.exec_eval import exec_instruction_evaluator
from evaluation.eval_result import SQLEvaluationResult
from evaluation.utility import get_sql_score_index_tuning, create_indexes, drop_indexes
from utils.database import Database, DBArgs
eval_template = "Instruction: [PROMPT]\n\nInput: [INPUT]\nOutput: [OUTPUT]"
prompt_gen_template = " I gave a friend an instruction.\
The friend followed the instruction to output create index statements based on the input SQL queries.\
The resulting create index statements can be executed on a PostgreSQL database and can reduce the execution latency of the input SQL queries.\
Based on the instruction they produced the following input-output pairs:\n\n[full_DEMO]\n\nInstruction: [APE]"
demos_template = "Input: [INPUT]\nOutput: [OUTPUT]"
def generate_prompts(eval_template,
demos_template,
prompt_gen_data,
eval_data,
conf,
few_shot_data=None,
prompt_gen_template=None):
prompts = ape.find_prompts(eval_template=eval_template,
prompt_gen_data=prompt_gen_data,
eval_data=eval_data,
conf=conf,
few_shot_data=few_shot_data,
demos_template=demos_template,
prompt_gen_template=prompt_gen_template)
filtered_prompts = []
for p in prompts:
prompt = re.sub(r'Input\:.+?;', '', p, flags=re.DOTALL)
prompt = re.sub(r'Output\:.+?;', '', prompt, flags=re.DOTALL)
prompt = prompt.replace('\n', '').strip()
filtered_prompts.append(prompt)
filtered_prompts = list(set(filtered_prompts))
logging.info('Deduplicated to {} prompts.'.format(len(filtered_prompts)))
return filtered_prompts
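# Illustrative example (hypothetical LLM output) of the filtering above:
#   "Create indexes on frequently filtered columns. Input: SELECT ...; Output: CREATE INDEX ...;"
# is reduced to roughly:
#   "Create indexes on frequently filtered columns."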
def eval_prompts(prompts, eval_template, eval_data, demos_template, config):
logging.info('Evaluating prompts...')
eval_template_instance = template.EvalTemplate(eval_template)
demos_template_instance = template.DemosTemplate(demos_template)
inputs, _, model_outputs, answers = exec_instruction_evaluator(prompts, eval_template_instance, eval_data, demos_template_instance, config)
logging.info('Finished evaluating.')
return inputs, model_outputs, answers
# Step 1. get the params.
parser = utils.get_parser()
args = parser.parse_args()
task = 'index_tuning'
timeout = -1 # timeout=-1 means no timeout of the query execution time
use_schema = 1 # take data info as LLM input
dbargs = DBArgs("pgsql", utils.get_conf(args.db_conf, 'pgsql_server'), args.eval_dataset)
# db = Database(dbargs, timeout)
# Step 2. set the seed and OpenAI api_key.
openai.api_key = os.environ["OPENAI_API_KEY"]
random.seed(args.seed)
# Step 3. create the directory to store the `result`.
assert not os.path.exists(os.path.dirname(args.logdir.format(args.exp_id))), \
f"`{os.path.dirname(args.logdir.format(args.exp_id))}` dir already existed! " \
f"And we don't intend to overwrite anything."
os.makedirs(os.path.dirname(args.logdir.format(args.exp_id)))
os.makedirs(os.path.dirname(args.model_save.format(args.exp_id, 0)))
os.makedirs(os.path.dirname(args.data_save.format(args.exp_id, 0)))
utils.set_logger(args.runlog.format(args.exp_id))
logging.info("Start Adversarial Workload Generation.")
logging.info(f"Create the directory `{os.path.dirname(args.logdir.format(args.exp_id))}` to save experiment result.")
# Step 4. load the training and evaluation data.
induce_data, eval_data = load_data(args.train_data, task, 'train', use_schema), load_data(args.eval_data, task, 'eval', use_schema)
conf = {
'generation': {
'num_subsamples': args.gen_sample, # 2 workload samples by default, avoiding exceeding the token limit of LLM (many queries in each workload)
'num_demos':args.gen_demo,
'num_prompts_per_subsample': args.gen_prompt_per_sample,
'model': {
'gpt_config': {
'model': args.gen_model,
'max_tokens': args.gen_max_tokens
}
}
},
'evaluation': {
'task': task,
'num_samples': min(args.eval_sample, len(eval_data[0])),
'model': {
'gpt_config': {
'model': args.eval_model,
'max_tokens': args.eval_max_tokens
}
}
}
}
conf = config.update_config(conf, args.algo_conf)
# Step 5. generate multiple candidate prompts for the task.
prompts = generate_prompts(eval_template=eval_template,
prompt_gen_data=induce_data,
eval_data=eval_data,
conf=conf,
few_shot_data=induce_data,
demos_template=demos_template,
prompt_gen_template=prompt_gen_template)
train_dict = dict()
train_dict["prompts"] = prompts
train_pik = args.model_save.format(args.exp_id) + task + "_ape_train.dat"
with open(train_pik, "wb") as f:
pickle.dump(train_dict, f) # save the prompts
# Step 6. evaluate each candidate prompt.
# Step 6.1. get the ape indexes (in list) for each eval workload
### involve column (#-rows, distinct value ratio) in the prompt
inputs, model_outputs, answers = eval_prompts(prompts, eval_template, eval_data, demos_template, conf['evaluation'])
# Step 6.2. reload the SOTA indexes (in list) for each eval workload
num_valid_samples = args.eval_sample
with open(args.eval_data, 'r') as f:
data = json.load(f)
examples = data['examples']
inputs, outputs = [], []
for i in range(min(args.eval_sample, len(examples))):
data = examples[str(i + 1)]
input_, output_ = data['input'], data['output']
inputs.append(input_)
outputs.append(output_)
eval_data = (inputs, outputs)
# Step 6.3. 1) (greedy) filter indexes that are out of storage limit; 2) compute the cost reduction of ape indexes
### ape indexes of [args.eval_sample] workloads of the [args.gen_prompt_per_sample] prompts
indexes_ape_under_budget = [[[] for j in range(args.eval_sample)] for i in range(len(prompts))]
total_index_cost = 0
total_index_reduction = 0
for j, (prediction, ans_) in enumerate(zip(model_outputs, answers)):
indexes = prediction.split(";")
updated_indexes = []
for index in indexes:
if "CREATE INDEX" in index:
index = index.replace("\n", "")
index = index + ";"
updated_indexes.append(index)
flag, total_cost, reduced_cost, indexes_under_budget = get_sql_score_index_tuning(indexes, eval_data[0][j], args.storage_budget, args)
indexes_ape_under_budget[j] = indexes_under_budget
# compute the cost reduction with the index
total_index_reduction = total_index_reduction + reduced_cost
total_index_cost = total_index_cost + total_cost
logging.info("total index cost reduction / total_index_cost: {}/{}".format(total_index_reduction, total_index_cost))
# Step 6.3. compute the latency under selected indexes
latency_database = 0.0000001
latency_sota = 0.0000001
### the latency of workloads in default database (latency_database)
for i,workload in enumerate(eval_data[0]):
# print("================ workload {} ================".format(i))
tmpdb = Database(dbargs, timeout)
for sql in workload:
query_latency = tmpdb.pgsql_actual_time(sql)
latency_database = latency_database + query_latency
# print("query latency: {}".format(query_latency))
tmpdb.conn.commit()
tmpdb.conn.close()
######################################################################################################
### clear up the query result cache bfore conducting the next step!! (easier to achieve this in jupyter)
######################################################################################################
### the latency of workloads in default database (latency_sota)
for k,indexes in enumerate(eval_data[1]):
# print("================ workload {} ================".format(k))
indexes = indexes.split(";")
updated_indexes = []
for index in indexes:
if "create index" in index:
index = index.replace("\n", "")
index = index + ";"
updated_indexes.append(index)
create_indexes(updated_indexes)
# print("create {} indexes".format(len(updated_indexes)))
tmpdb = Database(dbargs, timeout)
for sql in eval_data[0][k]:
query_latency = tmpdb.pgsql_actual_time(sql)
latency_sota = latency_sota + query_latency
print("query latency: {}".format(query_latency))
tmpdb.conn.commit()
tmpdb.conn.close()
drop_indexes(updated_indexes)
print("drop {} indexes".format(len(updated_indexes)))
######################################################################################################
### clear up the query result cache before conducting the next step!!
######################################################################################################
### the latency of workloads in database with ape indexes (latency_ape)
results = []
flags = []
costs = []
for j, prompt in enumerate(prompts):
latency_ape = 0 # compute the total latency of all the eval workloads
for i,workload in enumerate(eval_data[0]):
print("================ workload {} ================".format(i))
updated_indexes = indexes_ape_under_budget[i]
create_indexes(updated_indexes, args.storage_budget)
tmpdb = Database(dbargs, timeout)
for sql in workload:
query_latency = tmpdb.pgsql_actual_time(sql)
latency_ape = latency_ape + query_latency
# print("query latency: {}".format(query_latency))
tmpdb.conn.commit()
tmpdb.conn.close()
drop_indexes(updated_indexes)
results.append([flag,
"{:.4f}".format(latency_ape),
"{:.4f}".format((latency_database-latency_ape)/latency_database), # latency reduction ratio
"{:.4f}".format(latency_sota),
"{:.4f}".format((latency_database-latency_sota)/latency_database),
prompt])
sorted_results = sorted(results, key=lambda x: float(x[2]))
for acc, ape_latency, ape_latency_reduction, sota_latency, sota_latency_reduction, prompt in sorted_results[:10]:
logging.info(f' {acc}, {ape_latency}, {ape_latency_reduction}, {sota_latency}, {sota_latency_reduction}: {prompt}')
eval_dict = dict()
eval_dict["task"] = task
eval_dict["config"] = conf
eval_dict['prompts'] = prompts
eval_dict["inputs"] = inputs
eval_dict["outputs"] = model_outputs
eval_dict["answers"] = answers
eval_dict["scores"] = results
eval_pik = args.data_save.format(args.exp_id) + task + "_ape_eval.dat"
with open(eval_pik, "wb") as f:
pickle.dump(eval_dict, f)
######################################################################################################
### clear up the query result cache in the end!!
###################################################################################################### | [
"\n",
"Input\\:.+?;",
"['PLACEHOLDER']",
"Output\\:.+?;",
" I gave a friend an instruction.The friend followed the instruction to output create index statements based on the input SQL queries. The resulting create index statements can be executed on a PostgreSQL database and can reduce the execution latency of the input SQL queries. Based on the instruction they produced the following input-output pairs:\n\n[full_DEMO]\n\nInstruction: [APE]",
"Input: [INPUT]\nOutput: [OUTPUT]",
"Instruction: [PROMPT]\n\nInput: [INPUT]\nOutput: [OUTPUT]",
"[]"
] |
2024-01-10 | TsinghuaDatabaseGroup/DB-GPT | prompt_template_scripts~query_rewrite~run_query_rewrite.py | import random
import openai
import os
import re
import numpy as np
import pickle
from data.load_data import load_data
import logging
import sys
sys.path.append('..')
from automatic_prompt_engineer import ape, config, template, utils
from evaluation.exec_eval import exec_instruction_evaluator
from evaluation.eval_result import SQLEvaluationResult
from evaluation.utility import get_sql_latc_cost
from utils.database import DBArgs
eval_template = "Instruction: [PROMPT]\n\nInput: [INPUT]\nOutput: [OUTPUT]"
prompt_gen_template = "I gave a friend an instruction. The friend followed the instruction to rewrite the input SQL query to produce an equivalent SQL query. The resulting output SQL query can be executed on a PostgreSQL database with decreased latency. Based on the instruction they produced the following input-output pairs:\n\n[full_DEMO]\n\nInstruction: [APE]"
# Accordingly they produced the following input-output pairs:
demos_template = "Input: [INPUT]\nOutput: [OUTPUT]"
def generate_prompts(eval_template,
demos_template,
prompt_gen_data,
eval_data,
conf,
few_shot_data=None,
prompt_gen_template=None):
prompts = ape.find_prompts(eval_template=eval_template,
prompt_gen_data=prompt_gen_data,
eval_data=eval_data,
conf=conf,
few_shot_data=few_shot_data,
demos_template=demos_template,
prompt_gen_template=prompt_gen_template)
filtered_prompts = []
for p in prompts:
prompt = re.sub(r'Input\:.+?;', '', p, flags=re.DOTALL)
prompt = re.sub(r'Output\:.+?;', '', prompt, flags=re.DOTALL)
prompt = prompt.replace('\n', '').strip()
filtered_prompts.append(prompt)
filtered_prompts = list(set(filtered_prompts))
logging.info('Deduplicated to {} prompts.'.format(len(filtered_prompts)))
return filtered_prompts
'''
The following code is used to evaluate the generated prompts.
'''
def eval_prompts(prompts, eval_template, eval_data, demos_template, config):
logging.info('Evaluating prompts...')
eval_template_instance = template.EvalTemplate(eval_template)
demos_template_instance = template.DemosTemplate(demos_template)
inputs, _, model_outputs, answers = exec_instruction_evaluator(prompts, eval_template_instance, eval_data, demos_template_instance, config)
logging.info('Finished evaluating.')
return inputs, model_outputs, answers
# Step 1. get the params.
parser = utils.get_parser()
args = parser.parse_args()
task = 'query_rewrite'
# Step 2. set the seed and OpenAI api_key.
openai.api_key = os.environ["OPENAI_API_KEY"]
random.seed(args.seed)
# Step 3. create the directory to store the `result`.
assert not os.path.exists(os.path.dirname(args.logdir.format(args.exp_id))), \
f"`{os.path.dirname(args.logdir.format(args.exp_id))}` dir already existed! " \
f"And we don't intend to overwrite anything."
os.makedirs(os.path.dirname(args.logdir.format(args.exp_id)))
os.makedirs(os.path.dirname(args.model_save.format(args.exp_id, 0)))
os.makedirs(os.path.dirname(args.data_save.format(args.exp_id, 0)))
utils.set_logger(args.runlog.format(args.exp_id))
logging.info("Start Adversarial Workload Generation.")
logging.info(f"Create the directory `{os.path.dirname(args.logdir.format(args.exp_id))}` to save experiment result.")
# Step 4. load the training and evaluation data.
induce_data, eval_data = load_data(args.train_data, task, 'train'), load_data(args.eval_data, task, 'eval')
conf = {
'generation': {
'num_subsamples': args.gen_sample,
'num_demos':args.gen_demo,
'num_prompts_per_subsample': args.gen_prompt_per_sample,
'model': {
'gpt_config': {
'model': args.gen_model,
'max_tokens': args.gen_max_tokens
}
}
},
'evaluation': {
'task': task,
'num_samples': min(args.eval_sample, len(eval_data[0])),
'model': {
'gpt_config': {
'model': args.eval_model,
'max_tokens': args.eval_max_tokens
}
}
}
}
print(os.path.join( os.path.dirname(__file__), args.algo_conf))
conf = config.update_config(conf, os.path.join( os.path.dirname(__file__), args.algo_conf))
# Step 5. generate instructions.
prompts = generate_prompts(eval_template=eval_template,
prompt_gen_data=induce_data,
eval_data=eval_data,
conf=conf,
few_shot_data=induce_data,
demos_template=demos_template,
prompt_gen_template=prompt_gen_template)
train_dict = dict()
train_dict["prompts"] = prompts
train_pik = args.model_save.format(args.exp_id) + task + "_ape_train.dat"
with open(train_pik, "wb") as f:
pickle.dump(train_dict, f)
# Step 6. evaluate instructions.
inputs, model_outputs, answers = eval_prompts(prompts, eval_template, eval_data, demos_template, conf['evaluation'])
scores = []
flags = []
costs = []
for prediction, ans_ in zip(model_outputs, answers):
dbargs = DBArgs("pgsql", utils.get_conf(args.db_conf, 'pgsql_server'), ans_['dataset'])
flag, cost, latency = get_sql_latc_cost(prediction, ans_, dbargs, 60)
scores.append([flag, latency, float(ans_['input_latency']), float(ans_['output_latency']), cost, float(ans_['input_cost']), float(ans_['output_cost'])])
scores = np.array(scores).reshape(len(prompts), conf['evaluation']['num_samples'], 7)
res = SQLEvaluationResult(prompts, scores)
sorted_results = res.sorted_latc()
logging.info('Accuracy\tNormalized Rewrite Latency\tRewrite Latency\tInput Latency\tAnswer Latency\tPrompt')
for acc, norm_latency, latency, input_latency, output_latency, cost, input_cost, output_cost, prompt in sorted_results[:10]:
logging.info(f'{acc}\t{norm_latency}\t{latency}\t{input_latency}\t{output_latency}\t{prompt}')
eval_dict = dict()
eval_dict["task"] = task
eval_dict["config"] = conf
eval_dict['prompts'] = prompts
eval_dict["inputs"] = inputs
eval_dict["outputs"] = model_outputs
eval_dict["answers"] = answers
eval_dict["scores"] = scores
eval_dict["results"] = sorted_results
eval_pik = args.data_save.format(args.exp_id) + task + "_ape_eval.dat"
with open(eval_pik, "wb") as f:
pickle.dump(eval_dict, f)
| [
"\n",
"Input\\:.+?;",
"Output\\:.+?;",
"['PLACEHOLDER']",
"Input: [INPUT]\nOutput: [OUTPUT]",
"Instruction: [PROMPT]\n\nInput: [INPUT]\nOutput: [OUTPUT]",
"I gave a friend an instruction. The friend followed the instruction to rewrite the input SQL query to produce an equivalent SQL query. The resulting output SQL query can be executed on a PostgreSQL database with decreased latency. Based on the instruction they produced the following input-output pairs:\n\n[full_DEMO]\n\nInstruction: [APE]",
"[]"
] |