date_collected (stringclasses, 1 value) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | eldarsilver/DQN_Pytorch_ROS | openai_ros~openai_ros~src~openai_ros~task_envs~cartpole_stay_up~stay_up.py | from gym import utils
from openai_ros.robot_envs import cartpole_env
from gym.envs.registration import register
from gym import error, spaces
import rospy
import math
import numpy as np
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class CartPoleStayUpEnv(cartpole_env.CartPoleEnv):
def __init__(self):
ros_ws_abspath = rospy.get_param("/cartpole_v0/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path " + ros_ws_abspath + \
" does not exist, execute: mkdir -p " + ros_ws_abspath + \
"/src;cd " + ros_ws_abspath + ";catkin_make"
ROSLauncher(rospackage_name="cartpole_description",
launch_file_name="start_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/cartpole_stay_up/config",
yaml_file_name="stay_up.yaml")
self.get_params()
self.action_space = spaces.Discrete(self.n_actions)
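# The observation vector built in _get_obs() is [base position, base velocity,
# pole angle, pole velocity]; the bounds below appear to be twice the done
# thresholds for position and angle, with the velocities left effectively unbounded.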
high = np.array([
2.5 * 2,
np.finfo(np.float32).max,
0.7 * 2,
np.finfo(np.float32).max])
self.observation_space = spaces.Box(-high, high)
# TODO: Remove when working
"""
cartpole_env.CartPoleEnv.__init__(
self, control_type=self.control_type
)
"""
# Here we will add any init functions prior to starting the MyRobotEnv
super(CartPoleStayUpEnv, self).__init__(control_type=self.control_type,
ros_ws_abspath=ros_ws_abspath)
def get_params(self):
# get configuration parameters
self.n_actions = rospy.get_param('/cartpole_v0/n_actions')
self.min_pole_angle = rospy.get_param('/cartpole_v0/min_pole_angle')
self.max_pole_angle = rospy.get_param('/cartpole_v0/max_pole_angle')
self.max_base_velocity = rospy.get_param(
'/cartpole_v0/max_base_velocity')
self.min_base_pose_x = rospy.get_param('/cartpole_v0/min_base_pose_x')
self.max_base_pose_x = rospy.get_param('/cartpole_v0/max_base_pose_x')
self.pos_step = rospy.get_param('/cartpole_v0/pos_step')
self.running_step = rospy.get_param('/cartpole_v0/running_step')
self.init_pos = rospy.get_param('/cartpole_v0/init_pos')
self.wait_time = rospy.get_param('/cartpole_v0/wait_time')
self.control_type = rospy.get_param('/cartpole_v0/control_type')
def _set_action(self, action):
# Take action
if action == 0: # LEFT
rospy.loginfo("GO LEFT...")
self.pos[0] -= self.pos_step
elif action == 1: # RIGHT
rospy.loginfo("GO RIGHT...")
self.pos[0] += self.pos_step
elif action == 2: # LEFT BIG
rospy.loginfo("GO LEFT BIG...")
self.pos[0] -= self.pos_step * 10
elif action == 3: # RIGHT BIG
rospy.loginfo("GO RIGHT BIG...")
self.pos[0] += self.pos_step * 10
# Apply action to simulation.
rospy.loginfo("MOVING TO POS=="+str(self.pos))
# 1st: unpause simulation
#rospy.logdebug("Unpause SIM...")
# self.gazebo.unpauseSim()
self.move_joints(self.pos)
rospy.logdebug(
"Wait for some time to execute movement, time="+str(self.running_step))
rospy.sleep(self.running_step) # wait for some time
rospy.logdebug(
"DONE Wait for some time to execute movement, time=" + str(self.running_step))
# 3rd: pause simulation
#rospy.logdebug("Pause SIM...")
# self.gazebo.pauseSim()
def _get_obs(self):
data = self.joints
# base_position  base_velocity  pole_angle  pole_velocity
#obs = [round(data.position[1],1), round(data.velocity[1],1), round(data.position[0],1), round(data.velocity[0],1)]
obs = [data.position[1], data.velocity[1],
data.position[0], data.velocity[0]]
return np.array(obs)
def _is_done(self, observations):
done = False
data = self.joints
rospy.loginfo("BASEPOSITION=="+str(observations[0]))
rospy.loginfo("POLE ANGLE==" + str(observations[2]))
# check if the base is still within the ranges of (-2, 2)
if (self.min_base_pose_x >= observations[0] or observations[0] >= self.max_base_pose_x):
rospy.logerr("Base Outside Limits==>min="+str(self.min_base_pose_x) +
",pos="+str(observations[0])+",max="+str(self.max_base_pose_x))
done = True
# check if pole has toppled over
if (self.min_pole_angle >= observations[2] or observations[2] >= self.max_pole_angle):
rospy.logerr(
"Pole Angle Outside Limits==>min=" + str(self.min_pole_angle) + ",pos=" + str(observations[2]) + ",max=" + str(
self.max_pole_angle))
done = True
rospy.loginfo("FINISHED get _is_done")
return done
def _compute_reward(self, observations, done):
"""
Gives one point for every step the pole stays upright. It takes its data from the
given observations rather than re-reading the sensors, so it stays consistent with
the other callbacks.
:return: reward
"""
rospy.logdebug("START _compute_reward")
if not done:
reward = 1.0
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
rospy.logwarn("You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
self.steps_beyond_done += 1
reward = 0.0
rospy.logdebug("END _compute_reward")
return reward
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
self.steps_beyond_done = None
def _set_init_pose(self):
"""
Sets joints to initial position [0,0,0]
:return:
"""
self.check_publishers_connection()
# Reset Internal pos variable
self.init_internal_vars(self.init_pos)
self.move_joints(self.pos)
| [] |
2024-01-10 | eldarsilver/DQN_Pytorch_ROS | turtle2_openai_ros_example~src~deepq.py | #!/usr/bin/env python
import gym
from gym import wrappers
# ROS packages required
import rospy
import rospkg
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
from functools import reduce
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
import os
import time
import random
import numpy as np
import matplotlib.pyplot as plt
from collections import namedtuple
import math
import glob
import io
import base64
from memory import ReplayMemory
import datetime
import json
class DQN(nn.Module):
# hidden_size=64
def __init__(self, inputs, outputs, hidden_size=128):
super(DQN, self).__init__()
self.fc1 = nn.Linear(in_features=inputs, out_features=hidden_size)
self.relu = nn.ReLU()
self.dropout = nn.Dropout(p=0.5)
self.bn1 = nn.BatchNorm1d(num_features=hidden_size)
self.bn2 = nn.BatchNorm1d(num_features=64)
self.bn3 = nn.BatchNorm1d(num_features=32)
self.fc2 = nn.Linear(in_features=hidden_size, out_features=64)
self.fc3 = nn.Linear(in_features=64, out_features=32)
self.fc4 = nn.Linear(in_features=32, out_features=outputs)
#self.fc5 = nn.Linear(in_features=16, out_features=outputs)
def forward(self, x):
x = self.fc1(x)
x = self.bn1(x)
x = self.relu(x)
#x = self.dropout(x)
x = self.fc2(x)
x = self.bn2(x)
x = self.relu(x)
#x = self.dropout(x)
x = self.fc3(x)
x = self.bn3(x)
x = self.relu(x)
#x = self.dropout(x)
x = self.fc4(x)
#x = self.relu(x)
#x = self.dropout(x)
#x = self.fc5(x)
return x
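# The network above is a plain MLP: inputs -> 128 -> 64 -> 32 -> outputs, with a
# BatchNorm + ReLU block after each hidden layer; the dropout layer is defined but
# not applied in forward(), and a fifth linear layer is left commented out.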
def compute_eps_threshold(step, eps_start, eps_end, eps_decay):
# eps_start = 1.0, eps_end = 0.1, eps_decay = num_steps = 9e4
# 0.1 + 0.9 * math.exp(-1. * step / 9e4)
# with step = 9e4: 0.1 + 0.9 * 0.36787944117 = 0.1 + 0.33109149705 = 0.43109149705
# with step = 0: 0.1 + 0.9 * math.exp(-1 * 0) = 0.1 + 0.9 * 1 = 1.0
#return eps_end + (eps_start - eps_end) * math.exp(-1. * step / eps_decay)
return eps_end + (eps_start - eps_end) * math.exp(-1. * step / eps_decay)
def select_action(policy, state, device, env, eps_greedy_threshold, n_actions):
rospy.logwarn("eps_greedy_threshold: " + str(eps_greedy_threshold))
if random.random() > eps_greedy_threshold:
rospy.logwarn("Entering select action random.random() > eps_greedy_threshold...")
policy_used = True
#rospy.logwarn("state.shape: ")
#rospy.logwarn(state.shape)
#rospy.logwarn("n_actions Env.action_space.n%d", n_actions)
with torch.no_grad():
# t.max(1) will return largest column value of each row.
# second column on max result is index of where max element was
# found, so we pick action with the larger expected reward.
policy.eval()
action = policy(state).max(1)[1].view(1, 1)
policy_act = action
policy.train()
else:
rospy.logwarn("Entering select action random.random() < eps_greedy_threshold...")
policy_used = False
action = torch.tensor([[random.randrange(n_actions)]], device=device, dtype=torch.long)
with torch.no_grad():
policy.eval()
policy_act = policy(state).max(axis=1)[1].view(1, 1)
return action, policy_act, policy_used
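# Note: even when a random action is taken, policy_act still records what the greedy
# policy would have chosen and policy_used flags which branch was executed; both are
# only used later for tracing (memory.push_trace).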
def train(policy_net, target_net, optimizer, scheduler, memory, batch_size, gamma, device, env):
if len(memory) < batch_size:
return
full_memory = memory.sample(len(memory))
full_memory_fields = memory.Transition(*zip(*full_memory))
full_rewards = torch.cat(full_memory_fields.reward).float()
#full_states = torch.cat(full_memory_fields.state)
transitions = memory.sample(batch_size)
# This converts batch-array of Transitions to Transition of batch-arrays.
# list of Transitions: [(s, a, r, s', d), (s, a, r, s', d), ...]
# will become:
# Transition((s0, s1, s2, ...), (a0, a1, a2, ...), ...)
batch = memory.Transition(*zip(*transitions))
# Compute a mask of non-final states and concatenate the batch elements
# (a final state would've been the one after which simulation ended)
# self.Transition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward'))
non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)), device=device, dtype=torch.bool)
non_final_next_states = torch.cat([s for s in batch.next_state if s is not None])
e = np.finfo(np.float32).eps.item()
state_batch = torch.cat(batch.state)
#state_batch = (state_batch - full_states.mean()) / (full_states.std() + e)
action_batch = torch.cat(batch.action)
reward_batch = torch.cat(batch.reward) #.float()
#reward_batch = (reward_batch - full_rewards.mean()) / (full_rewards.std() + e)
# Compute Q(s_t, a) - the model computes Q(s_t) for all a, then we select the columns of actions taken.
#rospy.logwarn("state_batch.shape: ")
#rospy.logwarn(state_batch.shape)
#rospy.logwarn("n_inputs Env.observation_space: ")
#rospy.logwarn(env.observation_space)
#rospy.logwarn("n_inputs Env.observation_space.shape: ")
#rospy.logwarn(env.observation_space.shape)
#rospy.logwarn("n_inputs Env.observation_space.shape[0] %d", n_inputs)
#rospy.logwarn("n_actions Env.action_space.n %d", n_actions)
state_action_values = policy_net(state_batch).gather(1, action_batch)
#rospy.logwarn("state_action_values.shape: ")
#rospy.logwarn(state_action_values.shape)
#rospy.logwarn("state_action_values: ")
#rospy.logwarn(state_action_values)
# Compute Q(s_{t+1}) for all next states.
# Expected values of actions for non_final_next_states are computed based
# on the "older" target_net; selecting their best reward with max(1)[0].
# This is merged based on the mask, such that we'll have either the expected
# state value or 0 in case the state was final.
# Note the call to detach() on Q(s_{t+1}), which prevents gradient flow
next_state_values = torch.zeros(batch_size, device=device)
next_state_values[non_final_mask] = target_net(non_final_next_states).max(dim=1)[0].detach()
# Compute targets for Q values: y_t = r_t + gamma * max(Q_{t+1})
expected_state_action_values = reward_batch + (gamma * next_state_values)
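# This is the standard one-step DQN target y = r + gamma * max_a' Q_target(s', a'),
# with next_state_values already zeroed for terminal transitions via non_final_mask.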
"""
rospy.logwarn("expected_state_action_values.shape: ")
rospy.logwarn(expected_state_action_values.shape)
rospy.logwarn("expected_state_action_values: ")
rospy.logwarn(expected_state_action_values)
rospy.logwarn("expected_state_action_values.unsqueeze(1): ")
rospy.logwarn(expected_state_action_values.unsqueeze(1))
"""
# Compute Pseudo-Huber loss between predicted Q values and targets y
loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
# Take an SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
for name, weight in policy_net.named_parameters():
"""
print("\nname: ")
print(name)
print("\nweight: ")
print(weight)
print("\nweight.grad: ")
print(weight.grad)
"""
writer.add_histogram(name, weight, step_count)
writer.add_histogram(str(name) + '/grad', weight.grad, step_count)
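# Note: writer and step_count are module-level globals created in the __main__ block
# below, so train() only works when this file is run as a script.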
def test(env, policy_net, device, test_global_step, render=False):
state, ep_reward, done = env.reset(), 0, False
rospy.logwarn("Entering test method ...")
test_local_step = 0
while not done:
if render:
env.render()
state = torch.from_numpy(np.array(state)).float().unsqueeze(0).to(device)
action, policy_act, policy_used = select_action(policy_net, state, device, env, eps_greedy_threshold=0., n_actions=1)
state, reward, done, _ = env.step(action.item())
test_local_step += 1
test_global_step += 1
rospy.logwarn("Testing: Reward of this step: ")
rospy.logwarn(reward)
#writer.add_scalar("Test_step_Reward", reward, global_step=test_local_step)
ep_reward += reward
rospy.logwarn("Testing: Cumulative Reward of this episode: ")
rospy.logwarn(ep_reward)
writer.add_scalar("Test_Cumulative_Rewards", ep_reward, global_step=test_global_step)
return ep_reward, test_global_step
if __name__ == '__main__':
rospy.init_node('example_turtlebot2_maze_dqn', anonymous=True, log_level=rospy.WARN)
# Init OpenAI_ROS ENV
task_and_robot_environment_name = rospy.get_param('/turtlebot2/task_and_robot_environment_name')
# Create the Gym environment
env = StartOpenAI_ROS_Environment(task_and_robot_environment_name)
rospy.loginfo("Gym environment done")
rospy.loginfo("Starting Learning")
# Set the logging system
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('turtle2_openai_ros_example')
outdir = pkg_path + '/training_results'
env = wrappers.Monitor(env, outdir, force=True)
rospy.loginfo("Monitor Wrapper started")
last_time_steps = np.ndarray(0)
"""
# Loads parameters from the ROS param server
# Parameters are stored in a yaml file inside the config directory
# They are loaded at runtime by the launch file
Alpha = rospy.get_param("/turtlebot2/alpha")
Epsilon = rospy.get_param("/turtlebot2/epsilon")
Gamma = rospy.get_param("/turtlebot2/gamma")
epsilon_discount = rospy.get_param("/turtlebot2/epsilon_discount")
nepisodes = rospy.get_param("/turtlebot2/nepisodes")
nsteps = rospy.get_param("/turtlebot2/nsteps")
running_step = rospy.get_param("/turtlebot2/running_step")
"""
# Hyperparameters
gamma = 0.999 # initially 0.99 discount factor
seed = 543 # random seed
log_interval = 25 # controls how often we log progress, in episodes
num_steps = 15e4 # 11e4 number of steps to train on
batch_size = 512 # batch size for optimization
lr = 1e-3 # 1e-4 learning rate
eps_start = 1.0 # initial value for epsilon (in epsilon-greedy)
eps_end = 0.1 # final value for epsilon (in epsilon-greedy)
eps_decay = 9e4 # 8e4 num_steps, length of epsilon decay, in env steps
target_update = 1000 # how often to update target net, in env steps
test_global_step = 0 # Global number of testing steps for tracking cumulative rewards in Tensorboard
# If gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Fix random seed (for reproducibility)
env.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# Get number of actions from gym action space
#n_inputs = env.observation_space.shape[0] # 76
n_inputs = 5 # 76, 720, 360
n_actions = env.action_space.n
#rospy.logwarn("n_inputs Env.observation_space.shape[0] %d", n_inputs)
#rospy.logwarn("n_actions Env.action_space.n %d", n_actions)
policy_net = DQN(n_inputs, n_actions).to(device)
target_net = DQN(n_inputs, n_actions).to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
optimizer = torch.optim.Adam(policy_net.parameters(), lr=lr)
# StepLR (commented out below) decays the learning rate of each parameter group by gamma every step_size epochs; such decay can happen simultaneously with other changes to the learning rate from outside the scheduler. The scheduler actually used here is OneCycleLR.
#scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size= 5, gamma=0.9)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, total_steps=int(num_steps))
memory = ReplayMemory(10000)
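# ReplayMemory comes from the local memory module; from its use below it is expected
# to expose a Transition namedtuple plus push(), push_trace(), sample() and
# get_memtrace() -- an assumption, since memory.py is not shown here.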
############################################################################
#logdir = os.path.join("$HOME/python3_ws/src/turtle2_openai_ros_example/src/logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
basedir = os.path.dirname(__file__)
basedirpathlogs = os.path.join(basedir, "logs")
logdir = os.path.join(basedirpathlogs, datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
writer = SummaryWriter(log_dir=logdir)
#tracedir = "$HOME/python3_ws/src/turtle2_openai_ros_example/src/trace"
tracedir = os.path.join(basedir, "trace")
############################################################################
print("Target reward: {}".format(env.spec.reward_threshold))
step_count = 0
ep_rew_history = []
i_episode, ep_reward = 0, -float('inf')
while step_count < num_steps:
rospy.logdebug("############### START EPISODE=>" + str(i_episode))
# Initialize the environment and state
# type(state): <class 'list'>
state, done = env.reset(), False
state = [round(num, 1) for num in state]
list_state = state
#print("\n type(state): ")
#print(type(state))
rospy.logwarn("# state we are => " + str(state))
state = torch.from_numpy(np.array(state)).float().unsqueeze(0).to(device)
while not done:
rospy.logwarn("i_episode: " + str(i_episode))
rospy.logwarn("step_count: " + str(step_count))
# Select an action
eps_greedy_threshold = compute_eps_threshold(step_count, eps_start, eps_end, eps_decay)
action, policy_act, policy_used = select_action(policy_net, state, device, env, eps_greedy_threshold, n_actions)
rospy.logwarn("Next action is:%d", action)
# Perform action in env
next_state, reward, done, _ = env.step(action.item())
next_state = [round(num, 1) for num in next_state]
list_next_state = next_state
#rospy.logwarn(str(next_state) + " " + str(reward))
# Bookkeeping
next_state = torch.from_numpy(np.array(next_state)).float().unsqueeze(0).to(device)
#reward = reward_shaper(reward, done)
reward = torch.tensor([reward], device=device)
step_count += 1
# Store the transition in memory
memory.push(state, action, next_state, reward)
memory.push_trace(i_episode, step_count, list_state, action.item(), list_next_state, reward.item(), policy_act.item(), eps_greedy_threshold, policy_used)
"""
# Make the algorithm learn based on the results
rospy.logwarn("# state we were=>" + str(state))
rospy.logwarn("# action that we took=>" + str(action))
rospy.logwarn("# reward that action gave=>" + str(reward))
#rospy.logwarn("# episode cumulated_reward=>" + str(cumulated_reward))
rospy.logwarn("# State in which we will start next step=>" + str(next_state))
"""
# Move to the next state
state = next_state
list_state = list_next_state
# Perform one step of the optimization (on the policy network)
train(policy_net, target_net, optimizer, scheduler, memory, batch_size, gamma, device, env)
"""
for name, weight in policy_net.named_parameters():
writer.add_histogram(name, weight, step_count)
writer.add_histogram(str(name) + '/grad', weight.grad, step_count)
"""
# Update the target network, copying all weights and biases in DQN
if step_count % target_update == 0:
target_net.load_state_dict(policy_net.state_dict())
if not os.path.exists('checkpoints'):
os.makedirs('checkpoints')
#############################################################################################################
#torch.save(policy_net.state_dict(), '$HOME/python3_ws/src/turtle2_openai_ros_example/src/checkpoints/dqn-episode-{0}-step-{1}.pt'.format(str(i_episode), str(step_count)))
model_dir = os.path.dirname(__file__)
MODEL_PATH = os.path.join(model_dir, 'checkpoints/dqn-episode-{0}-step-{1}.pt'.format(str(i_episode), str(step_count)))
torch.save(policy_net.state_dict(), MODEL_PATH)
#torch.save(policy_net.state_dict(), 'checkpoints/dqn-episode-{0}-step-{1}.pt'.format(str(i_episode), str(step_count)))
fname = datetime.datetime.now().strftime("%Y_%m_%d-%H:%M:%S") + ".json"
list_namedtuple = memory.get_memtrace()
with open(os.path.join(tracedir, fname), 'w') as f:
json.dump([elem._asdict() for elem in list_namedtuple[-1000:-1]], f)
#torch.save(policy_net.state_dict(), '/home/eldar/python3_ws/src/turtle2_openai_ros_example/src/checkpoints/dqn-{}.pt'.format(datetime.datetime.now().strftime("%Y%m%d-%H%M%S")))
i_episode += 1
for name, weight in policy_net.named_parameters():
"""
print("\nname: ")
print(name)
print("\nweight: ")
print(weight)
print("\nweight.grad: ")
print(weight.grad)
"""
writer.add_histogram(name, weight, step_count)
#writer.add_histogram('grad', weight.grad, step_count)
# Evaluate greedy policy
if i_episode % log_interval == 0 or step_count >= num_steps:
ep_reward, test_global_step = test(env, policy_net, device, test_global_step)
ep_rew_history.append((i_episode, ep_reward))
print('Episode {}\tSteps: {:.2f}k'
'\tEval reward: {:.2f}'.format(
i_episode, step_count/1000., ep_reward))
print("\nFinished training! Eval reward: {:.2f}".format(ep_reward))
print("\nFinished training! List of Eval rewards: ")
print(ep_rew_history)
if not os.path.exists('checkpoints'):
os.makedirs('checkpoints')
str_i_episode = str(i_episode)
#######################################################################################
#torch.save(policy_net.state_dict(), '$HOME/python3_ws/src/turtle2_openai_ros_example/src/checkpoints/dqn-final-episode-{0}-step-{1}.pt'.format(str_i_episode, str(step_count)))
model_dir = os.path.dirname(__file__)
MODEL_PATH = os.path.join(model_dir, 'checkpoints/dqn-episode-{0}-step-{1}.pt'.format(str(i_episode), str(step_count)))
torch.save(policy_net.state_dict(), MODEL_PATH)
| [] |
2024-01-10 | eldarsilver/DQN_Pytorch_ROS | openai_ros~openai_ros~src~openai_ros~task_envs~turtlebot2~turtlebot2_maze.py | import rospy
import numpy
import time
import math
from gym import spaces
from openai_ros.robot_envs import turtlebot2_env
from gym.envs.registration import register
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Header
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
from openai_ros.openai_ros_common import ROSLauncher
import os
class TurtleBot2MazeEnv(turtlebot2_env.TurtleBot2Env):
def __init__(self):
"""
This Task Env is designed for having the TurtleBot2 in some kind of maze.
It will learn how to move around the maze without crashing.
"""
# This is the path where the simulation files, the Task and the Robot gits will be downloaded if not there
# This parameter HAS to be set up in the MAIN launch of the AI RL script
ros_ws_abspath = rospy.get_param("/turtlebot2/ros_ws_abspath", None)
assert ros_ws_abspath is not None, "You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \'YOUR/SIM_WS/PATH\'"
assert os.path.exists(ros_ws_abspath), "The Simulation ROS Workspace path "+ros_ws_abspath + \
" does not exist, execute: mkdir -p "+ros_ws_abspath + \
"/src;cd "+ros_ws_abspath+";catkin_make"
ROSLauncher(rospackage_name="turtlebot_gazebo",
launch_file_name="start_world_maze_loop_brick.launch",
ros_ws_abspath=ros_ws_abspath)
# Load Params from the desired Yaml file
LoadYamlFileParamsTest(rospackage_name="openai_ros",
rel_path_from_package_to_file="src/openai_ros/task_envs/turtlebot2/config",
yaml_file_name="turtlebot2_maze.yaml")
# Here we will add any init functions prior to starting the MyRobotEnv
super(TurtleBot2MazeEnv, self).__init__(ros_ws_abspath)
# Only variable needed to be set here
number_actions = rospy.get_param('/turtlebot2/n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
#number_observations = rospy.get_param('/turtlebot2/n_observations')
"""
We set the Observation space for the 6 observations
cube_observations = [
round(current_disk_roll_vel, 0),
round(y_distance, 1),
round(roll, 1),
round(pitch, 1),
round(y_linear_speed,1),
round(yaw, 1),
]
"""
# Actions and Observations
self.dec_obs = rospy.get_param(
"/turtlebot2/number_decimals_precision_obs", 1)
self.linear_forward_speed = rospy.get_param(
'/turtlebot2/linear_forward_speed')
self.linear_turn_speed = rospy.get_param(
'/turtlebot2/linear_turn_speed')
self.angular_speed = rospy.get_param('/turtlebot2/angular_speed')
self.init_linear_forward_speed = rospy.get_param(
'/turtlebot2/init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param(
'/turtlebot2/init_linear_turn_speed')
self.n_observations = rospy.get_param('/turtlebot2/n_observations')
self.min_range = rospy.get_param('/turtlebot2/min_range')
self.max_laser_value = rospy.get_param('/turtlebot2/max_laser_value')
self.min_laser_value = rospy.get_param('/turtlebot2/min_laser_value')
# We create two arrays based on the binary values that will be assigned
# In the discretization method.
#laser_scan = self._check_laser_scan_ready()
laser_scan = self.get_laser_scan()
rospy.logdebug("laser_scan len===>"+str(len(laser_scan.ranges)))
# Laser data
self.laser_scan_frame = laser_scan.header.frame_id
# Number of laser reading jumped
self.new_ranges = int(
math.ceil(float(len(laser_scan.ranges)) / float(self.n_observations)))
rospy.logdebug("n_observations===>"+str(self.n_observations))
rospy.logdebug(
"new_ranges, jumping laser readings===>"+str(self.new_ranges))
high = numpy.full((self.n_observations), self.max_laser_value)
low = numpy.full((self.n_observations), self.min_laser_value)
# We only use two integers
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>" +
str(self.observation_space))
# Rewards
self.forwards_reward = rospy.get_param("/turtlebot2/forwards_reward")
self.turn_reward = rospy.get_param("/turtlebot2/turn_reward")
self.end_episode_points = rospy.get_param(
"/turtlebot2/end_episode_points")
self.cumulated_steps = 0.0
self.laser_filtered_pub = rospy.Publisher(
'/turtlebot2/laser/scan_filtered', LaserScan, queue_size=1)
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base(self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=0.05,
update_rate=10,
min_laser_distance=-1)
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
# Set Done to false, because it's calculated asynchronously
self._episode_done = False
# We wait a small amount of time before starting everything because in very fast resets the laser scan values are sluggish
# and sometimes still have values from the prior position that triggered the done.
time.sleep(1.0)
# TODO: Add reset of published filtered laser readings
laser_scan = self.get_laser_scan()
discretized_ranges = laser_scan.ranges
self.publish_filtered_laser_scan(laser_original_data=laser_scan,
new_filtered_laser_range=discretized_ranges)
def _set_action(self, action):
"""
This method will set the linear and angular speed of the TurtleBot2
based on the action number given.
:param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
# We convert the actions to speed movements to send to the parent class CubeSingleDiskEnv
if action == 0: # FORWARD
linear_speed = self.linear_forward_speed
angular_speed = 0.0
self.last_action = "FORWARDS"
elif action == 1: # LEFT
linear_speed = self.linear_turn_speed
angular_speed = self.angular_speed
self.last_action = "TURN_LEFT"
elif action == 2: # RIGHT
linear_speed = self.linear_turn_speed
angular_speed = -1*self.angular_speed
self.last_action = "TURN_RIGHT"
# We tell TurtleBot2 the linear and angular speed to set to execute
self.move_base(linear_speed,
angular_speed,
epsilon=0.05,
update_rate=10,
min_laser_distance=self.min_range)
rospy.logdebug("END Set Action ==>"+str(action) +
", NAME="+str(self.last_action))
def _get_obs(self):
"""
Here we define which sensor data defines our robot's observations.
To know which variables we have access to, we need to read the
TurtleBot2Env API docs.
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan = self.get_laser_scan()
rospy.logdebug("BEFORE DISCRET _episode_done==>" +
str(self._episode_done))
discretized_observations = self.discretize_observation(laser_scan,
self.new_ranges
)
rospy.logdebug("Observations==>"+str(discretized_observations))
rospy.logdebug("AFTER DISCRET_episode_done==>"+str(self._episode_done))
rospy.logdebug("END Get Observation ==>")
return discretized_observations
def _is_done(self, observations):
if self._episode_done:
rospy.logdebug("TurtleBot2 is Too Close to wall==>" +
str(self._episode_done))
else:
rospy.logerr("TurtleBot2 is Ok ==>")
return self._episode_done
def _compute_reward(self, observations, done):
if not done:
if self.last_action == "FORWARDS":
reward = self.forwards_reward
else:
reward = self.turn_reward
else:
reward = -1*self.end_episode_points
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def discretize_observation(self, data, new_ranges):
"""
Subsamples the laser readings. The original scheme (commented out below) discards
readings whose index is not a multiple of new_ranges; the active code keeps only
the fixed beam indices listed in idx_ranges.
"""
self._episode_done = False
discretized_ranges = []
filtered_range = []
#mod = len(data.ranges)/new_ranges
mod = new_ranges
max_laser_value = data.range_max
min_laser_value = data.range_min
rospy.logdebug("data = " + str(data))
rospy.logwarn("len(data.ranges) = " + str(len(data.ranges)))
rospy.logwarn("mod=" + str(mod))
"""
idx_ranges = [0, 5, 10, 15, 20, 25, 30, 35, 40, 44, 50, 55, 60, 65, 70, 75, 80, 85, 89, 119, 144, 160, 165, 170, 175, 179, 183, 185, 187, 190, 193, 195, 198, 200, 203, 206, 208, 210, 213, 215, 218, 220, 224, 227, 230, 233, 237, 240, 243, 246, 249, 252, 255, 258, 261, 264, 267, 269, 272, 275, 280, 285, 290, 295, 300, 305, 310, 314, 319, 325, 330, 335, 340, 345, 350, 355]
"""
idx_ranges = [89, 135, 179, 224, 269]
#for item, _ in enumerate(data.ranges):
for item in idx_ranges:
if data.ranges[item] == float('Inf') or numpy.isinf(data.ranges[item]):
# discretized_ranges.append(self.max_laser_value)
discretized_ranges.append(round(max_laser_value, self.dec_obs))
elif numpy.isnan(data.ranges[item]):
# discretized_ranges.append(self.min_laser_value)
discretized_ranges.append(round(min_laser_value, self.dec_obs))
else:
# discretized_ranges.append(int(item))
discretized_ranges.append(round(data.ranges[item], self.dec_obs))
if (self.min_range > data.ranges[item] > 0):
rospy.logerr("done Validation >>> data.ranges[" + str(item) + "]=" + str(data.ranges[item])+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> data.ranges[" + str(item) + "]=" + str(data.ranges[item])+"< "+str(self.min_range))
"""
for i, item in enumerate(data.ranges):
if (i % mod == 0):
if item == float('Inf') or numpy.isinf(item):
# discretized_ranges.append(self.max_laser_value)
discretized_ranges.append(
round(max_laser_value, self.dec_obs))
elif numpy.isnan(item):
# discretized_ranges.append(self.min_laser_value)
discretized_ranges.append(
round(min_laser_value, self.dec_obs))
else:
# discretized_ranges.append(int(item))
discretized_ranges.append(round(item, self.dec_obs))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" +
str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> item=" +
str(item)+"< "+str(self.min_range))
# We add last value appended
filtered_range.append(discretized_ranges[-1])
else:
# We add value zero
filtered_range.append(0.1)
"""
rospy.logdebug(
"Size of observations, discretized_ranges==>"+str(len(discretized_ranges)))
self.publish_filtered_laser_scan(laser_original_data=data,
new_filtered_laser_range=discretized_ranges)
return discretized_ranges
def publish_filtered_laser_scan(self, laser_original_data, new_filtered_laser_range):
rospy.logdebug("new_filtered_laser_range==>" +
str(new_filtered_laser_range))
laser_filtered_object = LaserScan()
h = Header()
# Note you need to call rospy.init_node() before this will work
h.stamp = rospy.Time.now()
h.frame_id = laser_original_data.header.frame_id
laser_filtered_object.header = h
laser_filtered_object.angle_min = laser_original_data.angle_min
laser_filtered_object.angle_max = laser_original_data.angle_max
new_angle_incr = abs(laser_original_data.angle_max -
laser_original_data.angle_min) / len(new_filtered_laser_range)
#laser_filtered_object.angle_increment = laser_original_data.angle_increment
laser_filtered_object.angle_increment = new_angle_incr
laser_filtered_object.time_increment = laser_original_data.time_increment
laser_filtered_object.scan_time = laser_original_data.scan_time
laser_filtered_object.range_min = laser_original_data.range_min
laser_filtered_object.range_max = laser_original_data.range_max
laser_filtered_object.ranges = []
laser_filtered_object.intensities = []
for item in new_filtered_laser_range:
if item == 0.0:
laser_distance = 0.1
else:
laser_distance = item
laser_filtered_object.ranges.append(laser_distance)
laser_filtered_object.intensities.append(item)
self.laser_filtered_pub.publish(laser_filtered_object)
| [] |
2024-01-10 | eldarsilver/DQN_Pytorch_ROS | turtle2_openai_ros_example~src~my_start_qlearning_maze_v2.py | #!/usr/bin/env python
import gym
import numpy
import time
import qlearn
from gym import wrappers
# ROS packages required
import rospy
import rospkg
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
from functools import reduce
import pickle
if __name__ == '__main__':
rospy.init_node('example_turtlebot2_maze_qlearn', anonymous=True, log_level=rospy.WARN)
# Init OpenAI_ROS ENV
task_and_robot_environment_name = rospy.get_param(
'/turtlebot2/task_and_robot_environment_name')
env = StartOpenAI_ROS_Environment(
task_and_robot_environment_name)
# Create the Gym environment
rospy.loginfo("Gym environment done")
rospy.loginfo("Starting Learning")
# Set the logging system
rospack = rospkg.RosPack()
pkg_path = rospack.get_path('turtle2_openai_ros_example')
outdir = pkg_path + '/training_results'
env = wrappers.Monitor(env, outdir, force=True)
rospy.loginfo("Monitor Wrapper started")
last_time_steps = numpy.ndarray(0)
# Loads parameters from the ROS param server
# Parameters are stored in a yaml file inside the config directory
# They are loaded at runtime by the launch file
Alpha = rospy.get_param("/turtlebot2/alpha")
Epsilon = rospy.get_param("/turtlebot2/epsilon")
Gamma = rospy.get_param("/turtlebot2/gamma")
epsilon_discount = rospy.get_param("/turtlebot2/epsilon_discount")
nepisodes = rospy.get_param("/turtlebot2/nepisodes")
nsteps = rospy.get_param("/turtlebot2/nsteps")
running_step = rospy.get_param("/turtlebot2/running_step")
rospy.logwarn("env.action_space %s" % env.action_space)
rospy.logwarn("env.action_space.n %s" % env.action_space.n)
rospy.logwarn("range(env.action_space.n) %s" % range(env.action_space.n))
# Initialises the algorithm that we are going to use for learning
qlearn = qlearn.QLearn(actions=range(env.action_space.n),
alpha=Alpha, gamma=Gamma, epsilon=Epsilon)
initial_epsilon = qlearn.epsilon
start_time = time.time()
highest_reward = 0
q_file_mid = open("/home/eldar/python3_ws/src/turtle2_openai_ros_example/src/qdictmid.pkl", "wb")
q_file_end = open("/home/eldar/python3_ws/src/turtle2_openai_ros_example/src/qdict.pkl", "wb")
#env._max_episode_steps = nsteps
# Starts the main training loop: the one about the episodes to do
for x in range(nepisodes-1):
rospy.logdebug("############### START EPISODE=>" + str(x))
cumulated_reward = 0
done = False
if qlearn.epsilon > 0.05:
qlearn.epsilon *= epsilon_discount
# Initialize the environment and get first state of the robot
observation = env.reset()
state = ''.join(map(str, observation))
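# The discrete state used as the Q-table key is simply the string concatenation of
# the rounded laser readings; the deploy script later loads the pickled Q-dict and
# rebuilds states the same way.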
# Show on screen the actual situation of the robot
# env.render()
# for each episode, we test the robot for nsteps
#for i in range(nsteps-1):
i = 0
while (i < nsteps) and (not done):
rospy.logwarn("############### Start Step=>" + str(i) + " of episode ==> " + str(x))
# Pick an action based on the current state
action = qlearn.chooseAction(state)
rospy.logwarn("Next action is:%d", action)
# Execute the action in the environment and get feedback
observation, reward, done, info = env.step(action)
rospy.logwarn(str(observation) + " " + str(reward))
cumulated_reward += reward
if highest_reward < cumulated_reward:
highest_reward = cumulated_reward
nextState = ''.join(map(str, observation))
# Make the algorithm learn based on the results
rospy.logwarn("# state we were=>" + str(state))
rospy.logwarn("# action that we took=>" + str(action))
rospy.logwarn("# reward that action gave=>" + str(reward))
rospy.logwarn("# episode cumulated_reward=>" + str(cumulated_reward))
rospy.logwarn("# State in which we will start next step=>" + str(nextState))
qlearn.learn(state, action, reward, nextState)
if not (done):
rospy.logwarn("NOT DONE")
state = nextState
else:
rospy.logwarn("DONE")
last_time_steps = numpy.append(last_time_steps, [int(i + 1)])
#break
rospy.logwarn("############### END Step=>" + str(i))
i += 1
#raw_input("Next Step...PRESS KEY")
# rospy.sleep(2.0)
if x == (nepisodes - 2):
q_dict = qlearn.returnQ()
pickle.dump(q_dict, q_file_end)
q_file_end.close()
rospy.logwarn("Saving final pickle")
elif x == 200:
q_dict = qlearn.returnQ()
pickle.dump(q_dict, q_file_mid)
q_file_mid.close()
rospy.logwarn("Saving pickle for 200 episodes")
m, s = divmod(int(time.time() - start_time), 60)
h, m = divmod(m, 60)
rospy.logerr(("EP: " + str(x + 1) + " - [alpha: " + str(round(qlearn.alpha, 2)) + " - gamma: " + str(
round(qlearn.gamma, 2)) + " - epsilon: " + str(round(qlearn.epsilon, 2)) + "] - Reward: " + str(
cumulated_reward) + " Time: %d:%02d:%02d" % (h, m, s)))
rospy.loginfo((" nepisodes " + str(nepisodes) + " alpha " + str(qlearn.alpha) + " gamma " + str(qlearn.gamma) + " initial epsilon " + str(
initial_epsilon) + " epsilon discount " + str(epsilon_discount) + " highest reward " + str(highest_reward) + " "))
l = last_time_steps.tolist()
l.sort()
# print("Parameters: a="+str)
rospy.loginfo("Overall score: {:0.2f}".format(last_time_steps.mean()))
rospy.loginfo("Best 100 score: {:0.2f}".format(reduce(lambda x, y: x + y, l[-100:]) / len(l[-100:])))
env.close()
| [] |
2024-01-10 | eldarsilver/DQN_Pytorch_ROS | turtle2_openai_ros_example~src~deploy_robot_qlearning.py | #!/usr/bin/env python
import rospy
import numpy
import time
import math
from gym import spaces
#from openai_ros.robot_envs import turtlebot2_env
#from gym.envs.registration import register
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Header
from openai_ros.task_envs.task_commons import LoadYamlFileParamsTest
#from openai_ros.openai_ros_common import ROSLauncher
import os
from cv_bridge import CvBridge, CvBridgeError
from datetime import datetime
from std_msgs.msg import String
#from sensor_msgs.msg import Image
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
import pickle
import random  # needed by chooseAction() below for random.choice
class rlComponent(object):
def __init__(self):
"""
This Task Env is designed for having the TurtleBot2 in some kind of maze.
It will learn how to move around the maze without crashing.
"""
# Only variable needed to be set here
number_actions = rospy.get_param('~n_actions')
self.action_space = spaces.Discrete(number_actions)
# We set the reward range, which is not compulsory but here we do it.
self.reward_range = (-numpy.inf, numpy.inf)
#number_observations = rospy.get_param('/turtlebot2/n_observations')
# Actions and Observations
self.dec_obs = rospy.get_param(
"~number_decimals_precision_obs", 1)
self.linear_forward_speed = rospy.get_param(
'~linear_forward_speed')
self.linear_turn_speed = rospy.get_param(
'~linear_turn_speed')
self.angular_speed = rospy.get_param('~angular_speed')
self.init_linear_forward_speed = rospy.get_param(
'~init_linear_forward_speed')
self.init_linear_turn_speed = rospy.get_param(
'~init_linear_turn_speed')
self.n_observations = rospy.get_param('~n_observations')
self.min_range = rospy.get_param('~min_range')
self.max_laser_value = rospy.get_param('~max_laser_value')
self.min_laser_value = rospy.get_param('~min_laser_value')
self.actions = range(number_actions)
self._cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self.last_action = "FORWARDS"
self.laser_scan = None
rospy.Subscriber("/scan", LaserScan, self._laser_scan_callback)
laser_scan = self._check_laser_scan_ready()
rospy.logdebug("laser_scan len===>"+str(len(laser_scan.ranges)))
# Number of laser reading jumped
self.new_ranges = int(
math.ceil(float(len(laser_scan.ranges)) / float(self.n_observations)))
rospy.logdebug("n_observations===>"+str(self.n_observations))
rospy.logdebug(
"new_ranges, jumping laser readings===>"+str(self.new_ranges))
high = numpy.full((self.n_observations), self.max_laser_value)
low = numpy.full((self.n_observations), self.min_laser_value)
# We only use two integers
self.observation_space = spaces.Box(low, high)
rospy.logdebug("ACTION SPACES TYPE===>"+str(self.action_space))
rospy.logdebug("OBSERVATION SPACES TYPE===>" +
str(self.observation_space))
# Rewards
self.forwards_reward = rospy.get_param("~forwards_reward")
self.turn_reward = rospy.get_param("~turn_reward")
self.end_episode_points = rospy.get_param(
"~end_episode_points")
self.cumulated_steps = 0.0
self.laser_filtered_pub = rospy.Publisher(
'/scan_filtered', LaserScan, queue_size=1)
self._init_env_variables()
self._set_init_pose()
rospy.spin()
def _laser_scan_callback(self, data):
self.laser_scan = data
def get_laser_scan(self):
return self.laser_scan
def _check_laser_scan_ready(self):
#self.laser_scan = None
rospy.logdebug("Waiting for /scan to be READY...")
while self.laser_scan is None and not rospy.is_shutdown():
try:
self.laser_scan = rospy.wait_for_message("/scan", LaserScan, timeout=5.0)
rospy.logdebug("Current /scan READY=>")
except:
rospy.logerr("Current /scan not ready yet, retrying for getting laser_scan")
return self.laser_scan
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
self.move_base(self.init_linear_forward_speed,
self.init_linear_turn_speed,
epsilon=0.05,
update_rate=10,
min_laser_distance=-1)
return True
def _init_env_variables(self):
"""
Inits variables needed to be initialised each time we reset at the start
of an episode.
:return:
"""
# For Info Purposes
self.cumulated_reward = 0.0
# Set Done to false, because it's calculated asynchronously
self._episode_done = False
# We wait a small amount of time before starting everything because in very fast resets the laser scan values are sluggish
# and sometimes still have values from the prior position that triggered the done.
time.sleep(1.0)
# TODO: Add reset of published filtered laser readings
#laser_scan = self.get_laser_scan()
discretized_ranges = self.laser_scan.ranges
self.publish_filtered_laser_scan(laser_original_data=self.laser_scan,
new_filtered_laser_range=discretized_ranges)
with open("/home/eldar/python3_ws/src/turtle2_openai_ros_example/src/qdictmid1.pkl","rb") as f:
self.q = pickle.load(f)
#rospy.logwarn("self.q %s" % str(self.q))
self.step()
def _get_obs(self):
"""
Here we define which sensor data defines our robot's observations.
To know which variables we have access to, we need to read the
TurtleBot2Env API docs.
:return:
"""
rospy.logdebug("Start Get Observation ==>")
# We get the laser scan data
laser_scan = self.get_laser_scan()
rospy.logdebug("BEFORE DISCRET _episode_done==>" +
str(self._episode_done))
discretized_observations = self.discretize_observation(laser_scan,
self.new_ranges
)
rospy.logdebug("Observations==>"+str(discretized_observations))
rospy.logdebug("AFTER DISCRET_episode_done==>"+str(self._episode_done))
rospy.logdebug("END Get Observation ==>")
return discretized_observations
def _is_done(self, observations):
if self._episode_done:
rospy.logdebug("TurtleBot2 is Too Close to wall==>" +
str(self._episode_done))
else:
rospy.logerr("TurtleBot2 is Ok ==>")
return self._episode_done
def _compute_reward(self, observations, done):
if not done:
if self.last_action == "FORWARDS":
reward = self.forwards_reward
else:
reward = self.turn_reward
else:
reward = -1*self.end_episode_points
rospy.logdebug("reward=" + str(reward))
self.cumulated_reward += reward
rospy.logdebug("Cumulated_reward=" + str(self.cumulated_reward))
self.cumulated_steps += 1
rospy.logdebug("Cumulated_steps=" + str(self.cumulated_steps))
return reward
# Internal TaskEnv Methods
def discretize_observation(self, data, new_ranges):
"""
Subsamples the laser readings. The original scheme (commented out below) discards
readings whose index is not a multiple of new_ranges; the active code keeps only
the fixed beam indices listed in idx_ranges.
"""
self._episode_done = False
discretized_ranges = []
filtered_range = []
#mod = len(data.ranges)/new_ranges
mod = new_ranges
max_laser_value = data.range_max
min_laser_value = data.range_min
rospy.logdebug("data=" + str(data))
rospy.logwarn("data.range_max= %s" % data.range_max)
rospy.logwarn("data.range_min= %s" % data.range_min)
rospy.logwarn("len(data.ranges)= %s" % len(data.ranges))
rospy.logwarn("data.angle_min)= %s" % data.angle_min)
rospy.logwarn("data.angle_max)= %s" % data.angle_max)
rospy.logwarn("data.angle_increment= %s" % data.angle_increment)
rospy.logwarn("mod=" + str(mod))
rospy.loginfo('right data.ranges[89] %s' % data.ranges[89])
rospy.loginfo('left data.ranges[269] %s ' % data.ranges[269])
rospy.loginfo('back data.ranges[359] %s' % data.ranges[359])
rospy.loginfo('back data.ranges[0] %s' % data.ranges[0])
rospy.loginfo('front data.ranges[179] %s' % data.ranges[179])
#idx_ranges = [0, 89, 179, 269]
idx_ranges = [0, 44, 89, 144, 179, 224, 269, 314]
for item in idx_ranges:
if data.ranges[item] == float('Inf') or numpy.isinf(data.ranges[item]):
# discretized_ranges.append(self.max_laser_value)
discretized_ranges.append(round(max_laser_value, self.dec_obs))
elif numpy.isnan(data.ranges[item]):
# discretized_ranges.append(self.min_laser_value)
discretized_ranges.append(round(min_laser_value, self.dec_obs))
else:
# discretized_ranges.append(int(item))
discretized_ranges.append(round(data.ranges[item], self.dec_obs))
if (self.min_range > data.ranges[item] > 0):
rospy.logerr("done Validation >>> data.ranges[item]=" + str(data.ranges[item])+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> data.ranges[item]=" + str(data.ranges[item])+"< "+str(self.min_range))
rospy.logdebug("Size of observations, discretized_ranges==>"+str(len(discretized_ranges)))
return discretized_ranges
"""
for i, item in enumerate(data.ranges):
if (i % mod == 0):
if item == float('Inf') or numpy.isinf(item):
# discretized_ranges.append(self.max_laser_value)
discretized_ranges.append(
round(max_laser_value, self.dec_obs))
elif numpy.isnan(item):
# discretized_ranges.append(self.min_laser_value)
discretized_ranges.append(
round(min_laser_value, self.dec_obs))
else:
# discretized_ranges.append(int(item))
discretized_ranges.append(round(item, self.dec_obs))
if (self.min_range > item > 0):
rospy.logerr("done Validation >>> item=" +
str(item)+"< "+str(self.min_range))
self._episode_done = True
else:
rospy.logwarn("NOT done Validation >>> item=" +
str(item)+"< "+str(self.min_range))
# We add last value appended
filtered_range.append(discretized_ranges[-1])
else:
# We add value zero
filtered_range.append(0.1)
rospy.logdebug(
"Size of observations, discretized_ranges==>"+str(len(discretized_ranges)))
self.publish_filtered_laser_scan(laser_original_data=data,
new_filtered_laser_range=discretized_ranges)
return discretized_ranges
"""
def publish_filtered_laser_scan(self, laser_original_data, new_filtered_laser_range):
rospy.logdebug("new_filtered_laser_range==>" +
str(new_filtered_laser_range))
laser_filtered_object = LaserScan()
h = Header()
# Note you need to call rospy.init_node() before this will work
h.stamp = rospy.Time.now()
h.frame_id = laser_original_data.header.frame_id
laser_filtered_object.header = h
laser_filtered_object.angle_min = laser_original_data.angle_min
laser_filtered_object.angle_max = laser_original_data.angle_max
new_angle_incr = abs(laser_original_data.angle_max -
laser_original_data.angle_min) / len(new_filtered_laser_range)
#laser_filtered_object.angle_increment = laser_original_data.angle_increment
laser_filtered_object.angle_increment = new_angle_incr
laser_filtered_object.time_increment = laser_original_data.time_increment
laser_filtered_object.scan_time = laser_original_data.scan_time
laser_filtered_object.range_min = laser_original_data.range_min
laser_filtered_object.range_max = laser_original_data.range_max
laser_filtered_object.ranges = []
laser_filtered_object.intensities = []
for item in new_filtered_laser_range:
if item == 0.0:
laser_distance = 0.1
else:
laser_distance = item
laser_filtered_object.ranges.append(laser_distance)
laser_filtered_object.intensities.append(item)
self.laser_filtered_pub.publish(laser_filtered_object)
def move_base(self, linear_speed, angular_speed, epsilon=0.05, update_rate=10, min_laser_distance=-1):
"""
It will move the base based on the linear and angular speeds given.
It will wait until those twists are achieved, reading from the odometry topic.
:param linear_speed: Speed in the X axis of the robot base frame
:param angular_speed: Speed of the angular turning of the robot base frame
:param epsilon: Acceptable difference between the speed asked and the odometry readings
:param update_rate: Rate at which we check the odometry.
:return:
"""
cmd_vel_value = Twist()
cmd_vel_value.linear.x = linear_speed
cmd_vel_value.angular.z = angular_speed
rospy.logdebug("TurtleBot2 Base Twist Cmd>>" + str(cmd_vel_value))
#self._check_publishers_connection()
self._cmd_vel_pub.publish(cmd_vel_value)
time.sleep(0.2)
#time.sleep(0.02)
"""
self.wait_until_twist_achieved(cmd_vel_value,
epsilon,
update_rate,
min_laser_distance)
"""
def _set_action(self, action):
"""
This method will set the linear and angular speed of the TurtleBot2
based on the action number given.
:param action: The action integer that sets what movement to do next.
"""
rospy.logdebug("Start Set Action ==>"+str(action))
# We convert the actions to speed movements to send to the parent class CubeSingleDiskEnv
if action == 0: # FORWARD
linear_speed = self.linear_forward_speed
angular_speed = 0.0
self.last_action = "FORWARDS"
elif action == 1: # LEFT
linear_speed = self.linear_turn_speed
angular_speed = self.angular_speed
self.last_action = "TURN_LEFT"
elif action == 2: # RIGHT
linear_speed = self.linear_turn_speed
angular_speed = -1*self.angular_speed
self.last_action = "TURN_RIGHT"
elif action == 3: # Stop
linear_speed = 0.0
angular_speed = 0.0
self.last_action = "STOP"
# We tell TurtleBot2 the linear and angular speed to set to execute
"""
self.move_base(linear_speed,
angular_speed,
epsilon=0.05,
update_rate=10,
min_laser_distance=self.min_range)
"""
rospy.logdebug("END Set Action ==>"+str(action) +
", NAME="+str(self.last_action))
def chooseAction(self, state):
qv = [self.q[(state, a)] for a in self.actions if (state, a) in self.q]
if len(qv) > 0:
maxQ = max(qv)
count = qv.count(maxQ)
# In case there're several state-action max values
# we select a random one among them
if count > 1:
#best = [i for i in range(len(self.actions)) if qv[i] == maxQ]
best = [i for i in range(len(qv)) if qv[i] == maxQ]
i = random.choice(best)
else:
i = qv.index(maxQ)
action = self.actions[i]
else:
action = -1
return action
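# chooseAction() is a purely greedy lookup in the Q-table loaded from the pickle
# file: it returns the highest-valued action for the state, breaking ties at random,
# and returns -1 when the state was never seen during training.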
def step(self):
obs = self._get_obs()
rospy.loginfo("obs %s" % obs)
while obs != [] and self.last_action != "STOP":
state = ''.join(map(str, obs))
rospy.loginfo('state %s' % state)
# Pick an action based on the current state
#action = qlearn.chooseAction(state)
actionq = self.chooseAction(state)
if obs[2] < 1: # 180 front
if obs[1] < 1: # 120 right
if obs[3] < 1: # 240 left
action = 3 # stop
else:
action = 1 # left
else:
action = 2 # right
else:
action = 0 # front
rospy.logwarn("Next action is:%d", action)
rospy.logwarn("Next actionq is:%d", actionq)
# Execute the action in the environment and get feedback
self._set_action(action)
obs = self._get_obs()
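# step() compares the action suggested by the Q-table (actionq, only logged) against
# a hand-coded obstacle-avoidance rule that picks the executed action; note that the
# move_base() call inside _set_action() is commented out here, so this loop mainly
# logs decisions and updates last_action.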
if __name__ == '__main__':
try:
rospy.init_node('re_fr', anonymous=False)
rlComp = rlComponent()
#while frComp.ok():
# pass
except rospy.ROSInterruptException:
pass
| [] |
2024-01-10 | eldarsilver/DQN_Pytorch_ROS | openai_ros~openai_ros~src~openai_ros~robot_envs~sawyer_env.py | import numpy
import rospy
import time
import tf
from openai_ros import robot_gazebo_env
import intera_interface
import intera_external_devices
from intera_interface import CHECK_VERSION
from intera_core_msgs.msg import JointLimits
from sensor_msgs.msg import Image
from openai_ros.openai_ros_common import ROSLauncher
class SawyerEnv(robot_gazebo_env.RobotGazeboEnv):
"""Superclass for all SawyerEnv environments.
"""
def __init__(self, ros_ws_abspath):
"""
Initializes a new SawyerEnv environment.
To check any topic we need to have the simulation running, so we need to do two things:
1) Unpause the simulation: without that the stream of data doesn't flow. This is for simulations
that are paused for whatever reason.
2) If the simulation was already running for some reason, we need to reset the controllers.
This has to do with the fact that some plugins with tf don't understand the reset of the simulation
and need to be reset to work properly.
The Sensors: The sensors accessible are the ones considered useful for AI learning.
Sensor Topic List:
* /robot/joint_limits: joint limits of the Sawyer arm
Actuators Topic List:
* As actuator we will use a class to interface with the movements through commands.
Args:
"""
rospy.logdebug("Start SawyerEnv INIT...")
# Variables that we give through the constructor.
# None in this case
# We launch the ROSlaunch that spawns the robot into the world
ROSLauncher(rospackage_name="sawyer_gazebo",
launch_file_name="put_sawyer_in_world.launch",
ros_ws_abspath=ros_ws_abspath)
# Internal Vars
# Doesn't have any accessible controllers
self.controllers_list = []
# It doesn't use a namespace
self.robot_name_space = ""
# We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv
super(SawyerEnv, self).__init__(controllers_list=self.controllers_list,
robot_name_space=self.robot_name_space,
reset_controls=False,
start_init_physics_parameters=False,
reset_world_or_sim="WORLD")
rospy.logdebug("SawyerEnv unpause...")
self.gazebo.unpauseSim()
# self.controllers_object.reset_controllers()
# TODO: Fill it with the sensors
self._check_all_systems_ready()
rospy.Subscriber("/io/internal_camera/head_camera/image_raw",
Image, self._head_camera_image_raw_callback)
rospy.Subscriber("/io/internal_camera/right_hand_camera/image_raw",
Image, self._right_hand_camera_image_raw_callback)
self._setup_tf_listener()
self._setup_movement_system()
self.gazebo.pauseSim()
rospy.logdebug("Finished SawyerEnv INIT...")
# Methods needed by the RobotGazeboEnv
# ----------------------------
def _check_all_systems_ready(self):
"""
Checks that all the sensors, publishers and other simulation systems are
operational.
"""
rospy.logdebug("SawyerEnv check_all_systems_ready...")
self._check_all_sensors_ready()
rospy.logdebug("END SawyerEnv _check_all_systems_ready...")
return True
# CubeSingleDiskEnv virtual methods
# ----------------------------
def _check_all_sensors_ready(self):
rospy.logdebug("START ALL SENSORS READY")
# TODO: Here go the sensors like cameras and joint states
self._check_head_camera_image_raw_ready()
self._check_right_hand_camera_image_raw_ready()
rospy.logdebug("ALL SENSORS READY")
def _check_head_camera_image_raw_ready(self):
self.head_camera_image_raw = None
rospy.logdebug(
"Waiting for /io/internal_camera/head_camera/image_raw to be READY...")
while self.head_camera_image_raw is None and not rospy.is_shutdown():
try:
self.head_camera_image_raw = rospy.wait_for_message(
"/io/internal_camera/head_camera/image_raw", Image, timeout=5.0)
rospy.logdebug(
"Current /io/internal_camera/head_camera/image_raw READY=>")
except:
rospy.logerr(
"Current /io/internal_camera/head_camera/image_raw not ready yet, retrying for getting head_camera_image_raw")
return self.head_camera_image_raw
def _check_right_hand_camera_image_raw_ready(self):
self.right_hand_camera_image_raw = None
rospy.logdebug(
"Waiting for /io/internal_camera/right_hand_camera/image_raw to be READY...")
while self.right_hand_camera_image_raw is None and not rospy.is_shutdown():
try:
self.right_hand_camera_image_raw = rospy.wait_for_message(
"/io/internal_camera/right_hand_camera/image_raw", Image, timeout=5.0)
rospy.logdebug(
"Current /io/internal_camera/right_hand_camera/image_raw READY=>")
except:
rospy.logerr(
"Current /io/internal_camera/right_hand_camera/image_raw not ready yet, retrying for getting right_hand_camera_image_raw")
return self.right_hand_camera_image_raw
def _head_camera_image_raw_callback(self, data):
self.head_camera_image_raw = data
def _right_hand_camera_image_raw_callback(self, data):
self.right_hand_camera_image_raw = data
def _setup_tf_listener(self):
"""
        Sets up the TF listener for getting the transforms you ask for.
"""
self.listener = tf.TransformListener()
def _setup_movement_system(self):
"""
Setup of the movement system.
:return:
"""
rp = intera_interface.RobotParams()
valid_limbs = rp.get_limb_names()
if not valid_limbs:
rp.log_message(("Cannot detect any limb parameters on this robot. "
"Exiting."), "ERROR")
return
rospy.loginfo("Valid Sawyer Limbs==>"+str(valid_limbs))
print("Getting robot state... ")
rs = intera_interface.RobotEnable(CHECK_VERSION)
init_state = rs.state().enabled
rospy.loginfo("Enabling robot...")
rs.enable()
self._map_actions_to_movement()
def _map_actions_to_movement(self, side="right", joint_delta=0.1):
self.limb = intera_interface.Limb(side)
try:
self.gripper = intera_interface.Gripper(side + '_gripper')
except:
self.has_gripper = False
rospy.loginfo("The electric gripper is not detected on the robot.")
else:
self.has_gripper = True
self.joints = self.limb.joint_names()
self.bindings = {
self.joints[0]+"_increase": (self.set_j, [self.joints[0], joint_delta], self.joints[0]+" increase"),
self.joints[0]+"_decrease": (self.set_j, [self.joints[0], -joint_delta], self.joints[0]+" decrease"),
self.joints[1]+"_increase": (self.set_j, [self.joints[1], joint_delta], self.joints[1]+" increase"),
self.joints[1]+"_decrease": (self.set_j, [self.joints[1], -joint_delta], self.joints[1]+" decrease"),
self.joints[2]+"_increase": (self.set_j, [self.joints[2], joint_delta], self.joints[2]+" increase"),
self.joints[2]+"_decrease": (self.set_j, [self.joints[2], -joint_delta], self.joints[2]+" decrease"),
self.joints[3]+"_increase": (self.set_j, [self.joints[3], joint_delta], self.joints[3]+" increase"),
self.joints[3]+"_decrease": (self.set_j, [self.joints[3], -joint_delta], self.joints[3]+" decrease"),
self.joints[4]+"_increase": (self.set_j, [self.joints[4], joint_delta], self.joints[4]+" increase"),
self.joints[4]+"_decrease": (self.set_j, [self.joints[4], -joint_delta], self.joints[4]+" decrease"),
self.joints[5]+"_increase": (self.set_j, [self.joints[5], joint_delta], self.joints[5]+" increase"),
self.joints[5]+"_decrease": (self.set_j, [self.joints[5], -joint_delta], self.joints[5]+" decrease"),
self.joints[6]+"_increase": (self.set_j, [self.joints[6], joint_delta], self.joints[6]+" increase"),
self.joints[6]+"_decrease": (self.set_j, [self.joints[6], -joint_delta], self.joints[6]+" decrease")
}
if self.has_gripper:
self.bindings.update({
"close": (self.set_g, "close", side+" gripper close"),
"open": (self.set_g, "open", side+" gripper open"),
"calibrate": (self.set_g, "calibrate", side+" gripper calibrate")
})
rospy.loginfo("Controlling joints...")
# Methods that the TrainingEnvironment will need to define here as virtual
# because they will be used in RobotGazeboEnv GrandParentClass and defined in the
# TrainingEnvironment.
# ----------------------------
def _set_init_pose(self):
"""Sets the Robot in its init pose
"""
raise NotImplementedError()
def _init_env_variables(self):
"""Inits variables needed to be initialised each time we reset at the start
of an episode.
"""
raise NotImplementedError()
def _compute_reward(self, observations, done):
"""Calculates the reward to give based on the observations given.
"""
raise NotImplementedError()
def _set_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _get_obs(self):
raise NotImplementedError()
def _is_done(self, observations):
"""Checks if episode done based on observations given.
"""
raise NotImplementedError()
# Methods that the TrainingEnvironment will need.
# ----------------------------
def execute_movement(self, action_id):
"""
        It executes the command given through an id. This will move any joint
        of Sawyer, including the gripper if it has one.
        :param: action_id: These are the possible action_id values and the associated action.
self.joints[0]+"_increase",
self.joints[0]+_decrease,
self.joints[1]+"_increase",
self.joints[1]+"_decrease",
self.joints[2]+"_increase",
self.joints[2]+"_decrease",
self.joints[3]+"_increase",
self.joints[3]+"_decrease",
self.joints[4]+"_increase",
self.joints[4]+"_decrease",
self.joints[5]+"_increase",
self.joints[5]+"_decrease",
self.joints[6]+"_increase",
self.joints[6]+"_decrease",
gripper_close,
gripper_open,
gripper_calibrate
"""
if action_id in self.bindings:
cmd = self.bindings[action_id]
if action_id == "gripper_close" or action_id == "gripper_open" or action_id == "gripper_calibrate":
cmd[0](cmd[1])
rospy.loginfo("command: %s" % (cmd[2],))
else:
# expand binding to something like "self.set_j(right, 'j0', joint_delta)"
cmd[0](*cmd[1])
rospy.loginfo("command: %s" % (cmd[2],))
else:
rospy.logerr("NOT VALID key binding, it should be one of these: ")
for key, val in sorted(self.bindings.items(),
key=lambda x: x[1][2]):
rospy.logerr(" %s: %s" % (key, val[2]))
def set_j(self, joint_name, delta):
current_position = self.limb.joint_angle(joint_name)
joint_command = {joint_name: current_position + delta}
self.limb.set_joint_positions(joint_command)
def set_g(self, action):
if self.has_gripper:
if action == "close":
self.gripper.close()
elif action == "open":
self.gripper.open()
elif action == "calibrate":
self.gripper.calibrate()
def move_joints_to_angle_blocking(self, joint_positions_dict, timeout=15.0, threshold=0.008726646):
"""
It moves all the joints to the given position and doesnt exit until it reaches that position
"""
self.limb.move_to_joint_positions(positions=joint_positions_dict,
timeout=15.0,
threshold=0.008726646,
test=None)
def get_limb_joint_names_array(self):
"""
Returns the Joint Names array of the Limb.
"""
return self.joints
def get_all_limb_joint_angles(self):
"""
        Returns a dictionary dict({str:float}) with all the joint angles.
"""
return self.limb.joint_angles()
def get_all_limb_joint_efforts(self):
"""
        Returns a dictionary dict({str:float}) with all the joint efforts.
"""
return self.limb.joint_efforts()
def get_tf_start_to_end_frames(self, start_frame_name, end_frame_name):
"""
Given two frames, it returns the transform from the start_frame_name to the end_frame_name.
        It will only return something other than None if the TFs of the two frames are published
        on the TF topic and are connected through the TF tree.
:param: start_frame_name: Start Frame of the TF transform
end_frame_name: End Frame of the TF transform
:return: trans,rot of the transform between the start and end frames.
"""
start_frame = "/"+start_frame_name
end_frame = "/"+end_frame_name
trans, rot = None, None
while (trans is None or rot is None) and not rospy.is_shutdown():
try:
(trans, rot) = self.listener.lookupTransform(
start_frame, end_frame, rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
rospy.logerr("TF start to end not ready YET...")
duration_obj = rospy.Duration.from_sec(1.0)
rospy.sleep(duration_obj)
return trans, rot
def check_joint_limits_ready(self):
self.joint_limits = None
rospy.logdebug("Waiting for /robot/joint_limits to be READY...")
while self.joint_limits is None and not rospy.is_shutdown():
try:
self.joint_limits = rospy.wait_for_message(
"/robot/joint_limits", JointLimits, timeout=3.0)
rospy.logdebug("Current /robot/joint_limits READY=>")
except:
rospy.logerr(
"Current /robot/joint_limits not ready yet, retrying for getting joint_limits")
return self.joint_limits
def get_joint_limits(self):
return self.joint_limits
def get_head_camera_image_raw(self):
return self.head_camera_image_raw
def get_right_hand_camera_image_raw(self):
return self.right_hand_camera_image_raw
def init_joint_limits(self):
"""
        Get the Joint Limits in the init phase, where we need to unpause the simulation to get them.
:return: joint_limits: The Joint Limits Dictionary, with names, angles, vel and effort limits.
"""
self.gazebo.unpauseSim()
joint_limits = self.check_joint_limits_ready()
self.gazebo.pauseSim()
return joint_limits
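

# Illustrative usage sketch: driving the Sawyer through the action ids built by
# _map_actions_to_movement(). It only uses methods defined above; `env` is assumed
# to be an already-initialised, concrete subclass of SawyerEnv, and the function
# name below is purely for illustration.
def example_oscillate_first_joint(env, n_steps=4):
    """Nudges the first limb joint up and then back down in small increments."""
    first_joint = env.get_limb_joint_names_array()[0]
    for _ in range(n_steps):
        env.execute_movement(first_joint + "_increase")
    for _ in range(n_steps):
        env.execute_movement(first_joint + "_decrease")
    return env.get_all_limb_joint_angles()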
| [] |
2024-01-10 | Jaywhisker/AI_design_framework | Data_Collection~Design_Opportunities~GPT_keyword.py | #####################################################################################################
# These are the functions to retrieve the product specifications, flaws and strengths from GPT-3
# imports: openai, ast and helper functions from Helper.
######################################################################################################
import openai
import ast
import nltk
from ...Helper import *
#function to generate the product specification
def get_specifications(search_terms, model, apikey):
openai.api_key = apikey
prompt = "give me the design specifications of the" + " ".join(search_terms) + "in a python dictionary in the form of specifications:value."
output = generate_texts(prompt, model)
#sometimes the output may be output = {}, if so remove output =
try:
index = output.index("{")
output = output[index:].strip()
except:
pass
    design_specifications = ast.literal_eval(output) #evaluate the string (e.g. output = '{...}') so that it becomes a dictionary
print("design specifications", design_specifications)
return design_specifications
#edited ver of get_specification for product flaws
def get_flaws(search_terms, model, apikey):
openai.api_key = apikey
prompt = "give me the design flaws of the" + " ".join(search_terms) + "in a python dictionary in the form of specifications:value."
output = generate_texts(prompt, model)
try:
index = output.index("{")
output = output[index:].strip()
except:
pass
design_flaws = ast.literal_eval(output)
print("design flaws", design_flaws)
return design_flaws
#edited ver of get_specification for product strengths
def get_strength(search_terms, model, apikey):
openai.api_key = apikey
prompt = "give me the design strengths of the" + " ".join(search_terms) + "in a python dictionary in the form of specifications:value."
output = generate_texts(prompt, model)
try:
index = output.index("{")
output = output[index:].strip()
except:
pass
design_strengths = ast.literal_eval(output)
print("design strengths", design_strengths)
return design_strengths
#edited ver of get_competitors for product
def get_competitors(search_terms, model, apikey):
openai.api_key = apikey
prompt = "give me the competitors of the" + " ".join(search_terms) + "in a python dictionary in the form of product:company."
output = generate_texts(prompt, model)
try:
index = output.index("{")
output = output[index:].strip()
except:
pass
competitors = ast.literal_eval(output)
print("competitors", competitors)
return competitors
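
# Illustrative usage sketch: how these helpers might be called together. The search
# terms, model name and API key below are placeholders / assumptions, not values
# taken from this project.
if __name__ == "__main__":
    example_terms = ["wireless", "mouse"]   # hypothetical product
    example_model = "text-davinci-003"      # assumed completion model
    example_key = "YOUR_OPENAI_API_KEY"     # placeholder, never commit a real key
    specs = get_specifications(example_terms, example_model, example_key)
    flaws = get_flaws(example_terms, example_model, example_key)
    strengths = get_strength(example_terms, example_model, example_key)
    rivals = get_competitors(example_terms, example_model, example_key)
    print({"specifications": specs, "flaws": flaws, "strengths": strengths, "competitors": rivals})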
| [
"give me the design strengths of the",
"in a python dictionary in the form of specifications:value.",
"give me the competitors of the",
" ",
"give me the design specifications of the",
"give me the design flaws of the",
"in a python dictionary in the form of product:company."
] |
2024-01-10 | Jaywhisker/AI_design_framework | Generating_Design_Opportunities~Transcript_Summariser.py | #####################################################################################################
# These are the functions to summarise YouTube transcripts
# imports: ast, openai and helper functions from Helper.
######################################################################################################
import openai
import ast
from ...Helper import *
#function to summarise one youtube transcript into its pros and cons
#require: data is a singular paragraph containing the youtube transcript to summarise
def single_transcript_summariser(data, search_terms, model = "text-davinci-003", apikey = None):
openai.api_key = apikey
prompt = "Summarise the flaws and strengths of the" + " ".join(search_terms) + ":" + data
summary = generate_text(prompt, model)
return summary
#function to summarise all youtube transcripts into its pros and cons
#require: nested list of paragraph containing each youtube transcript
def transcript_summariser(youtube_transcripts, search_terms, model = "text-davinci-003", apikey = None):
    summarised_reviews = ""
    for transcripts in youtube_transcripts:
        result = single_transcript_summariser(transcripts, search_terms, model, apikey)
        summarised_reviews += result.strip() #remove whitespace
    return summarised_reviews
#function to summarise the opinions of each category for the product based on the youtube transcript summary
#require: finalised design outcomes keywords, data = singular paragraph containing summarised youtube transcript
def features_extractor(categories, data, model = "text-davinci-003", apikey = None):
openai.api_key = apikey
prompt = "based on this paragraph:" + data + "\n summarise why the product should be improved or maintained for each of these categories: " + str(categories) + "and return the output in a python dictionary"
print("prompt": prompt)
result = generate_text(prompt, model)
try:
index = result.index("{") #just in case the result: output = {}
result = result[index:].strip() #remove output =
except:
pass
return ast.literal_eval(result)
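
# Illustrative usage sketch: summarising a couple of transcripts and then extracting
# per-category opinions. The transcripts, categories, product terms and API key are
# placeholder values, not data from this project.
if __name__ == "__main__":
    example_transcripts = ["First review transcript ...", "Second review transcript ..."]
    example_terms = ["wireless", "mouse"]    # hypothetical product
    example_key = "YOUR_OPENAI_API_KEY"      # placeholder
    summary = transcript_summariser(example_transcripts, example_terms, apikey=example_key)
    opinions = features_extractor(["battery", "ergonomics"], summary, apikey=example_key)
    print(opinions)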
| [
"Summarise the flaws and strengths of the",
" ",
"based on this paragraph:PLACEHOLDER\n summarise why the product should be improved or maintained for each of these categories: PLACEHOLDERand return the output in a python dictionary"
] |
2024-01-10 | Jaywhisker/AI_design_framework | Generating_Design_Opportunities~Design_Opportunities.py | #####################################################################################################
# These are the functions to generate design opportunities based on all the data collected
# Data include: Categorised reviews and comments + summarised youtube transcripts
# imports: openai and helper functions from Helper.
######################################################################################################
import openai
from ...Helper import *
#function to give suggestions on what to improve for each respective category with regards to the product
#require the keyword, the product name and data which is the list of negative comments related to the category of that product
def suggestion_maker(data, keyword, search_term, model = "text-davinci-003", apikey = None):
openai.api_key = apikey
prompt = "based on the paragraph below, what is the best way to improve the" + " ".join(search_term) + " with regards to " + keyword + "only ?\n" + str(data)
model = "text-davinci-003"
suggestion = generate_text(prompt, model)
return suggestion.strip()
#function to state what should be maintained for each respective category with regards to the product
#require the keyword, the product name and data which is the list of positive comments related to the category of that product
def maintain_maker(data, keyword, search_term, model = "text-davinci-003", apikey = None):
openai.api_key = apikey
prompt = "based on the paragraph below, what is the best way to maintain in the" + " ".join(search_term) + " with regards to " + keyword + "only ?\n" + str(data)
model = "text-davinci-003"
    maintenance = generate_text(prompt, model)
    return maintenance.strip()
#function that creates the total_opportunities through GPT-3
#requires: all negative keywords, top 5 positive keywords, all categorised data, the summarised yt transcript, search_terms and api key
#the code will extract all negative comments categorised under the negative keywords and ask GPT-3 for suggestions before merging it with the summarised yt transcript
#the code will repeat with the top 5 positive keywords
def reviews_design_outcomes(negative_design_outcomes, positive_design_outcomes, categorical_data, summarised_transcript, search_terms, model = "text-davinci-003", apikey = None):
total_opportunities = {} #dictionary to hold all the suggestions (merges features_extractor and suggestion/maintain_maker outputs)
#negative design outcomes
for n_outcomes in negative_design_outcomes: #iterate through every negative keyword
negative_key = n_outcomes[0] #get negative keyword
negative_comments = categorical_data[negative_key]['negative'] #get negative comments related to category
#update total_opportunities with suggestions from yt transcript, yt comments, shopee and amazon reviews for negative keywords
        total_opportunities[negative_key] = summarised_transcript[negative_key] + " " + suggestion_maker(negative_comments, negative_key, search_terms, model, apikey)
print(total_opportunities)
#positive design outcomes
for p_outcomes in positive_design_outcomes:
positive_key = p_outcomes[0] #get positive keyword
positive_comments = categorical_data[positive_key]['positive'] #get positive comments related to category
#update total_opportunities on what to maintain from yt transcript, yt comments, shopee and amazon reviews
        total_opportunities[positive_key] = summarised_transcript[positive_key] + " " + maintain_maker(positive_comments, positive_key, search_terms, model, apikey)
print(total_opportunities)
return total_opportunities
#function that tells GPT-3 to give us a new product specification
#the prompt must mainly contain the reviews (containing things to maintain and suggestions for improvements) as well as prompts to compare with current product specifications
#after which you can save the file
def generate_design_outcomes(design_outcomes, search_terms, model = "text-davinci-003", apikey = None):
openai.api_key = apikey
prompt = "Imagine you are a product designer and these are the reviews you have received. Using the current " + " ".join(search_terms) + " specifications, provide a new set of product specifications with comparison to the current one to design an improved " + " ".join(search_terms) + " that meets the demands of the reviews. \n Reviews:" + str(design_outcomes)
final_design_outcomes = generate_text(prompt, model)
return final_design_outcomes.strip()
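
# Illustrative usage sketch: turning categorised reviews plus a summarised transcript
# into final design outcomes. Every input below is a made-up placeholder; the
# (keyword, score) tuple shape is an assumption about the upstream keyword step.
if __name__ == "__main__":
    example_terms = ["wireless", "mouse"]
    example_key = "YOUR_OPENAI_API_KEY"      # placeholder
    negative_kw = [("battery", 0.8)]
    positive_kw = [("ergonomics", 0.9)]
    categorised = {
        "battery": {"negative": ["battery drains quickly"], "positive": []},
        "ergonomics": {"negative": [], "positive": ["fits the hand well"]},
    }
    yt_summary = {"battery": "Reviewers want longer battery life.",
                  "ergonomics": "Reviewers praise the shape."}
    opportunities = reviews_design_outcomes(negative_kw, positive_kw, categorised,
                                            yt_summary, example_terms, apikey=example_key)
    print(generate_design_outcomes(opportunities, example_terms, apikey=example_key))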
| [
" that meets the demands of the reviews. \n Reviews:",
"Imagine you are a product designer and these are the reviews you have received. Using the current ",
" specifications, provide a new set of product specifications with comparison to the current one to design an improved ",
"based on the paragraph below, what is the best way to maintain in the",
"based on the paragraph below, what is the best way to improve the",
" ",
" with regards to ",
"only ?\n"
] |
2024-01-10 | Panny777/Insta-Caption-Generator | trial.py |
import os
import openai
openai.api_key = ""
openai.Completion.create(
engine="text-davinci-002",
prompt="Say this is a test",
max_tokens=5
)
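
# Illustrative sketch: the same call with the response captured and printed. The empty
# api_key above is a placeholder; a real key is required before either call will run.
response = openai.Completion.create(
    engine="text-davinci-002",
    prompt="Say this is a test",
    max_tokens=5
)
print(response["choices"][0]["text"])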
| [
"Say this is a test"
] |
2024-01-10 | Matthew-Redrup/agentic-experiment | agentic_edu~agents~agents.py | from typing import Optional, List, Dict, Any
from agentic_edu.agents.instruments import PostgresAgentInstruments
from agentic_edu.modules import orchestrator
from agentic_edu.agents import agent_config
import autogen
import guidance
# ------------------------ PROMPTS ------------------------
USER_PROXY_PROMPT = "A human admin. Interact with the Product Manager to discuss the plan. Plan execution needs to be approved by this admin."
DATA_ENGINEER_PROMPT = "A Data Engineer. Generate the initial SQL based on the requirements provided. Send it to the Sr Data Analyst to be executed. "
SR_DATA_ANALYST_PROMPT = "Sr Data Analyst. You run the SQL query using the run_sql function, send the raw response to the data viz team. You use the run_sql function exclusively."
GUIDANCE_SCRUM_MASTER_SQL_NLQ_PROMPT = """
Is the following block of text a SQL Natural Language Query (NLQ)? Please rank from 1 to 5, where:
1: Definitely not NLQ
2: Likely not NLQ
3: Neutral / Unsure
4: Likely NLQ
5: Definitely NLQ
Return the rank as a number exclusively using the rank variable to be casted as an integer.
Block of Text: {{potential_nlq}}
{{#select "rank" logprobs='logprobs'}} 1{{or}} 2{{or}} 3{{or}} 4{{or}} 5{{/select}}
"""
DATA_INSIGHTS_GUIDANCE_PROMPT = """
You're a data innovator. You analyze SQL databases table structure and generate 3 novel insights for your team to reflect on and query.
Format your insights in JSON format.
```json
[{{#geneach 'insight' num_iterations=3 join=','}}
{
"insight": "{{gen 'insight' temperature=0.7}}",
"actionable_business_value": "{{gen 'actionable_value' temperature=0.7}}",
"sql": "{{gen 'new_query' temperature=0.7}}"
}
{{/geneach}}]
```"""
INSIGHTS_FILE_REPORTER_PROMPT = "You're a data reporter. You write json data you receive directly into a file using the write_innovation_file function."
# unused prompts
COMPLETION_PROMPT = "If everything looks good, respond with APPROVED"
PRODUCT_MANAGER_PROMPT = (
"Product Manager. Validate the response to make sure it's correct"
+ COMPLETION_PROMPT
)
TEXT_REPORT_ANALYST_PROMPT = "Text File Report Analyst. You exclusively use the write_file function on a summarized report."
JSON_REPORT_ANALYST_PROMPT = "Json Report Analyst. You exclusively use the write_json_file function on the report."
YML_REPORT_ANALYST_PROMPT = "Yaml Report Analyst. You exclusively use the write_yml_file function on the report."
# ------------------------ BUILD AGENT TEAMS ------------------------
def build_data_eng_team(instruments: PostgresAgentInstruments):
"""
Build a team of agents that can generate, execute, and report an SQL query
"""
# create a set of agents with specific roles
# admin user proxy agent - takes in the prompt and manages the group chat
user_proxy = autogen.UserProxyAgent(
name="Admin",
system_message=USER_PROXY_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
)
# data engineer agent - generates the sql query
data_engineer = autogen.AssistantAgent(
name="Engineer",
llm_config=agent_config.base_config,
system_message=DATA_ENGINEER_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
)
sr_data_analyst = autogen.AssistantAgent(
name="Sr_Data_Analyst",
llm_config=agent_config.run_sql_config,
system_message=SR_DATA_ANALYST_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
function_map={
"run_sql": instruments.run_sql,
},
)
return [
user_proxy,
data_engineer,
sr_data_analyst,
]
def build_data_viz_team(instruments: PostgresAgentInstruments):
# admin user proxy agent - takes in the prompt and manages the group chat
user_proxy = autogen.UserProxyAgent(
name="Admin",
system_message=USER_PROXY_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
)
# text report analyst - writes a summary report of the results and saves them to a local text file
text_report_analyst = autogen.AssistantAgent(
name="Text_Report_Analyst",
llm_config=agent_config.write_file_config,
system_message=TEXT_REPORT_ANALYST_PROMPT,
human_input_mode="NEVER",
function_map={
"write_file": instruments.write_file,
},
)
# json report analyst - writes a summary report of the results and saves them to a local json file
json_report_analyst = autogen.AssistantAgent(
name="Json_Report_Analyst",
llm_config=agent_config.write_json_file_config,
system_message=JSON_REPORT_ANALYST_PROMPT,
human_input_mode="NEVER",
function_map={
"write_json_file": instruments.write_json_file,
},
)
yaml_report_analyst = autogen.AssistantAgent(
name="Yml_Report_Analyst",
llm_config=agent_config.write_yaml_file_config,
system_message=YML_REPORT_ANALYST_PROMPT,
human_input_mode="NEVER",
function_map={
"write_yml_file": instruments.write_yml_file,
},
)
return [
user_proxy,
text_report_analyst,
json_report_analyst,
yaml_report_analyst,
]
def build_scrum_master_team(instruments: PostgresAgentInstruments):
user_proxy = autogen.UserProxyAgent(
name="Admin",
system_message=USER_PROXY_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
)
scrum_agent = DefensiveScrumMasterAgent(
name="Scrum_Master",
llm_config=agent_config.base_config,
system_message=GUIDANCE_SCRUM_MASTER_SQL_NLQ_PROMPT,
human_input_mode="NEVER",
)
return [user_proxy, scrum_agent]
def build_insights_team(instruments: PostgresAgentInstruments):
user_proxy = autogen.UserProxyAgent(
name="Admin",
system_message=USER_PROXY_PROMPT,
code_execution_config=False,
human_input_mode="NEVER",
)
insights_agent = InsightsAgent(
name="Insights",
llm_config=agent_config.base_config,
system_message=DATA_INSIGHTS_GUIDANCE_PROMPT,
human_input_mode="NEVER",
)
insights_data_reporter = autogen.AssistantAgent(
name="Insights_Data_Reporter",
llm_config=agent_config.write_innovation_file_config,
system_message=INSIGHTS_FILE_REPORTER_PROMPT,
human_input_mode="NEVER",
function_map={
"write_innovation_file": instruments.write_innovation_file,
},
)
return [user_proxy, insights_agent, insights_data_reporter]
# ------------------------ ORCHESTRATION ------------------------
def build_team_orchestrator(
team: str,
agent_instruments: PostgresAgentInstruments,
validate_results: callable = None,
) -> orchestrator.Orchestrator:
"""
Based on a team name, build a team of agents and return an orchestrator
"""
if team == "data_eng":
return orchestrator.Orchestrator(
name="data_eng_team",
agents=build_data_eng_team(agent_instruments),
instruments=agent_instruments,
validate_results_func=validate_results,
)
elif team == "data_viz":
return orchestrator.Orchestrator(
name="data_viz_team",
agents=build_data_viz_team(agent_instruments),
validate_results_func=validate_results,
)
elif team == "scrum_master":
return orchestrator.Orchestrator(
name="scrum_master_team",
agents=build_scrum_master_team(agent_instruments),
instruments=agent_instruments,
validate_results_func=validate_results,
)
elif team == "data_insights":
return orchestrator.Orchestrator(
name="data_insights_team",
agents=build_insights_team(agent_instruments),
instruments=agent_instruments,
validate_results_func=validate_results,
)
raise Exception("Unknown team: " + team)
# ------------------------ CUSTOM AGENTS ------------------------
class DefensiveScrumMasterAgent(autogen.ConversableAgent):
"""
Custom agent that uses the guidance function to determine if a message is a SQL NLQ
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Register the new reply function for this specific agent
self.register_reply(self, self.check_sql_nlq, position=0)
def check_sql_nlq(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[autogen.Agent] = None,
config: Optional[Any] = None, # Persistent state.
):
# Check the last received message
last_message = messages[-1]["content"]
# Use the guidance string to determine if the message is a SQL NLQ
response = guidance(
GUIDANCE_SCRUM_MASTER_SQL_NLQ_PROMPT, potential_nlq=last_message
)
# You can return the exact response or just a simplified version,
# here we are just returning the rank for simplicity
rank = response.get("choices", [{}])[0].get("rank", "3")
return True, rank
class InsightsAgent(autogen.ConversableAgent):
"""
Custom agent that uses the guidance function to generate insights in JSON format
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.register_reply(self, self.generate_insights, position=0)
def generate_insights(
self,
messages: Optional[List[Dict]] = None,
sender: Optional[autogen.Agent] = None,
config: Optional[Any] = None,
):
insights = guidance(DATA_INSIGHTS_GUIDANCE_PROMPT)
return True, insights
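

# Illustrative sketch: how one of the teams above might be assembled. The helper below
# only wraps build_team_orchestrator; it assumes a PostgresAgentInstruments instance is
# constructed elsewhere (its setup is repo-specific and not shown in this file).
def example_build_data_eng_orchestrator(
    instruments: PostgresAgentInstruments,
) -> orchestrator.Orchestrator:
    """Returns a data engineering team orchestrator for the given instruments."""
    return build_team_orchestrator("data_eng", instruments)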
| [
"Json Report Analyst. You exclusively use the write_json_file function on the report.",
"A Data Engineer. Generate the initial SQL based on the requirements provided. Send it to the Sr Data Analyst to be executed. ",
"\nIs the following block of text a SQL Natural Language Query (NLQ)? Please rank from 1 to 5, where:\n1: Definitely not NLQ\n2: Likely not NLQ\n3: Neutral / Unsure\n4: Likely NLQ\n5: Definitely NLQ\n\nReturn the rank as a number exclusively using the rank variable to be casted as an integer.\n\nBlock of Text: {{potential_nlq}}\n{{#select \"rank\" logprobs='logprobs'}} 1{{or}} 2{{or}} 3{{or}} 4{{or}} 5{{/select}}\n",
"\nYou're a data innovator. You analyze SQL databases table structure and generate 3 novel insights for your team to reflect on and query. \nFormat your insights in JSON format.\n```json\n[{{#geneach 'insight' num_iterations=3 join=','}}\n{\n \"insight\": \"{{gen 'insight' temperature=0.7}}\",\n \"actionable_business_value\": \"{{gen 'actionable_value' temperature=0.7}}\",\n \"sql\": \"{{gen 'new_query' temperature=0.7}}\"\n}\n{{/geneach}}]\n```",
"Yaml Report Analyst. You exclusively use the write_yml_file function on the report.",
"You're a data reporter. You write json data you receive directly into a file using the write_innovation_file function.",
"A human admin. Interact with the Product Manager to discuss the plan. Plan execution needs to be approved by this admin.",
"Product Manager. Validate the response to make sure it's correctIf everything looks good, respond with APPROVED",
"Text File Report Analyst. You exclusively use the write_file function on a summarized report.",
"Sr Data Analyst. You run the SQL query using the run_sql function, send the raw response to the data viz team. You use the run_sql function exclusively.",
"If everything looks good, respond with APPROVED"
] |
2024-01-10 | bobilan/AudioGPT | audio-chatgpt.py | import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'NeuralSeq'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'text_to_audio/Make_An_Audio'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'audio_detection'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'mono2binaural'))
import gradio as gr
import matplotlib
import librosa
import torch
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
import re
import uuid
import soundfile
from PIL import Image
import numpy as np
from omegaconf import OmegaConf
from einops import repeat
from ldm.util import instantiate_from_config
from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000
from vocoder.bigvgan.models import VocoderBigVGAN
from ldm.models.diffusion.ddim import DDIMSampler
import whisper
from utils.hparams import set_hparams
from utils.hparams import hparams as hp
import scipy.io.wavfile as wavfile
from audio_infer.utils import config as detection_config
from audio_infer.pytorch.models import PVT
import clip
AUDIO_CHATGPT_PREFIX = """AudioGPT
AudioGPT can not directly read audios, but it has a list of tools to finish different speech, audio, and singing voice tasks. Each audio will have a file name formed as "audio/xxx.wav". When talking about audios, AudioGPT is very strict to the file name and will never fabricate nonexistent files.
AudioGPT is able to use tools in a sequence, and is loyal to the tool observation outputs rather than faking the audio content and audio file name. It will remember to provide the file name from the last tool observation, if a new audio is generated.
Human may provide new audios to AudioGPT with a description. The description helps AudioGPT to understand this audio, but AudioGPT should use tools to finish following tasks, rather than directly imagine from the description.
Overall, AudioGPT is a powerful audio dialogue assistant tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics.
TOOLS:
------
AudioGPT has access to the following tools:"""
AUDIO_CHATGPT_FORMAT_INSTRUCTIONS = """To use a tool, please use the following format:
```
Thought: Do I need to use a tool? Yes
Action: the action to take, should be one of [{tool_names}]
Action Input: the input to the action
Observation: the result of the action
```
When you have a response to say to the Human, or if you do not need to use a tool, you MUST use the format:
```
Thought: Do I need to use a tool? No
{ai_prefix}: [your response here]
```
"""
AUDIO_CHATGPT_SUFFIX = """You are very strict to the filename correctness and will never fake a file name if not exists.
You will remember to provide the audio file name loyally if it's provided in the last tool observation.
Begin!
Previous conversation history:
{chat_history}
New input: {input}
Thought: Do I need to use a tool? {agent_scratchpad}"""
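

# Illustrative sketch: how the three prompt pieces above can be passed to a LangChain
# conversational agent via agent_kwargs. This is a simplified example, not the exact
# wiring used later in this file; the temperature value and function name are assumptions.
def build_example_agent(tools, memory):
    llm = OpenAI(temperature=0)
    return initialize_agent(
        tools,
        llm,
        agent="conversational-react-description",
        verbose=True,
        memory=memory,
        agent_kwargs={
            "prefix": AUDIO_CHATGPT_PREFIX,
            "format_instructions": AUDIO_CHATGPT_FORMAT_INSTRUCTIONS,
            "suffix": AUDIO_CHATGPT_SUFFIX,
        },
    )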
def cut_dialogue_history(history_memory, keep_last_n_words = 500):
tokens = history_memory.split()
n_tokens = len(tokens)
print(f"history_memory:{history_memory}, n_tokens: {n_tokens}")
if n_tokens < keep_last_n_words:
return history_memory
else:
paragraphs = history_memory.split('\n')
last_n_tokens = n_tokens
while last_n_tokens >= keep_last_n_words:
last_n_tokens = last_n_tokens - len(paragraphs[0].split(' '))
paragraphs = paragraphs[1:]
return '\n' + '\n'.join(paragraphs)
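
# Illustrative sketch: cut_dialogue_history drops whole leading paragraphs until the
# history fits within roughly keep_last_n_words whitespace-separated tokens. The
# synthetic history and helper name below are only for demonstration.
def _demo_cut_dialogue_history():
    history = "\n".join("Human: question %d\nAI: answer %d" % (i, i) for i in range(300))
    return cut_dialogue_history(history, keep_last_n_words=500)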
def merge_audio(audio_path_1, audio_path_2):
merged_signal = []
sr_1, signal_1 = wavfile.read(audio_path_1)
sr_2, signal_2 = wavfile.read(audio_path_2)
merged_signal.append(signal_1)
merged_signal.append(signal_2)
merged_signal = np.hstack(merged_signal)
merged_signal = np.asarray(merged_signal, dtype=np.int16)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
wavfile.write(audio_filename, sr_2, merged_signal)
return audio_filename
class T2I:
def __init__(self, device):
from transformers import AutoModelForCausalLM, AutoTokenizer
from diffusers import StableDiffusionPipeline
from transformers import pipeline
print("Initializing T2I to %s" % device)
self.device = device
self.pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
self.text_refine_tokenizer = AutoTokenizer.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
self.text_refine_model = AutoModelForCausalLM.from_pretrained("Gustavosta/MagicPrompt-Stable-Diffusion")
self.text_refine_gpt2_pipe = pipeline("text-generation", model=self.text_refine_model, tokenizer=self.text_refine_tokenizer, device=self.device)
self.pipe.to(device)
def inference(self, text):
image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
refined_text = self.text_refine_gpt2_pipe(text)[0]["generated_text"]
print(f'{text} refined to {refined_text}')
image = self.pipe(refined_text).images[0]
image.save(image_filename)
print(f"Processed T2I.run, text: {text}, image_filename: {image_filename}")
return image_filename
class ImageCaptioning:
def __init__(self, device):
from transformers import BlipProcessor, BlipForConditionalGeneration
print("Initializing ImageCaptioning to %s" % device)
self.device = device
self.processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
self.model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(self.device)
def inference(self, image_path):
inputs = self.processor(Image.open(image_path), return_tensors="pt").to(self.device)
out = self.model.generate(**inputs)
captions = self.processor.decode(out[0], skip_special_tokens=True)
return captions
class T2A:
def __init__(self, device):
print("Initializing Make-An-Audio to %s" % device)
self.device = device
self.sampler = self._initialize_model('text_to_audio/Make_An_Audio/configs/text_to_audio/txt2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/ta40multi_epoch=000085.ckpt', device=device)
self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device)
def _initialize_model(self, config, ckpt, device):
config = OmegaConf.load(config)
model = instantiate_from_config(config.model)
model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)
model = model.to(device)
model.cond_stage_model.to(model.device)
model.cond_stage_model.device = model.device
sampler = DDIMSampler(model)
return sampler
def txt2audio(self, text, seed = 55, scale = 1.5, ddim_steps = 100, n_samples = 3, W = 624, H = 80):
SAMPLE_RATE = 16000
prng = np.random.RandomState(seed)
start_code = prng.randn(n_samples, self.sampler.model.first_stage_model.embed_dim, H // 8, W // 8)
start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32)
uc = self.sampler.model.get_learned_conditioning(n_samples * [""])
c = self.sampler.model.get_learned_conditioning(n_samples * [text])
shape = [self.sampler.model.first_stage_model.embed_dim, H//8, W//8] # (z_dim, 80//2^x, 848//2^x)
samples_ddim, _ = self.sampler.sample(S = ddim_steps,
conditioning = c,
batch_size = n_samples,
shape = shape,
verbose = False,
unconditional_guidance_scale = scale,
unconditional_conditioning = uc,
x_T = start_code)
x_samples_ddim = self.sampler.model.decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) # [0, 1]
wav_list = []
for idx,spec in enumerate(x_samples_ddim):
wav = self.vocoder.vocode(spec)
wav_list.append((SAMPLE_RATE,wav))
best_wav = self.select_best_audio(text, wav_list)
return best_wav
def select_best_audio(self, prompt, wav_list):
from wav_evaluation.models.CLAPWrapper import CLAPWrapper
clap_model = CLAPWrapper('text_to_audio/Make_An_Audio/useful_ckpts/CLAP/CLAP_weights_2022.pth', 'text_to_audio/Make_An_Audio/useful_ckpts/CLAP/config.yml',
use_cuda=torch.cuda.is_available())
text_embeddings = clap_model.get_text_embeddings([prompt])
score_list = []
for data in wav_list:
sr, wav = data
audio_embeddings = clap_model.get_audio_embeddings([(torch.FloatTensor(wav), sr)], resample=True)
score = clap_model.compute_similarity(audio_embeddings, text_embeddings,
use_logit_scale=False).squeeze().cpu().numpy()
score_list.append(score)
max_index = np.array(score_list).argmax()
print(score_list, max_index)
return wav_list[max_index]
def inference(self, text, seed = 55, scale = 1.5, ddim_steps = 100, n_samples = 3, W = 624, H = 80):
melbins,mel_len = 80,624
with torch.no_grad():
result = self.txt2audio(
text = text,
H = melbins,
W = mel_len
)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename, result[1], samplerate = 16000)
print(f"Processed T2I.run, text: {text}, audio_filename: {audio_filename}")
return audio_filename
class I2A:
def __init__(self, device):
print("Initializing Make-An-Audio-Image to %s" % device)
self.device = device
self.sampler = self._initialize_model('text_to_audio/Make_An_Audio/configs/img_to_audio/img2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/ta54_epoch=000216.ckpt', device=device)
self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device)
def _initialize_model(self, config, ckpt, device):
config = OmegaConf.load(config)
model = instantiate_from_config(config.model)
model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)
model = model.to(device)
model.cond_stage_model.to(model.device)
model.cond_stage_model.device = model.device
sampler = DDIMSampler(model)
return sampler
def img2audio(self, image, seed = 55, scale = 3, ddim_steps = 100, W = 624, H = 80):
SAMPLE_RATE = 16000
n_samples = 1 # only support 1 sample
prng = np.random.RandomState(seed)
start_code = prng.randn(n_samples, self.sampler.model.first_stage_model.embed_dim, H // 8, W // 8)
start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32)
uc = self.sampler.model.get_learned_conditioning(n_samples * [""])
#image = Image.fromarray(image)
image = Image.open(image)
image = self.sampler.model.cond_stage_model.preprocess(image).unsqueeze(0)
image_embedding = self.sampler.model.cond_stage_model.forward_img(image)
c = image_embedding.repeat(n_samples, 1, 1)
shape = [self.sampler.model.first_stage_model.embed_dim, H//8, W//8] # (z_dim, 80//2^x, 848//2^x)
samples_ddim, _ = self.sampler.sample(S=ddim_steps,
conditioning=c,
batch_size=n_samples,
shape=shape,
verbose=False,
unconditional_guidance_scale=scale,
unconditional_conditioning=uc,
x_T=start_code)
x_samples_ddim = self.sampler.model.decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim+1.0)/2.0, min=0.0, max=1.0) # [0, 1]
wav_list = []
for idx,spec in enumerate(x_samples_ddim):
wav = self.vocoder.vocode(spec)
wav_list.append((SAMPLE_RATE,wav))
best_wav = wav_list[0]
return best_wav
def inference(self, image, seed = 55, scale = 3, ddim_steps = 100, W = 624, H = 80):
melbins,mel_len = 80,624
with torch.no_grad():
result = self.img2audio(
image=image,
H=melbins,
W=mel_len
)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename, result[1], samplerate = 16000)
print(f"Processed I2a.run, image_filename: {image}, audio_filename: {audio_filename}")
return audio_filename
class TTS:
def __init__(self, device=None):
from inference.tts.PortaSpeech import TTSInference
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Initializing PortaSpeech to %s" % device)
self.device = device
self.exp_name = 'checkpoints/ps_adv_baseline'
self.set_model_hparams()
self.inferencer = TTSInference(self.hp, device)
def set_model_hparams(self):
set_hparams(exp_name=self.exp_name, print_hparams=False)
self.hp = hp
def inference(self, text):
self.set_model_hparams()
inp = {"text": text}
out = self.inferencer.infer_once(inp)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename, out, samplerate=22050)
return audio_filename
class T2S:
def __init__(self, device= None):
from inference.svs.ds_e2e import DiffSingerE2EInfer
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Initializing DiffSinger to %s" % device)
self.device = device
self.exp_name = 'checkpoints/0831_opencpop_ds1000'
self.config= 'NeuralSeq/egs/egs_bases/svs/midi/e2e/opencpop/ds1000.yaml'
self.set_model_hparams()
self.pipe = DiffSingerE2EInfer(self.hp, device)
self.default_inp = {
'text': '你 说 你 不 SP 懂 为 何 在 这 时 牵 手 AP',
'notes': 'D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | rest | D#4/Eb4 | D4 | D4 | D4 | D#4/Eb4 | F4 | D#4/Eb4 | D4 | rest',
'notes_duration': '0.113740 | 0.329060 | 0.287950 | 0.133480 | 0.150900 | 0.484730 | 0.242010 | 0.180820 | 0.343570 | 0.152050 | 0.266720 | 0.280310 | 0.633300 | 0.444590'
}
def set_model_hparams(self):
set_hparams(config=self.config, exp_name=self.exp_name, print_hparams=False)
self.hp = hp
def inference(self, inputs):
self.set_model_hparams()
val = inputs.split(",")
key = ['text', 'notes', 'notes_duration']
try:
inp = {k: v for k, v in zip(key, val)}
wav = self.pipe.infer_once(inp)
except:
            print('Error occurred. Generating default audio sample.\n')
inp = self.default_inp
wav = self.pipe.infer_once(inp)
#if inputs == '' or len(val) < len(key):
# inp = self.default_inp
#else:
# inp = {k:v for k,v in zip(key,val)}
#wav = self.pipe.infer_once(inp)
wav *= 32767
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
wavfile.write(audio_filename, self.hp['audio_sample_rate'], wav.astype(np.int16))
print(f"Processed T2S.run, audio_filename: {audio_filename}")
return audio_filename
class t2s_VISinger:
def __init__(self, device=None):
        from espnet2.bin.svs_inference import SingingGenerate
        from espnet2.utils.types import str_or_none  # needed for str_or_none(tag) below
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Initializing VISingere to %s" % device)
tag = 'AQuarterMile/opencpop_visinger1'
self.model = SingingGenerate.from_pretrained(
model_tag=str_or_none(tag),
device=device,
)
phn_dur = [[0. , 0.219 ],
[0.219 , 0.50599998],
[0.50599998, 0.71399999],
[0.71399999, 1.097 ],
[1.097 , 1.28799999],
[1.28799999, 1.98300004],
[1.98300004, 7.10500002],
[7.10500002, 7.60400009]]
phn = ['sh', 'i', 'q', 'v', 'n', 'i', 'SP', 'AP']
score = [[0, 0.50625, 'sh_i', 58, 'sh_i'], [0.50625, 1.09728, 'q_v', 56, 'q_v'], [1.09728, 1.9832100000000001, 'n_i', 53, 'n_i'], [1.9832100000000001, 7.105360000000001, 'SP', 0, 'SP'], [7.105360000000001, 7.604390000000001, 'AP', 0, 'AP']]
tempo = 70
tmp = {}
tmp["label"] = phn_dur, phn
tmp["score"] = tempo, score
self.default_inp = tmp
def inference(self, inputs):
val = inputs.split(",")
key = ['text', 'notes', 'notes_duration']
try: # TODO: input will be update
inp = {k: v for k, v in zip(key, val)}
wav = self.model(text=inp)["wav"]
except:
            print('Error occurred. Generating default audio sample.\n')
inp = self.default_inp
wav = self.model(text=inp)["wav"]
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename, wav, samplerate=self.model.fs)
return audio_filename
class TTS_OOD:
def __init__(self, device):
from inference.tts.GenerSpeech import GenerSpeechInfer
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Initializing GenerSpeech to %s" % device)
self.device = device
self.exp_name = 'checkpoints/GenerSpeech'
self.config = 'NeuralSeq/modules/GenerSpeech/config/generspeech.yaml'
self.set_model_hparams()
self.pipe = GenerSpeechInfer(self.hp, device)
def set_model_hparams(self):
set_hparams(config=self.config, exp_name=self.exp_name, print_hparams=False)
f0_stats_fn = f'{hp["binary_data_dir"]}/train_f0s_mean_std.npy'
if os.path.exists(f0_stats_fn):
hp['f0_mean'], hp['f0_std'] = np.load(f0_stats_fn)
hp['f0_mean'] = float(hp['f0_mean'])
hp['f0_std'] = float(hp['f0_std'])
hp['emotion_encoder_path'] = 'checkpoints/Emotion_encoder.pt'
self.hp = hp
def inference(self, inputs):
self.set_model_hparams()
key = ['ref_audio', 'text']
val = inputs.split(",")
inp = {k: v for k, v in zip(key, val)}
wav = self.pipe.infer_once(inp)
wav *= 32767
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
wavfile.write(audio_filename, self.hp['audio_sample_rate'], wav.astype(np.int16))
print(
f"Processed GenerSpeech.run. Input text:{val[1]}. Input reference audio: {val[0]}. Output Audio_filename: {audio_filename}")
return audio_filename
class Inpaint:
def __init__(self, device):
print("Initializing Make-An-Audio-inpaint to %s" % device)
self.device = device
self.sampler = self._initialize_model_inpaint('text_to_audio/Make_An_Audio/configs/inpaint/txt2audio_args.yaml', 'text_to_audio/Make_An_Audio/useful_ckpts/inpaint7_epoch00047.ckpt')
self.vocoder = VocoderBigVGAN('text_to_audio/Make_An_Audio/vocoder/logs/bigv16k53w',device=device)
self.cmap_transform = matplotlib.cm.viridis
def _initialize_model_inpaint(self, config, ckpt):
config = OmegaConf.load(config)
model = instantiate_from_config(config.model)
model.load_state_dict(torch.load(ckpt, map_location='cpu')["state_dict"], strict=False)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = model.to(device)
print(model.device, device, model.cond_stage_model.device)
sampler = DDIMSampler(model)
return sampler
def make_batch_sd(self, mel, mask, num_samples=1):
mel = torch.from_numpy(mel)[None,None,...].to(dtype=torch.float32)
mask = torch.from_numpy(mask)[None,None,...].to(dtype=torch.float32)
masked_mel = (1 - mask) * mel
mel = mel * 2 - 1
mask = mask * 2 - 1
masked_mel = masked_mel * 2 -1
batch = {
"mel": repeat(mel.to(device=self.device), "1 ... -> n ...", n=num_samples),
"mask": repeat(mask.to(device=self.device), "1 ... -> n ...", n=num_samples),
"masked_mel": repeat(masked_mel.to(device=self.device), "1 ... -> n ...", n=num_samples),
}
return batch
def gen_mel(self, input_audio_path):
SAMPLE_RATE = 16000
sr, ori_wav = wavfile.read(input_audio_path)
print("gen_mel")
print(sr,ori_wav.shape,ori_wav)
ori_wav = ori_wav.astype(np.float32, order='C') / 32768.0
if len(ori_wav.shape)==2:# stereo
ori_wav = librosa.to_mono(ori_wav.T)
print(sr,ori_wav.shape,ori_wav)
ori_wav = librosa.resample(ori_wav,orig_sr = sr,target_sr = SAMPLE_RATE)
mel_len,hop_size = 848,256
input_len = mel_len * hop_size
if len(ori_wav) < input_len:
input_wav = np.pad(ori_wav,(0,mel_len*hop_size),constant_values=0)
else:
input_wav = ori_wav[:input_len]
mel = TRANSFORMS_16000(input_wav)
return mel
def gen_mel_audio(self, input_audio):
SAMPLE_RATE = 16000
sr,ori_wav = input_audio
print("gen_mel_audio")
print(sr,ori_wav.shape,ori_wav)
ori_wav = ori_wav.astype(np.float32, order='C') / 32768.0
if len(ori_wav.shape)==2:# stereo
ori_wav = librosa.to_mono(ori_wav.T)
print(sr,ori_wav.shape,ori_wav)
ori_wav = librosa.resample(ori_wav,orig_sr = sr,target_sr = SAMPLE_RATE)
mel_len,hop_size = 848,256
input_len = mel_len * hop_size
if len(ori_wav) < input_len:
input_wav = np.pad(ori_wav,(0,mel_len*hop_size),constant_values=0)
else:
input_wav = ori_wav[:input_len]
mel = TRANSFORMS_16000(input_wav)
return mel
def show_mel_fn(self, input_audio_path):
crop_len = 500
crop_mel = self.gen_mel(input_audio_path)[:,:crop_len]
color_mel = self.cmap_transform(crop_mel)
image = Image.fromarray((color_mel*255).astype(np.uint8))
image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
image.save(image_filename)
return image_filename
def inpaint(self, batch, seed, ddim_steps, num_samples=1, W=512, H=512):
model = self.sampler.model
prng = np.random.RandomState(seed)
start_code = prng.randn(num_samples, model.first_stage_model.embed_dim, H // 8, W // 8)
start_code = torch.from_numpy(start_code).to(device=self.device, dtype=torch.float32)
c = model.get_first_stage_encoding(model.encode_first_stage(batch["masked_mel"]))
cc = torch.nn.functional.interpolate(batch["mask"],
size=c.shape[-2:])
c = torch.cat((c, cc), dim=1) # (b,c+1,h,w) 1 is mask
shape = (c.shape[1]-1,)+c.shape[2:]
samples_ddim, _ = self.sampler.sample(S=ddim_steps,
conditioning=c,
batch_size=c.shape[0],
shape=shape,
verbose=False)
x_samples_ddim = model.decode_first_stage(samples_ddim)
mel = torch.clamp((batch["mel"]+1.0)/2.0,min=0.0, max=1.0)
mask = torch.clamp((batch["mask"]+1.0)/2.0,min=0.0, max=1.0)
predicted_mel = torch.clamp((x_samples_ddim+1.0)/2.0,min=0.0, max=1.0)
inpainted = (1-mask)*mel+mask*predicted_mel
inpainted = inpainted.cpu().numpy().squeeze()
inapint_wav = self.vocoder.vocode(inpainted)
return inpainted, inapint_wav
def inference(self, input_audio, mel_and_mask, seed = 55, ddim_steps = 100):
SAMPLE_RATE = 16000
torch.set_grad_enabled(False)
mel_img = Image.open(mel_and_mask['image'])
mask_img = Image.open(mel_and_mask["mask"])
show_mel = np.array(mel_img.convert("L"))/255
mask = np.array(mask_img.convert("L"))/255
mel_bins,mel_len = 80,848
input_mel = self.gen_mel_audio(input_audio)[:,:mel_len]
mask = np.pad(mask,((0,0),(0,mel_len-mask.shape[1])),mode='constant',constant_values=0)
print(mask.shape,input_mel.shape)
with torch.no_grad():
batch = self.make_batch_sd(input_mel,mask,num_samples=1)
inpainted,gen_wav = self.inpaint(
batch=batch,
seed=seed,
ddim_steps=ddim_steps,
num_samples=1,
H=mel_bins, W=mel_len
)
inpainted = inpainted[:,:show_mel.shape[1]]
color_mel = self.cmap_transform(inpainted)
input_len = int(input_audio[1].shape[0] * SAMPLE_RATE / input_audio[0])
gen_wav = (gen_wav * 32768).astype(np.int16)[:input_len]
image = Image.fromarray((color_mel*255).astype(np.uint8))
image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
image.save(image_filename)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename, gen_wav, samplerate = 16000)
return image_filename, audio_filename
class ASR:
def __init__(self, device):
print("Initializing Whisper to %s" % device)
self.device = device
self.model = whisper.load_model("base", device=device)
def inference(self, audio_path):
audio = whisper.load_audio(audio_path)
audio = whisper.pad_or_trim(audio)
mel = whisper.log_mel_spectrogram(audio).to(self.device)
_, probs = self.model.detect_language(mel)
options = whisper.DecodingOptions()
result = whisper.decode(self.model, mel, options)
return result.text
def translate_english(self, audio_path):
audio = self.model.transcribe(audio_path, language='English')
return audio['text']
class A2T:
def __init__(self, device):
from audio_to_text.inference_waveform import AudioCapModel
print("Initializing Audio-To-Text Model to %s" % device)
self.device = device
self.model = AudioCapModel("audio_to_text/audiocaps_cntrstv_cnn14rnn_trm")
def inference(self, audio_path):
audio = whisper.load_audio(audio_path)
caption_text = self.model(audio)
return caption_text[0]
class GeneFace:
def __init__(self, device=None):
print("Initializing GeneFace model to %s" % device)
from audio_to_face.GeneFace_binding import GeneFaceInfer
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = device
self.geneface_model = GeneFaceInfer(device)
print("Loaded GeneFace model")
def inference(self, audio_path):
audio_base_name = os.path.basename(audio_path)[:-4]
out_video_name = audio_path.replace("audio","video").replace(".wav", ".mp4")
inp = {
'audio_source_name': audio_path,
'out_npy_name': f'geneface/tmp/{audio_base_name}.npy',
'cond_name': f'geneface/tmp/{audio_base_name}.npy',
'out_video_name': out_video_name,
'tmp_imgs_dir': f'video/tmp_imgs',
}
self.geneface_model.infer_once(inp)
return out_video_name
class SoundDetection:
def __init__(self, device):
self.device = device
self.sample_rate = 32000
self.window_size = 1024
self.hop_size = 320
self.mel_bins = 64
self.fmin = 50
self.fmax = 14000
self.model_type = 'PVT'
self.checkpoint_path = 'audio_detection/audio_infer/useful_ckpts/audio_detection.pth'
self.classes_num = detection_config.classes_num
self.labels = detection_config.labels
self.frames_per_second = self.sample_rate // self.hop_size
# Model = eval(self.model_type)
self.model = PVT(sample_rate=self.sample_rate, window_size=self.window_size,
hop_size=self.hop_size, mel_bins=self.mel_bins, fmin=self.fmin, fmax=self.fmax,
classes_num=self.classes_num)
checkpoint = torch.load(self.checkpoint_path, map_location=self.device)
self.model.load_state_dict(checkpoint['model'])
self.model.to(device)
def inference(self, audio_path):
# Forward
(waveform, _) = librosa.core.load(audio_path, sr=self.sample_rate, mono=True)
waveform = waveform[None, :] # (1, audio_length)
waveform = torch.from_numpy(waveform)
waveform = waveform.to(self.device)
# Forward
with torch.no_grad():
self.model.eval()
batch_output_dict = self.model(waveform, None)
framewise_output = batch_output_dict['framewise_output'].data.cpu().numpy()[0]
"""(time_steps, classes_num)"""
# print('Sound event detection result (time_steps x classes_num): {}'.format(
# framewise_output.shape))
import numpy as np
import matplotlib.pyplot as plt
sorted_indexes = np.argsort(np.max(framewise_output, axis=0))[::-1]
top_k = 10 # Show top results
top_result_mat = framewise_output[:, sorted_indexes[0 : top_k]]
"""(time_steps, top_k)"""
# Plot result
stft = librosa.core.stft(y=waveform[0].data.cpu().numpy(), n_fft=self.window_size,
hop_length=self.hop_size, window='hann', center=True)
frames_num = stft.shape[-1]
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(10, 4))
axs[0].matshow(np.log(np.abs(stft)), origin='lower', aspect='auto', cmap='jet')
axs[0].set_ylabel('Frequency bins')
axs[0].set_title('Log spectrogram')
axs[1].matshow(top_result_mat.T, origin='upper', aspect='auto', cmap='jet', vmin=0, vmax=1)
axs[1].xaxis.set_ticks(np.arange(0, frames_num, self.frames_per_second))
axs[1].xaxis.set_ticklabels(np.arange(0, frames_num / self.frames_per_second))
axs[1].yaxis.set_ticks(np.arange(0, top_k))
axs[1].yaxis.set_ticklabels(np.array(self.labels)[sorted_indexes[0 : top_k]])
axs[1].yaxis.grid(color='k', linestyle='solid', linewidth=0.3, alpha=0.3)
axs[1].set_xlabel('Seconds')
axs[1].xaxis.set_ticks_position('bottom')
plt.tight_layout()
image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
plt.savefig(image_filename)
return image_filename
class SoundExtraction:
def __init__(self, device):
from sound_extraction.model.LASSNet import LASSNet
from sound_extraction.utils.stft import STFT
import torch.nn as nn
self.device = device
self.model_file = 'sound_extraction/useful_ckpts/LASSNet.pt'
self.stft = STFT()
self.model = nn.DataParallel(LASSNet(device)).to(device)
checkpoint = torch.load(self.model_file)
self.model.load_state_dict(checkpoint['model'])
self.model.eval()
def inference(self, inputs):
#key = ['ref_audio', 'text']
from sound_extraction.utils.wav_io import load_wav, save_wav
val = inputs.split(",")
audio_path = val[0] # audio_path, text
text = val[1]
waveform = load_wav(audio_path)
waveform = torch.tensor(waveform).transpose(1,0)
mixed_mag, mixed_phase = self.stft.transform(waveform)
text_query = ['[CLS] ' + text]
mixed_mag = mixed_mag.transpose(2,1).unsqueeze(0).to(self.device)
est_mask = self.model(mixed_mag, text_query)
est_mag = est_mask * mixed_mag
est_mag = est_mag.squeeze(1)
est_mag = est_mag.permute(0, 2, 1)
est_wav = self.stft.inverse(est_mag.cpu().detach(), mixed_phase)
est_wav = est_wav.squeeze(0).squeeze(0).numpy()
#est_path = f'output/est{i}.wav'
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
print('audio_filename ', audio_filename)
save_wav(est_wav, audio_filename)
return audio_filename
class Binaural:
def __init__(self, device):
from src.models import BinauralNetwork
self.device = device
self.model_file = 'mono2binaural/useful_ckpts/m2b/binaural_network.net'
self.position_file = ['mono2binaural/useful_ckpts/m2b/tx_positions.txt',
'mono2binaural/useful_ckpts/m2b/tx_positions2.txt',
'mono2binaural/useful_ckpts/m2b/tx_positions3.txt',
'mono2binaural/useful_ckpts/m2b/tx_positions4.txt',
'mono2binaural/useful_ckpts/m2b/tx_positions5.txt']
self.net = BinauralNetwork(view_dim=7,
warpnet_layers=4,
warpnet_channels=64,
)
self.net.load_from_file(self.model_file)
self.sr = 48000
def inference(self, audio_path):
mono, sr = librosa.load(path=audio_path, sr=self.sr, mono=True)
mono = torch.from_numpy(mono)
mono = mono.unsqueeze(0)
import numpy as np
import random
rand_int = random.randint(0,4)
view = np.loadtxt(self.position_file[rand_int]).transpose().astype(np.float32)
view = torch.from_numpy(view)
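        # Each view position covers 400 audio samples; the checks below trim mono and crop view so that 400:1 ratio holds.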
if not view.shape[-1] * 400 == mono.shape[-1]:
mono = mono[:,:(mono.shape[-1]//400)*400] #
if view.shape[1]*400 > mono.shape[1]:
m_a = view.shape[1] - mono.shape[-1]//400
rand_st = random.randint(0,m_a)
                view = view[:, rand_st:rand_st + (mono.shape[-1]//400)]  # crop a random window of positions matching the trimmed audio length
# binauralize and save output
self.net.eval().to(self.device)
mono, view = mono.to(self.device), view.to(self.device)
chunk_size = 48000 # forward in chunks of 1s
rec_field = 1000 # add 1000 samples as "safe bet" since warping has undefined rec. field
rec_field -= rec_field % 400 # make sure rec_field is a multiple of 400 to match audio and view frequencies
chunks = [
{
"mono": mono[:, max(0, i-rec_field):i+chunk_size],
"view": view[:, max(0, i-rec_field)//400:(i+chunk_size)//400]
}
for i in range(0, mono.shape[-1], chunk_size)
]
for i, chunk in enumerate(chunks):
with torch.no_grad():
mono = chunk["mono"].unsqueeze(0)
view = chunk["view"].unsqueeze(0)
binaural = self.net(mono, view).squeeze(0)
if i > 0:
binaural = binaural[:, -(mono.shape[-1]-rec_field):]
chunk["binaural"] = binaural
binaural = torch.cat([chunk["binaural"] for chunk in chunks], dim=-1)
binaural = torch.clamp(binaural, min=-1, max=1).cpu()
#binaural = chunked_forwarding(net, mono, view)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
import torchaudio
torchaudio.save(audio_filename, binaural, sr)
#soundfile.write(audio_filename, binaural, samplerate = 48000)
print(f"Processed Binaural.run, audio_filename: {audio_filename}")
return audio_filename
class TargetSoundDetection:
def __init__(self, device):
from target_sound_detection.src import models as tsd_models
from target_sound_detection.src.models import event_labels
self.device = device
self.MEL_ARGS = {
'n_mels': 64,
'n_fft': 2048,
'hop_length': int(22050 * 20 / 1000),
'win_length': int(22050 * 40 / 1000)
}
self.EPS = np.spacing(1)
self.clip_model, _ = clip.load("ViT-B/32", device=self.device)
self.event_labels = event_labels
self.id_to_event = {i : label for i, label in enumerate(self.event_labels)}
config = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/run_config.pth', map_location='cpu')
config_parameters = dict(config)
config_parameters['tao'] = 0.6
if 'thres' not in config_parameters.keys():
config_parameters['thres'] = 0.5
if 'time_resolution' not in config_parameters.keys():
config_parameters['time_resolution'] = 125
model_parameters = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/run_model_7_loss=-0.0724.pt'
, map_location=lambda storage, loc: storage) # load parameter
self.model = getattr(tsd_models, config_parameters['model'])(config_parameters,
inputdim=64, outputdim=2, time_resolution=config_parameters['time_resolution'], **config_parameters['model_args'])
self.model.load_state_dict(model_parameters)
self.model = self.model.to(self.device).eval()
self.re_embeds = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/text_emb.pth')
self.ref_mel = torch.load('audio_detection/target_sound_detection/useful_ckpts/tsd/ref_mel.pth')
def extract_feature(self, fname):
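        # Load the audio, downmix to mono if needed, resample to 22.05 kHz, and return a log-mel feature matrix (time x mel bins) together with the clip duration in seconds.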
import soundfile as sf
y, sr = sf.read(fname, dtype='float32')
print('y ', y.shape)
ti = y.shape[0]/sr
if y.ndim > 1:
y = y.mean(1)
y = librosa.resample(y, sr, 22050)
lms_feature = np.log(librosa.feature.melspectrogram(y, **self.MEL_ARGS) + self.EPS).T
return lms_feature,ti
def build_clip(self, text):
text = clip.tokenize(text).to(self.device) # ["a diagram with dog", "a dog", "a cat"]
text_features = self.clip_model.encode_text(text)
return text_features
def cal_similarity(self, target, retrievals):
ans = []
#target =torch.from_numpy(target)
for name in retrievals.keys():
tmp = retrievals[name]
#tmp = torch.from_numpy(tmp)
s = torch.cosine_similarity(target.squeeze(), tmp.squeeze(), dim=0)
ans.append(s.item())
return ans.index(max(ans))
def inference(self, text, audio_path):
from target_sound_detection.src.utils import median_filter, decode_with_timestamps
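        # Overall flow: embed the text query with CLIP, match it to the closest known event class,
        # fetch that event's reference mel embedding, run the detector over the input audio,
        # and convert the frame-level predictions into onset/offset times in seconds.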
target_emb = self.build_clip(text) # torch type
idx = self.cal_similarity(target_emb, self.re_embeds)
target_event = self.id_to_event[idx]
embedding = self.ref_mel[target_event]
embedding = torch.from_numpy(embedding)
embedding = embedding.unsqueeze(0).to(self.device).float()
#print('embedding ', embedding.shape)
inputs,ti = self.extract_feature(audio_path)
#print('ti ', ti)
inputs = torch.from_numpy(inputs)
inputs = inputs.unsqueeze(0).to(self.device).float()
#print('inputs ', inputs.shape)
decision, decision_up, logit = self.model(inputs, embedding)
pred = decision_up.detach().cpu().numpy()
pred = pred[:,:,0]
frame_num = decision_up.shape[1]
time_ratio = ti / frame_num
filtered_pred = median_filter(pred, window_size=1, threshold=0.5)
#print('filtered_pred ', filtered_pred)
time_predictions = []
for index_k in range(filtered_pred.shape[0]):
decoded_pred = []
decoded_pred_ = decode_with_timestamps(target_event, filtered_pred[index_k,:])
if len(decoded_pred_) == 0: # neg deal
decoded_pred_.append((target_event, 0, 0))
decoded_pred.append(decoded_pred_)
for num_batch in range(len(decoded_pred)): # when we test our model,the batch_size is 1
cur_pred = pred[num_batch]
# Save each frame output, for later visualization
label_prediction = decoded_pred[num_batch] # frame predict
# print(label_prediction)
for event_label, onset, offset in label_prediction:
time_predictions.append({
'onset': onset*time_ratio,
'offset': offset*time_ratio,})
ans = ''
for i,item in enumerate(time_predictions):
ans = ans + 'segment' + str(i+1) + ' start_time: ' + str(item['onset']) + ' end_time: ' + str(item['offset']) + '\t'
#print(ans)
return ans
# class Speech_Enh_SS_SC:
# """Speech Enhancement or Separation in single-channel
# Example usage:
# enh_model = Speech_Enh_SS("cuda")
# enh_wav = enh_model.inference("./test_chime4_audio_M05_440C0213_PED_REAL.wav")
# """
# def __init__(self, device="cuda", model_name="lichenda/chime4_fasnet_dprnn_tac"):
# self.model_name = model_name
# self.device = device
# print("Initializing ESPnet Enh to %s" % device)
# self._initialize_model()
# def _initialize_model(self):
# from espnet_model_zoo.downloader import ModelDownloader
# from espnet2.bin.enh_inference import SeparateSpeech
# d = ModelDownloader()
# cfg = d.download_and_unpack(self.model_name)
# self.separate_speech = SeparateSpeech(
# train_config=cfg["train_config"],
# model_file=cfg["model_file"],
# # for segment-wise process on long speech
# segment_size=2.4,
# hop_size=0.8,
# normalize_segment_scale=False,
# show_progressbar=True,
# ref_channel=None,
# normalize_output_wav=True,
# device=self.device,
# )
# def inference(self, speech_path, ref_channel=0):
# speech, sr = soundfile.read(speech_path)
# speech = speech[:, ref_channel]
# assert speech.dim() == 1
# enh_speech = self.separate_speech(speech[None, ], fs=sr)
# if len(enh_speech) == 1:
# return enh_speech[0]
# return enh_speech
# class Speech_Enh_SS_MC:
# """Speech Enhancement or Separation in multi-channel"""
# def __init__(self, device="cuda", model_name=None, ref_channel=4):
# self.model_name = model_name
# self.ref_channel = ref_channel
# self.device = device
# print("Initializing ESPnet Enh to %s" % device)
# self._initialize_model()
# def _initialize_model(self):
# from espnet_model_zoo.downloader import ModelDownloader
# from espnet2.bin.enh_inference import SeparateSpeech
# d = ModelDownloader()
# cfg = d.download_and_unpack(self.model_name)
# self.separate_speech = SeparateSpeech(
# train_config=cfg["train_config"],
# model_file=cfg["model_file"],
# # for segment-wise process on long speech
# segment_size=2.4,
# hop_size=0.8,
# normalize_segment_scale=False,
# show_progressbar=True,
# ref_channel=self.ref_channel,
# normalize_output_wav=True,
# device=self.device,
# )
# def inference(self, speech_path):
# speech, sr = soundfile.read(speech_path)
# speech = speech.T
# enh_speech = self.separate_speech(speech[None, ...], fs=sr)
# if len(enh_speech) == 1:
# return enh_speech[0]
# return enh_speech
class Speech_Enh_SS_SC:
"""Speech Enhancement or Separation in single-channel
Example usage:
    enh_model = Speech_Enh_SS_SC("cuda")
enh_wav = enh_model.inference("./test_chime4_audio_M05_440C0213_PED_REAL.wav")
"""
def __init__(self, device="cuda", model_name="espnet/Wangyou_Zhang_chime4_enh_train_enh_conv_tasnet_raw"):
self.model_name = model_name
self.device = device
print("Initializing ESPnet Enh to %s" % device)
self._initialize_model()
def _initialize_model(self):
from espnet_model_zoo.downloader import ModelDownloader
from espnet2.bin.enh_inference import SeparateSpeech
d = ModelDownloader()
cfg = d.download_and_unpack(self.model_name)
self.separate_speech = SeparateSpeech(
train_config=cfg["train_config"],
model_file=cfg["model_file"],
# for segment-wise process on long speech
segment_size=2.4,
hop_size=0.8,
normalize_segment_scale=False,
show_progressbar=True,
ref_channel=None,
normalize_output_wav=True,
device=self.device,
)
def inference(self, speech_path, ref_channel=0):
speech, sr = soundfile.read(speech_path)
speech = speech[:, ref_channel]
# speech = torch.from_numpy(speech)
# assert speech.dim() == 1
enh_speech = self.separate_speech(speech[None, ...], fs=sr)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
# if len(enh_speech) == 1:
soundfile.write(audio_filename, enh_speech[0].squeeze(), samplerate=sr)
# return enh_speech[0]
# return enh_speech
# else:
# print("############")
# audio_filename_1 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
# soundfile.write(audio_filename_1, enh_speech[0].squeeze(), samplerate=sr)
# audio_filename_2 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
# soundfile.write(audio_filename_2, enh_speech[1].squeeze(), samplerate=sr)
# audio_filename = merge_audio(audio_filename_1, audio_filename_2)
return audio_filename
class Speech_SS:
def __init__(self, device="cuda", model_name="lichenda/wsj0_2mix_skim_noncausal"):
self.model_name = model_name
self.device = device
print("Initializing ESPnet SS to %s" % device)
self._initialize_model()
def _initialize_model(self):
from espnet_model_zoo.downloader import ModelDownloader
from espnet2.bin.enh_inference import SeparateSpeech
d = ModelDownloader()
cfg = d.download_and_unpack(self.model_name)
self.separate_speech = SeparateSpeech(
train_config=cfg["train_config"],
model_file=cfg["model_file"],
# for segment-wise process on long speech
segment_size=2.4,
hop_size=0.8,
normalize_segment_scale=False,
show_progressbar=True,
ref_channel=None,
normalize_output_wav=True,
device=self.device,
)
def inference(self, speech_path):
speech, sr = soundfile.read(speech_path)
enh_speech = self.separate_speech(speech[None, ...], fs=sr)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
if len(enh_speech) == 1:
soundfile.write(audio_filename, enh_speech[0], samplerate=sr)
else:
# print("############")
audio_filename_1 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename_1, enh_speech[0].squeeze(), samplerate=sr)
audio_filename_2 = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
soundfile.write(audio_filename_2, enh_speech[1].squeeze(), samplerate=sr)
audio_filename = merge_audio(audio_filename_1, audio_filename_2)
return audio_filename
class ConversationBot:
def __init__(self):
print("Initializing AudioGPT")
self.llm = OpenAI(temperature=0)
self.t2i = T2I(device="cuda:1")
self.i2t = ImageCaptioning(device="cuda:0")
self.t2a = T2A(device="cuda:0")
self.tts = TTS(device="cpu")
self.t2s = T2S(device="cpu")
self.i2a = I2A(device="cuda:0")
self.a2t = A2T(device="cpu")
self.asr = ASR(device="cuda:0")
self.SE_SS_SC = Speech_Enh_SS_SC(device="cuda:0")
# self.SE_SS_MC = Speech_Enh_SS_MC(device="cuda:0")
self.SS = Speech_SS(device="cuda:0")
self.inpaint = Inpaint(device="cuda:0")
self.tts_ood = TTS_OOD(device="cpu")
self.geneface = GeneFace(device="cuda:0")
self.detection = SoundDetection(device="cpu")
self.binaural = Binaural(device="cuda:0")
self.extraction = SoundExtraction(device="cuda:0")
self.TSD = TargetSoundDetection(device="cuda:0")
self.memory = ConversationBufferMemory(memory_key="chat_history", output_key='output')
def init_tools(self, interaction_type):
if interaction_type == 'text':
self.tools = [
Tool(name="Generate Image From User Input Text", func=self.t2i.inference,
description="useful for when you want to generate an image from a user input text and it saved it to a file. like: generate an image of an object or something, or generate an image that includes some objects. "
"The input to this tool should be a string, representing the text used to generate image. "),
Tool(name="Get Photo Description", func=self.i2t.inference,
description="useful for when you want to know what is inside the photo. receives image_path as input. "
"The input to this tool should be a string, representing the image_path. "),
Tool(name="Generate Audio From User Input Text", func=self.t2a.inference,
description="useful for when you want to generate an audio from a user input text and it saved it to a file."
"The input to this tool should be a string, representing the text used to generate audio."),
Tool(
name="Style Transfer", func= self.tts_ood.inference,
description="useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice."
"Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx."
"The input to this tool should be a comma seperated string of two, representing reference audio path and input text."),
Tool(name="Generate Singing Voice From User Input Text, Note and Duration Sequence", func= self.t2s.inference,
description="useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file."
"If Like: Generate a piece of singing voice, the input to this tool should be \"\" since there is no User Input Text, Note and Duration Sequence ."
"If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. "
"Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx."
"The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."),
Tool(name="Synthesize Speech Given the User Input Text", func=self.tts.inference,
description="useful for when you want to convert a user input text into speech audio it saved it to a file."
"The input to this tool should be a string, representing the text used to be converted to speech."),
# Tool(name="Speech Enhancement Or Separation In Single-Channel", func=self.SE_SS_SC.inference,
# description="useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), "
# "or separate each speech from the speech mixture (single-channel), receives audio_path as input."
# "The input to this tool should be a string, representing the audio_path."),
Tool(name="Speech Enhancement In Single-Channel", func=self.SE_SS_SC.inference,
description="useful for when you want to enhance the quality of the speech signal by reducing background noise (single-channel), receives audio_path as input."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Speech Separation In Single-Channel", func=self.SS.inference,
description="useful for when you want to separate each speech from the speech mixture, receives audio_path as input."
"The input to this tool should be a string, representing the audio_path."),
# Tool(name="Speech Enhancement In Multi-Channel", func=self.SE_SS_MC.inference,
# description="useful for when you want to enhance the quality of the speech signal by reducing background noise (multi-channel), receives audio_path as input."
# "The input to this tool should be a string, representing the audio_path."),
Tool(name="Generate Audio From The Image", func=self.i2a.inference,
description="useful for when you want to generate an audio based on an image."
"The input to this tool should be a string, representing the image_path. "),
Tool(name="Generate Text From The Audio", func=self.a2t.inference,
description="useful for when you want to describe an audio in text, receives audio_path as input."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Audio Inpainting", func=self.inpaint.show_mel_fn,
description="useful for when you want to inpaint a mel spectrum of an audio and predict this audio, this tool will generate a mel spectrum and you can inpaint it, receives audio_path as input, "
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Transcribe Speech", func=self.asr.inference,
description="useful for when you want to know the text corresponding to a human speech, receives audio_path as input."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Generate a talking human portrait video given a input Audio", func=self.geneface.inference,
description="useful for when you want to generate a talking human portrait video given a input audio."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Detect The Sound Event From The Audio", func=self.detection.inference,
description="useful for when you want to know what event in the audio and the sound event start or end time, this tool will generate an image of all predict events, receives audio_path as input. "
"The input to this tool should be a string, representing the audio_path. "),
Tool(name="Sythesize Binaural Audio From A Mono Audio Input", func=self.binaural.inference,
description="useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. "
"The input to this tool should be a string, representing the audio_path. "),
Tool(name="Extract Sound Event From Mixture Audio Based On Language Description", func=self.extraction.inference,
description="useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. "
"The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."),
Tool(name="Target Sound Detection", func=self.TSD.inference,
description="useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. "
"The input to this tool should be a comma seperated string of two, representing audio path and the text description. ")]
self.agent = initialize_agent(
self.tools,
self.llm,
agent="conversational-react-description",
verbose=True,
memory=self.memory,
return_intermediate_steps=True,
agent_kwargs={'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions': AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}, )
return gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)
else:
self.tools = [
Tool(name="Generate Audio From User Input Text", func=self.t2a.inference,
description="useful for when you want to generate an audio from a user input text and it saved it to a file."
"The input to this tool should be a string, representing the text used to generate audio."),
Tool(
name="Style Transfer", func= self.tts_ood.inference,
description="useful for when you want to generate speech samples with styles (e.g., timbre, emotion, and prosody) derived from a reference custom voice."
"Like: Generate a speech with style transferred from this voice. The text is xxx., or speak using the voice of this audio. The text is xxx."
"The input to this tool should be a comma seperated string of two, representing reference audio path and input text."),
Tool(name="Generate Singing Voice From User Input Text, Note and Duration Sequence", func= self.t2s.inference,
description="useful for when you want to generate a piece of singing voice (Optional: from User Input Text, Note and Duration Sequence) and save it to a file."
"If Like: Generate a piece of singing voice, the input to this tool should be \"\" since there is no User Input Text, Note and Duration Sequence ."
"If Like: Generate a piece of singing voice. Text: xxx, Note: xxx, Duration: xxx. "
"Or Like: Generate a piece of singing voice. Text is xxx, note is xxx, duration is xxx."
"The input to this tool should be a comma seperated string of three, representing text, note and duration sequence since User Input Text, Note and Duration Sequence are all provided."),
Tool(name="Synthesize Speech Given the User Input Text", func=self.tts.inference,
description="useful for when you want to convert a user input text into speech audio it saved it to a file."
"The input to this tool should be a string, representing the text used to be converted to speech."),
Tool(name="Generate Text From The Audio", func=self.a2t.inference,
description="useful for when you want to describe an audio in text, receives audio_path as input."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Generate a talking human portrait video given a input Audio", func=self.geneface.inference,
description="useful for when you want to generate a talking human portrait video given a input audio."
"The input to this tool should be a string, representing the audio_path."),
Tool(name="Generate Binaural Audio From A Mono Audio Input", func=self.binaural.inference,
description="useful for when you want to transfer your mono audio into binaural audio, receives audio_path as input. "
"The input to this tool should be a string, representing the audio_path. "),
Tool(name="Extract Sound Event From Mixture Audio Based On Language Description", func=self.extraction.inference,
description="useful for when you extract target sound from a mixture audio, you can describe the target sound by text, receives audio_path and text as input. "
"The input to this tool should be a comma seperated string of two, representing mixture audio path and input text."),
Tool(name="Target Sound Detection", func=self.TSD.inference,
description="useful for when you want to know when the target sound event in the audio happens. You can use language descriptions to instruct the model. receives text description and audio_path as input. "
"The input to this tool should be a comma seperated string of two, representing audio path and the text description. ")]
self.agent = initialize_agent(
self.tools,
self.llm,
agent="conversational-react-description",
verbose=True,
memory=self.memory,
return_intermediate_steps=True,
agent_kwargs={'prefix': AUDIO_CHATGPT_PREFIX, 'format_instructions': AUDIO_CHATGPT_FORMAT_INSTRUCTIONS, 'suffix': AUDIO_CHATGPT_SUFFIX}, )
return gr.update(visible=False), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True)
def run_text(self, text, state):
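        # Trim the chat history, run the LangChain agent on the user text, then route the tool result to the matching Gradio component (audio, video, image or plain text).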
print("===============Running run_text =============")
print("Inputs:", text, state)
print("======>Previous memory:\n %s" % self.agent.memory)
self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
res = self.agent({"input": text})
if res['intermediate_steps'] == []:
print("======>Current memory:\n %s" % self.agent.memory)
response = res['output']
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
else:
tool = res['intermediate_steps'][0][0].tool
if tool == "Generate Image From User Input Text" or tool == "Generate Text From The Audio" or tool == "Target Sound Detection":
print("======>Current memory:\n %s" % self.agent.memory)
                response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
elif tool == "Transcribe Speech":
response = res['output']
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
elif tool == "Detect The Sound Event From The Audio":
image_filename = res['intermediate_steps'][0][1]
response = res['output'] + f"*{image_filename}*"
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
elif tool == "Audio Inpainting":
audio_filename = res['intermediate_steps'][0][0].tool_input
image_filename = res['intermediate_steps'][0][1]
print("======>Current memory:\n %s" % self.agent.memory)
response = res['output']
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False), gr.Image.update(value=image_filename,visible=True), gr.Button.update(visible=True)
elif tool == "Generate a talking human portrait video given a input Audio":
video_filename = res['intermediate_steps'][0][1]
print("======>Current memory:\n %s" % self.agent.memory)
response = res['output']
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(value=video_filename,visible=True), gr.Image.update(visible=False), gr.Button.update(visible=False)
print("======>Current memory:\n %s" % self.agent.memory)
            response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
audio_filename = res['intermediate_steps'][0][1]
state = state + [(text, response)]
print("Outputs:", state)
return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False), gr.Image.update(visible=False), gr.Button.update(visible=False)
def run_image_or_audio(self, file, state, txt):
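        # Handle an uploaded file: caption audio with the A2T model or images with the captioner, then register the file path and its description in the agent's memory.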
file_type = file.name[-3:]
if file_type == "wav":
print("===============Running run_audio =============")
print("Inputs:", file, state)
print("======>Previous memory:\n %s" % self.agent.memory)
audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
# audio_load = whisper.load_audio(file.name)
audio_load, sr = soundfile.read(file.name)
soundfile.write(audio_filename, audio_load, samplerate = sr)
description = self.a2t.inference(audio_filename)
Human_prompt = "\nHuman: provide an audio named {}. The description is: {}. This information helps you to understand this audio, but you should use tools to finish following tasks, " \
"rather than directly imagine from my description. If you understand, say \"Received\". \n".format(audio_filename, description)
AI_prompt = "Received. "
self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
print("======>Current memory:\n %s" % self.agent.memory)
#state = state + [(f"<audio src=audio_filename controls=controls></audio>*{audio_filename}*", AI_prompt)]
state = state + [(f"*{audio_filename}*", AI_prompt)]
print("Outputs:", state)
return state, state, gr.Audio.update(value=audio_filename,visible=True), gr.Video.update(visible=False)
else:
print("===============Running run_image =============")
print("Inputs:", file, state)
print("======>Previous memory:\n %s" % self.agent.memory)
image_filename = os.path.join('image', str(uuid.uuid4())[0:8] + ".png")
print("======>Auto Resize Image...")
img = Image.open(file.name)
width, height = img.size
ratio = min(512 / width, 512 / height)
width_new, height_new = (round(width * ratio), round(height * ratio))
img = img.resize((width_new, height_new))
img = img.convert('RGB')
img.save(image_filename, "PNG")
print(f"Resize image form {width}x{height} to {width_new}x{height_new}")
description = self.i2t.inference(image_filename)
Human_prompt = "\nHuman: provide a figure named {}. The description is: {}. This information helps you to understand this image, but you should use tools to finish following tasks, " \
"rather than directly imagine from my description. If you understand, say \"Received\". \n".format(image_filename, description)
AI_prompt = "Received. "
self.agent.memory.buffer = self.agent.memory.buffer + Human_prompt + 'AI: ' + AI_prompt
print("======>Current memory:\n %s" % self.agent.memory)
state = state + [(f"*{image_filename}*", AI_prompt)]
print("Outputs:", state)
return state, state, gr.Audio.update(visible=False), gr.Video.update(visible=False)
def speech(self, speech_input, state):
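        # Speech interaction: transcribe the microphone input with ASR, run the agent, and return a spoken (TTS) response, merging in any audio a tool produced.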
input_audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
text = self.asr.translate_english(speech_input)
print("Inputs:", text, state)
print("======>Previous memory:\n %s" % self.agent.memory)
self.agent.memory.buffer = cut_dialogue_history(self.agent.memory.buffer, keep_last_n_words=500)
res = self.agent({"input": text})
if res['intermediate_steps'] == []:
print("======>Current memory:\n %s" % self.agent.memory)
response = res['output']
output_audio_filename = self.tts.inference(response)
state = state + [(text, response)]
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
else:
tool = res['intermediate_steps'][0][0].tool
if tool == "Generate Image From User Input Text" or tool == "Generate Text From The Audio" or tool == "Target Sound Detection":
print("======>Current memory:\n %s" % self.agent.memory)
                response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
output_audio_filename = self.tts.inference(res['output'])
state = state + [(text, response)]
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
elif tool == "Transcribe Speech":
print("======>Current memory:\n %s" % self.agent.memory)
output_audio_filename = self.tts.inference(res['output'])
response = res['output']
state = state + [(text, response)]
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
elif tool == "Detect The Sound Event From The Audio":
print("======>Current memory:\n %s" % self.agent.memory)
image_filename = res['intermediate_steps'][0][1]
output_audio_filename = self.tts.inference(res['output'])
response = res['output'] + f"*{image_filename}*"
state = state + [(text, response)]
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
elif tool == "Generate a talking human portrait video given a input Audio":
video_filename = res['intermediate_steps'][0][1]
print("======>Current memory:\n %s" % self.agent.memory)
response = res['output']
output_audio_filename = self.tts.inference(res['output'])
state = state + [(text, response)]
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(value=video_filename,visible=True)
print("======>Current memory:\n %s" % self.agent.memory)
            response = re.sub('(image/\S*png)', lambda m: f'![](/file={m.group(0)})*{m.group(0)}*', res['output'])
audio_filename = res['intermediate_steps'][0][1]
Res = "The audio file has been generated and the audio is "
output_audio_filename = merge_audio(self.tts.inference(Res), audio_filename)
print(output_audio_filename)
state = state + [(text, response)]
response = res['output']
print("Outputs:", state)
return gr.Audio.update(value=None), gr.Audio.update(value=output_audio_filename,visible=True), state, gr.Video.update(visible=False)
def inpainting(self, state, audio_filename, image_filename):
print("===============Running inpainting =============")
print("Inputs:", state)
print("======>Previous memory:\n %s" % self.agent.memory)
new_image_filename, new_audio_filename = self.inpaint.inference(audio_filename, image_filename)
AI_prompt = "Here are the predict audio and the mel spectrum." + f"*{new_audio_filename}*" + f"*{new_image_filename}*"
output_audio_filename = self.tts.inference(AI_prompt)
self.agent.memory.buffer = self.agent.memory.buffer + 'AI: ' + AI_prompt
print("======>Current memory:\n %s" % self.agent.memory)
state = state + [(f"Audio Inpainting", AI_prompt)]
print("Outputs:", state)
return state, state, gr.Image.update(visible=False), gr.Audio.update(value=new_audio_filename, visible=True), gr.Video.update(visible=False), gr.Button.update(visible=False)
def clear_audio(self):
return gr.Audio.update(value=None, visible=False)
def clear_input_audio(self):
return gr.Audio.update(value=None)
def clear_image(self):
return gr.Image.update(value=None, visible=False)
def clear_video(self):
return gr.Video.update(value=None, visible=False)
def clear_button(self):
return gr.Button.update(visible=False)
if __name__ == '__main__':
bot = ConversationBot()
with gr.Blocks(css="#chatbot .overflow-y-auto{height:500px}") as demo:
with gr.Row():
gr.Markdown("## AudioGPT")
chatbot = gr.Chatbot(elem_id="chatbot", label="AudioGPT", visible=False)
state = gr.State([])
with gr.Row() as select_raws:
with gr.Column(scale=0.7):
interaction_type = gr.Radio(choices=['text', 'speech'], value='text', label='Interaction Type')
with gr.Column(scale=0.3, min_width=0):
select = gr.Button("Select")
with gr.Row(visible=False) as text_input_raws:
with gr.Column(scale=0.7):
txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter, or upload an image").style(container=False)
with gr.Column(scale=0.1, min_width=0):
run = gr.Button("🏃♂️Run")
with gr.Column(scale=0.1, min_width=0):
clear_txt = gr.Button("🔄Clear️")
with gr.Column(scale=0.1, min_width=0):
btn = gr.UploadButton("🖼️Upload", file_types=["image","audio"])
with gr.Row():
outaudio = gr.Audio(visible=False)
with gr.Row():
with gr.Column(scale=0.3, min_width=0):
outvideo = gr.Video(visible=False)
with gr.Row():
show_mel = gr.Image(type="filepath",tool='sketch',visible=False)
with gr.Row():
run_button = gr.Button("Predict Masked Place",visible=False)
with gr.Row(visible=False) as speech_input_raws:
with gr.Column(scale=0.7):
speech_input = gr.Audio(source="microphone", type="filepath", label="Input")
with gr.Column(scale=0.15, min_width=0):
submit_btn = gr.Button("🏃♂️Submit")
with gr.Column(scale=0.15, min_width=0):
clear_speech = gr.Button("🔄Clear️")
with gr.Row():
speech_output = gr.Audio(label="Output",visible=False)
select.click(bot.init_tools, [interaction_type], [chatbot, select_raws, text_input_raws, speech_input_raws])
txt.submit(bot.run_text, [txt, state], [chatbot, state, outaudio, outvideo, show_mel, run_button])
txt.submit(lambda: "", None, txt)
run.click(bot.run_text, [txt, state], [chatbot, state, outaudio, outvideo, show_mel, run_button])
run.click(lambda: "", None, txt)
btn.upload(bot.run_image_or_audio, [btn, state, txt], [chatbot, state, outaudio, outvideo])
run_button.click(bot.inpainting, [state, outaudio, show_mel], [chatbot, state, show_mel, outaudio, outvideo, run_button])
clear_txt.click(bot.memory.clear)
clear_txt.click(lambda: [], None, chatbot)
clear_txt.click(lambda: [], None, state)
clear_txt.click(lambda:None, None, txt)
clear_txt.click(bot.clear_button, None, run_button)
clear_txt.click(bot.clear_image, None, show_mel)
clear_txt.click(bot.clear_audio, None, outaudio)
clear_txt.click(bot.clear_video, None, outvideo)
submit_btn.click(bot.speech, [speech_input, state], [speech_input, speech_output, state, outvideo])
clear_speech.click(bot.clear_input_audio, None, speech_input)
clear_speech.click(bot.clear_audio, None, speech_output)
clear_speech.click(lambda: [], None, state)
clear_speech.click(bot.clear_video, None, outvideo)
        demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
"\nHuman: provide a figure named PLACEHOLDER. The description is: PLACEHOLDER. This information helps you to understand this image, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n",
"Received. ",
"Here are the predict audio and the mel spectrum.*PLACEHOLDER**PLACEHOLDER*",
"\nHuman: provide an audio named PLACEHOLDER. The description is: PLACEHOLDER. This information helps you to understand this audio, but you should use tools to finish following tasks, rather than directly imagine from my description. If you understand, say \"Received\". \n"
] |
# -*- coding: utf-8 -*-
"""Sentiment Analysis and Classification with BERT and Hugging Face.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1vvCSehFYukB8VcyllqLHlkTOocfwyUp8
# Sentiment Analysis with BERT
> In this exercise tutorial, we will obtain and fine-tune a BERT base model for sentiment analysis. We'll do the required text preprocessing, such as adding special tokens, padding, and attention masks. Finally, we will build a Sentiment Classifier using the amazing Transformers library provided by Hugging Face.
We will:
- Preprocess text data for BERT and build PyTorch Dataset (tokenization, attention masks, and padding)
- Use Transfer Learning to build Sentiment Classifier using the Transformers library by Hugging Face
- Evaluate the model on test data
- Predict sentiment on raw text
#### Source:
Comprehensive tutorial on sentiment classification: https://youtu.be/8N-nM3QW7O0
BERT Paper: https://arxiv.org/abs/1810.04805
Attention is All you Need: https://arxiv.org/abs/1706.03762
Encoding words with context: https://arxiv.org/abs/1802.05365v2
"""
!nvidia-smi
"""## What is BERT?
BERT stands for Bidirectional Encoder Representations from Transformers.
According to the BERT paper, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers, much unlike earlier language representation models such as LSTMs.
As a result, the pre-trained BERT model we will download can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications.
Some important features of the BERT model are:
- Bidirectional - to understand the text you're looking at, you'll have to look back (at the previous words) and forward (at the next words)
- Transformers - The "Attention Is All You Need" paper presented the Transformer model. The Transformer reads entire sequences of tokens at once.
This preserves the context of natural language well, allowing us to avoid losing context.
In a sense, the model is non-directional, while LSTMs read sequentially (left-to-right or right-to-left). The attention mechanism allows for learning contextual relations between words.
- (Pre-trained) contextualized word embeddings - The ELMO paper introduced a way to encode words based on their meaning/context. Nails has multiple meanings - fingernails and metal nails.
BERT was trained by masking 15% of the tokens with the goal of guessing them. An additional objective was to predict the next sentence. BERT out of the box is very capable at Masked Language Modeling (where we let the model guess masked-out words in our input) and Next Sentence Prediction (where BERT predicts whether one sentence follows another).
BERT is simply a pre-trained stack of Transformer encoders. There exist two versions of BERT - one with 12 encoders (BERT base) and one with 24 encoders (BERT large).
### Is This Thing Useful in Practice?
The best part is that you can do Transfer Learning (thanks to the ideas from OpenAI Transformer) with BERT for many NLP tasks - Classification, Question Answering, Entity Recognition, etc. You can train with small amounts of data and achieve great performance!
## Setup
We'll need the Transformers library by Hugging Face, so we'll go ahead and download it:
"""
!pip install -q -U watermark
!pip install -qq transformers
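# A quick, added illustration of the masked language modelling objective described
# above; it is not part of the original notebook. It assumes the transformers install
# from the previous cell and downloads the small pre-trained 'bert-base-cased' model.
from transformers import pipeline

fill_mask = pipeline('fill-mask', model='bert-base-cased')
# BERT proposes the most likely tokens for the [MASK] position.
for candidate in fill_mask("The weather today is really [MASK]."):
    print(candidate['token_str'], round(candidate['score'], 3))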
# Commented out IPython magic to ensure Python compatibility.
# %reload_ext watermark
# %watermark -v -p numpy,pandas,torch,transformers
# Commented out IPython magic to ensure Python compatibility.
#@title Setup & Config
# We'll perform some quick setup; these will come in handy later when
# we train and evaluate our model.
# We will also be using the GPU mostly for our modeling, as recommended by the BERT paper.
import transformers
from transformers import BertModel, BertTokenizer, AdamW, get_linear_schedule_with_warmup
import torch
import numpy as np
import pandas as pd
import seaborn as sns
from pylab import rcParams
import matplotlib.pyplot as plt
from matplotlib import rc
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report
from collections import defaultdict
from textwrap import wrap
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
# %matplotlib inline
# %config InlineBackend.figure_format='retina'
sns.set(style='whitegrid', palette='muted', font_scale=1.2)
HAPPY_COLORS_PALETTE = ["#01BEFE", "#FFDD00", "#FF7D00", "#FF006D", "#ADFF02", "#8F00FF"]
sns.set_palette(sns.color_palette(HAPPY_COLORS_PALETTE))
rcParams['figure.figsize'] = 12, 8
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device
"""## EDA
We'll load the Google Play app reviews dataset, gathered from the tutorial:
"""
!gdown --id 1S6qMioqPJjyBLpLVz4gmRTnJHnjitnuV
!gdown --id 1zdmewp7ayS4js4VtrJEHzAheSW-5NBZv
df = pd.read_csv("reviews.csv")
df.head()
df.shape
"""We have about 16k examples. Let's check for missing values:"""
df.info()
"""Great, no missing values in the score and review texts! Do we have class imbalance?"""
sns.countplot(df.score)
plt.xlabel('review score');
"""Our dataset is initialy hugely imbalanced, but that's fine,
We will convert the dataset into negative, neutral and positive sentiment, totaling 3 classes.
"""
def to_sentiment(rating):
rating = int(rating)
if rating <= 2:
return 0
elif rating == 3:
return 1
else:
return 2
df['sentiment'] = df.score.apply(to_sentiment)
class_names = ['negative', 'neutral', 'positive']
ax = sns.countplot(df.sentiment)
plt.xlabel('review sentiment')
ax.set_xticklabels(class_names);
"""The balance is mostly restored after our custom scoring. Next, we need to pre-process our data so pytorch can handle it.
## Data Preprocessing
Since Machine Learning models don't work with raw text, we need to convert all the text to numbers. BERT requires even more attention, pun intended.
We need to effectively:
- Add special tokens to separate sentences and do classification
- Pass sequences of constant length (introduce padding to fill up empty spaces)
- Create array of 0s (pad token) and 1s (real token) called *attention mask*
The Transformers library provides a wide variety of Transformer models, including BERT. It works with both TensorFlow and PyTorch; for the purpose of our exercise we will be using PyTorch.
It also includes prebuilt tokenizers that will do the heavy lifting for us.
"""
# We will use the case-sensitive model since more context may be attributed to cased words or sentences.
PRE_TRAINED_MODEL_NAME = 'bert-base-cased'
"""We will use the case-sensitive model since more context may be attributed to cased words or sentences. The cased version simply works better. Intuitively, that makes sense, since "HEY!" might convey more sentiment than "hey".
Let's load a pre-trained Bert Tokenizer next
"""
tokenizer = BertTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME)
"""We'll use this text to understand the tokenization process:"""
sample_txt = 'Machine Learning is not as hard as previously thought. Things have obviously gotten easier over the years!'
"""We run some basic operations can convert the text to tokens and tokens to unique integers (ids):"""
tokens = tokenizer.tokenize(sample_txt)
token_ids = tokenizer.convert_tokens_to_ids(tokens)
print(f' Sentence: {sample_txt}')
print(f' Tokens: {tokens}')
print(f'Token IDs: {token_ids}')
"""### Special Tokens
`[SEP]` - marker for ending of a sentence
"""
tokenizer.sep_token, tokenizer.sep_token_id
"""`[CLS]` - we must add this token to the start of each sentence, so BERT knows we're doing classification"""
tokenizer.cls_token, tokenizer.cls_token_id
"""There is also a special token for padding:"""
tokenizer.pad_token, tokenizer.pad_token_id
"""BERT understands tokens that were in the training set. Everything else can be encoded using the `[UNK]` (unknown) token:"""
tokenizer.unk_token, tokenizer.unk_token_id
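# Added example (not in the original notebook): a symbol the WordPiece vocabulary
# cannot cover, such as an emoji, should fall back to the [UNK] token.
tokenizer.tokenize('Machine Learning is 🔥')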
"""All of that work can be done using the [`encode_plus()`](https://huggingface.co/transformers/main_classes/tokenizer.html#transformers.PreTrainedTokenizer.encode_plus) method:"""
# Our method converts our input natural text into a form that pytorch will understand,
# In this case, a tensor.
encoding = tokenizer.encode_plus(
sample_txt,
max_length=32,
add_special_tokens=True, # Add '[CLS]' and '[SEP]'
return_token_type_ids=False,
pad_to_max_length=True,
return_attention_mask=True,
return_tensors='pt', # Return PyTorch tensors
)
# A look at what is contained in our encoding.
encoding.keys()
"""The token ids are now stored in a Tensor and padded to a length of 32:"""
print(len(encoding['input_ids'][0]))
encoding['input_ids'][0]
"""The attention mask has the same length:"""
print(len(encoding['attention_mask'][0]))
encoding['attention_mask']
"""We can inverse the tokenization to have a look at the special tokens:"""
tokenizer.convert_ids_to_tokens(encoding['input_ids'][0])
"""### Choosing Sequence Length
BERT works with fixed-length sequences. We'll use a simple strategy to choose the max length. Let's store the token length of each review:
"""
token_lens = []
for txt in df.content:
tokens = tokenizer.encode(txt, max_length=512)
token_lens.append(len(tokens))
"""and plot the distribution:"""
# We realize that most of our sentences have a token length below 170
sns.distplot(token_lens)
plt.xlim([0, 256]);
plt.xlabel('Token count');
"""Most of the reviews seem to contain less than 128 tokens, but we'll be on the safe side and choose a maximum length of 160."""
# We will therefore set our maximum token length to 160
MAX_LEN = 160
"""We have all building blocks required to create a PyTorch dataset. Let's do it:"""
class GPReviewDataset(Dataset):
def __init__(self, reviews, targets, tokenizer, max_len):
self.reviews = reviews
self.targets = targets
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
return len(self.reviews)
def __getitem__(self, item):
review = str(self.reviews[item])
target = self.targets[item]
encoding = self.tokenizer.encode_plus(
review,
add_special_tokens=True,
max_length=self.max_len,
return_token_type_ids=False,
pad_to_max_length=True,
return_attention_mask=True,
return_tensors='pt',
)
return {
'review_text': review,
'input_ids': encoding['input_ids'].flatten(),
'attention_mask': encoding['attention_mask'].flatten(),
'targets': torch.tensor(target, dtype=torch.long)
}
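# Added sanity check (not part of the original notebook): build the dataset on a few
# rows and inspect a single item to see exactly what __getitem__ returns.
sample_ds = GPReviewDataset(
    reviews=df.content.to_numpy()[:4],
    targets=df.sentiment.to_numpy()[:4],
    tokenizer=tokenizer,
    max_len=MAX_LEN
)
sample_item = sample_ds[0]
print(sample_item['input_ids'].shape, sample_item['attention_mask'].shape, sample_item['targets'])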
"""The tokenizer does most of the heavy lifting for us. We also return the review texts, so it'll be easier to evaluate the predictions from our model. From there we split our data into test, train and validation sets."""
df_train, df_test = train_test_split(df, test_size=0.1, random_state=RANDOM_SEED)
df_val, df_test = train_test_split(df_test, test_size=0.5, random_state=RANDOM_SEED)
df_train.shape, df_val.shape, df_test.shape
"""We also need to create a couple of data loaders. Here's a helper function to do it. Our data loader will take our data set, divide them into batches and tokenize them. It will return a format that will be easier for us to handle."""
def create_data_loader(df, tokenizer, max_len, batch_size):
ds = GPReviewDataset(
reviews=df.content.to_numpy(),
targets=df.sentiment.to_numpy(),
tokenizer=tokenizer,
max_len=max_len
)
return DataLoader(
ds,
batch_size=batch_size,
num_workers=2
)
BATCH_SIZE = 16
train_data_loader = create_data_loader(df_train, tokenizer, MAX_LEN, BATCH_SIZE)
val_data_loader = create_data_loader(df_val, tokenizer, MAX_LEN, BATCH_SIZE)
test_data_loader = create_data_loader(df_test, tokenizer, MAX_LEN, BATCH_SIZE)
"""Let's have a look at an example batch from our training data loader:"""
data = next(iter(train_data_loader))
data.keys()
print(data['input_ids'].shape)
print(data['attention_mask'].shape)
print(data['targets'].shape)
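# Added check (not in the original notebook): decode the first review of the batch back
# into tokens to confirm the [CLS]/[SEP] markers and the [PAD] padding at the end.
print(tokenizer.convert_ids_to_tokens(data['input_ids'][0])[:10])
print(data['attention_mask'][0][-10:])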
"""## Sentiment Classification with BERT and Hugging Face
There are a lot of helpers that make using BERT easy with the Transformers library. Depending on the task you might want to use BertForSequenceClassification or BertForQuestionAnswering. For our use case we'll use the basic BERT model and build our sentiment classifier on top of it using transfer learning.
"""
bert_model = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME)
"""And try to use it on the encoding of our sample text:"""
last_hidden_state, pooled_output = bert_model(
input_ids=encoding['input_ids'],
attention_mask=encoding['attention_mask'],
return_dict = False # this is needed to get a tensor as result, instead of a dict of str
)
"""The `last_hidden_state` is a sequence of hidden states of the last layer of the model. Obtaining the `pooled_output` is done by applying the BertPooler on `last_hidden_state`:"""
last_hidden_state.shape
"""We have the hidden state for each of our 32 tokens (the length of our example sequence). But why 768? This is the number of hidden units in the feedforward-networks. We can verify that by checking the config:"""
bert_model.config.hidden_size
"""
You can think of the `pooled_output` as a summary of the content, according to BERT, although you might be able to do better. Let's look at the shape of the output:"""
pooled_output.shape
"""
We will then create a classifier that uses the BERT model:"""
class SentimentClassifier(nn.Module):
def __init__(self, n_classes):
super(SentimentClassifier, self).__init__()
self.bert = BertModel.from_pretrained(PRE_TRAINED_MODEL_NAME, return_dict=False)
self.drop = nn.Dropout(p=0.3)
self.out = nn.Linear(self.bert.config.hidden_size, n_classes)
def forward(self, input_ids, attention_mask):
_, pooled_output = self.bert(
input_ids=input_ids,
attention_mask=attention_mask
)
output = self.drop(pooled_output)
return self.out(output)
"""Our classifier delegates most of the heavy lifting to the BertModel. We use a dropout layer for some regularization and a fully-connected layer for our output. Note that we're returning the raw output of the last layer since that is required for the cross-entropy loss function in PyTorch to work.
We create an instance and delegate it to the GPU
"""
model = SentimentClassifier(len(class_names))
model = model.to(device)
"""We'll move the example batch of our training data to the GPU:"""
input_ids = data['input_ids'].to(device)
attention_mask = data['attention_mask'].to(device)
print(input_ids.shape) # batch size x seq length
print(attention_mask.shape) # batch size x seq length
"""To get the predicted probabilities from our trained model, we'll apply the softmax function to the outputs:"""
# Apply softmax to our output.
F.softmax(model(input_ids, attention_mask), dim=1)
"""### Training
To reproduce the training procedure from the BERT paper, we'll use the AdamW optimizer provided by Hugging Face. It corrects weight decay, so it's similar to the original paper. We will then use 4 epochs, as recommended by the paper. We'll also use a linear scheduler with no warmup steps to train our model.
"""
EPOCHS = 4
optimizer = AdamW(model.parameters(), lr=2e-5, correct_bias=False)
total_steps = len(train_data_loader) * EPOCHS
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=0,
num_training_steps=total_steps
)
loss_fn = nn.CrossEntropyLoss().to(device)
"""How do we come up with all hyperparameters? The BERT authors have some recommendations for fine-tuning:
- Batch size: 16, 32
- Learning rate (Adam): 5e-5, 3e-5, 2e-5
- Number of epochs: 2, 3, 4
For the purpose of our exercise, we will stick with the recommendation.
Note that increasing the batch size reduces the training time significantly, but gives you lower accuracy.
We will then have a helper function for training our model for one epoch.
"""
def train_epoch(
model,
data_loader,
loss_fn,
optimizer,
device,
scheduler,
n_examples
):
model = model.train()
losses = []
correct_predictions = 0
for d in data_loader:
input_ids = d["input_ids"].to(device)
attention_mask = d["attention_mask"].to(device)
targets = d["targets"].to(device)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask
)
_, preds = torch.max(outputs, dim=1)
loss = loss_fn(outputs, targets)
correct_predictions += torch.sum(preds == targets)
losses.append(loss.item())
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
return correct_predictions.double() / n_examples, np.mean(losses)
"""Training the model has some interesting paramters.
The scheduler gets called every time a batch is fed to the model. We're avoiding exploding gradients by clipping the gradients of the model by clipping the gradient when neccesary.
We will also have another helper that will help us evaluate our model based on
our data loader.
"""
def eval_model(model, data_loader, loss_fn, device, n_examples):
model = model.eval()
losses = []
correct_predictions = 0
with torch.no_grad():
for d in data_loader:
input_ids = d["input_ids"].to(device)
attention_mask = d["attention_mask"].to(device)
targets = d["targets"].to(device)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask
)
_, preds = torch.max(outputs, dim=1)
loss = loss_fn(outputs, targets)
correct_predictions += torch.sum(preds == targets)
losses.append(loss.item())
return correct_predictions.double() / n_examples, np.mean(losses)
"""Using those two, we can then write our training loop. We'll also store the training history:"""
# Commented out IPython magic to ensure Python compatibility.
# # We'll keep track of the time.
# # This will probably take a while.
# %%time
#
# history = defaultdict(list)
# best_accuracy = 0
#
# for epoch in range(EPOCHS):
#
# print(f'Epoch {epoch + 1}/{EPOCHS}')
# print('-' * 10)
#
# train_acc, train_loss = train_epoch(
# model,
# train_data_loader,
# loss_fn,
# optimizer,
# device,
# scheduler,
# len(df_train)
# )
#
# print(f'Train loss {train_loss} accuracy {train_acc}')
#
# val_acc, val_loss = eval_model(
# model,
# val_data_loader,
# loss_fn,
# device,
# len(df_val)
# )
#
# print(f'Val loss {val_loss} accuracy {val_acc}')
# print()
#
# history['train_acc'].append(train_acc)
# history['train_loss'].append(train_loss)
# history['val_acc'].append(val_acc)
# history['val_loss'].append(val_loss)
#
# # We will save the best state of our model as a binary file
# if val_acc > best_accuracy:
# torch.save(model.state_dict(), 'best_model_state.bin')
# best_accuracy = val_acc
"""After training, we will store the state of the best model, indicated by the highest validation accuracy.
We can retrieve this later and perform our prediction and classification instead of going through the whole training process again.
### Evaluating Our Model
It took a while to train our model; more epochs would likely improve the results further if we're willing to spend the time.
We can look at the training vs validation accuracy:
"""
plt.plot(history['train_acc'], label='train accuracy')
plt.plot(history['val_acc'], label='validation accuracy')
plt.title('Training history')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend()
plt.ylim([0, 1]);
"""We are guessing the training accuracy starts to approach 100% after 10 epochs or so. We could try to fine-tune the parameters a bit more, but this will be good enough for us."""
model = SentimentClassifier(len(class_names))
model.load_state_dict(torch.load('./best_model_state.bin'))
model = model.to(device)
"""## Evaluation
So how good is our model on predicting sentiment? Let's start by calculating the accuracy on the test data:
"""
test_acc, _ = eval_model(
model,
test_data_loader,
loss_fn,
device,
len(df_test)
)
test_acc.item()
"""The accuracy is about 2% higher on the test set. Our model seems to generalize well.
We'll define a helper function to get the predictions from our model.
"""
def get_predictions(model, data_loader):
model = model.eval()
review_texts = []
predictions = []
prediction_probs = []
real_values = []
with torch.no_grad():
for d in data_loader:
texts = d["review_text"]
input_ids = d["input_ids"].to(device)
attention_mask = d["attention_mask"].to(device)
targets = d["targets"].to(device)
outputs = model(
input_ids=input_ids,
attention_mask=attention_mask
)
_, preds = torch.max(outputs, dim=1)
probs = F.softmax(outputs, dim=1)
review_texts.extend(texts)
predictions.extend(preds)
prediction_probs.extend(probs)
real_values.extend(targets)
predictions = torch.stack(predictions).cpu()
prediction_probs = torch.stack(prediction_probs).cpu()
real_values = torch.stack(real_values).cpu()
return review_texts, predictions, prediction_probs, real_values
"""This is similar to the evaluation function, except that we're storing the text of the reviews and the predicted probabilities (by applying the softmax on the model outputs):"""
y_review_texts, y_pred, y_pred_probs, y_test = get_predictions(
model,
test_data_loader
)
"""Let's have a look at the classification report"""
print(classification_report(y_test, y_pred, target_names=class_names))
"""Looks like it is really hard to classify neutral (3 stars) reviews. And I can tell you from experience, looking at many reviews, those are hard to classify.
We'll continue with the confusion matrix:
"""
def show_confusion_matrix(confusion_matrix):
hmap = sns.heatmap(confusion_matrix, annot=True, fmt="d", cmap="Blues")
hmap.yaxis.set_ticklabels(hmap.yaxis.get_ticklabels(), rotation=0, ha='right')
hmap.xaxis.set_ticklabels(hmap.xaxis.get_ticklabels(), rotation=30, ha='right')
plt.ylabel('True sentiment')
plt.xlabel('Predicted sentiment');
cm = confusion_matrix(y_test, y_pred)
df_cm = pd.DataFrame(cm, index=class_names, columns=class_names)
show_confusion_matrix(df_cm)
"""This confirms that our model is having difficulty classifying neutral reviews. It mistakes those for negative and positive at a roughly equal frequency.
That's a good overview of the performance of our model. But let's have a look at an example from our test data.
"""
idx = 6
review_text = y_review_texts[idx]
true_sentiment = y_test[idx]
pred_df = pd.DataFrame({
'class_names': class_names,
'values': y_pred_probs[idx]
})
print("\n".join(wrap(review_text)))
print()
print(f'True sentiment: {class_names[true_sentiment]}')
"""Now we can look at the confidence of each sentiment of our model:"""
sns.barplot(x='values', y='class_names', data=pred_df, orient='h')
plt.ylabel('sentiment')
plt.xlabel('probability')
plt.xlim([0, 1]);
"""### Predicting on Raw Text
Let's use our model to predict the sentiment of some raw text:
"""
review_text = "I couldn't figure this out. Worst app ever!"
"""We have to use the tokenizer to encode the text:"""
encoded_review = tokenizer.encode_plus(
review_text,
max_length=MAX_LEN,
add_special_tokens=True,
return_token_type_ids=False,
pad_to_max_length=True,
return_attention_mask=True,
return_tensors='pt',
)
"""Let's get the predictions from our model:"""
input_ids = encoded_review['input_ids'].to(device)
attention_mask = encoded_review['attention_mask'].to(device)
output = model(input_ids, attention_mask)
_, prediction = torch.max(output, dim=1)
print(f'Review text: {review_text}')
print(f'Sentiment : {class_names[prediction]}')
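"""If we also want the model's confidence in this prediction, we can apply the softmax to the raw output, exactly as we did for the test set predictions (a small optional extension):"""
raw_probs = F.softmax(output, dim=1)
print(f'Confidence : {raw_probs[0][prediction].item():.4f}')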
"""## Conclusion
In this exercise, we used BERT for sentiment analysis. We built a custom classifier using the Hugging Face library and trained it on our app reviews dataset, and validated our model with the validation set. We achieved quite a high level of accuracy, with our model generalizing well.
The attention mask helped preserve the context of our embeddings, which gave our model a much more accurate understanding of what the reviews were saying.
In conclusion, we did a lot less work than if we had to implement this from scratch.
## References
- [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805)
- [L11 Language Models - Alec Radford (OpenAI)](https://www.youtube.com/watch?v=BnpB3GrpsfM)
- [The Illustrated BERT, ELMo, and co.](https://jalammar.github.io/illustrated-bert/)
- [BERT Fine-Tuning Tutorial with PyTorch](https://mccormickml.com/2019/07/22/BERT-fine-tuning/)
- [How to Fine-Tune BERT for Text Classification?](https://arxiv.org/pdf/1905.05583.pdf)
- [Huggingface Transformers](https://huggingface.co/transformers/)
- [BERT Explained: State of the art language model for NLP](https://towardsdatascience.com/bert-explained-state-of-the-art-language-model-for-nlp-f8b21a9b6270)
- [Read the `Getting Things Done with Pytorch`](https://github.com/curiousily/Getting-Things-Done-with-Pytorch)
""" | [] |
2024-01-10 | rjvgupta/selfactualize-ai-agent | agent~lambda~agent-handler~fsi_agent.py | from langchain.agents.tools import Tool
from langchain.agents.conversational.base import ConversationalAgent
from langchain.agents import AgentExecutor
from tools import tools
from datetime import datetime
PREFIX = "\n\nHuman: You are a Financial Services AI chatbot (Assistant) for a company called Octank Financial. Also, you can answer general questions about anything. You quickly respond to questions from a user with an answer and the sources you used to find your answer in the format: \
[Source 1: Source Title 1 - Source Link 1], \
[Source 2: Source Title 2 - Source Link 2], \
[Source n: Source Title n - Source Link n]. Provide two newline characters between your answer and the sources. By the way, the date is " + datetime.now().strftime("%m/%d/%Y, %H:%M:%S") + ".\n\nAssistant:"
FORMAT_INSTRUCTIONS = "\n\nHuman: \n\nAssistant:"
class FSIAgent():
def __init__(self,llm, memory) -> None:
self.prefix = PREFIX
self.ai_prefix = "Assistant"
self.human_prefix = "Human"
self.llm = llm
self.memory = memory
self.format_instructions = FORMAT_INSTRUCTIONS
self.agent = self.create_agent()
def create_agent(self):
fsi_agent = ConversationalAgent.from_llm_and_tools(
llm = self.llm,
tools = tools,
prefix = self.prefix,
ai_prefix = self.ai_prefix,
human_prefix = self.human_prefix,
format_instructions = self.format_instructions,
return_intermediate_steps = True,
return_source_documents = True
)
agent_executor = AgentExecutor.from_agent_and_tools(agent=fsi_agent, tools=tools, verbose=True, memory=self.memory, return_source_documents=True, return_intermediate_steps=True) # , handle_parsing_errors=True
return agent_executor
def run(self, input):
print("Running FSI Agent with input: " + str(input))
try:
response = self.agent(input)
except ValueError as e:
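            # Workaround: when the agent cannot parse the LLM output it raises a
            # ValueError; recover the raw text from the error message instead of
            # failing the whole request.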
response = str(e)
if not response.startswith("An output parsing error occurred"):
raise e
response = response.removeprefix("An output parsing error occurred. In order to pass this error back to the agent and have it try again, pass `handle_parsing_errors=True` to the AgentExecutor. This is the error: Could not parse LLM output: `").removesuffix("`")
return response
| [] |
2024-01-10 | whwhwana/alldata | mlops~modelscope~modelscope~models~cv~image_probing_model~backbone.py | # The implementation is adopted from OpenAI-CLIP,
# made pubicly available under the MIT License at https://github.com/openai/CLIP
import math
import sys
from collections import OrderedDict
from functools import reduce
from operator import mul
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torchvision import models
from .utils import convert_weights, load_pretrained
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed
# after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool,
# and the subsequent convolution has stride 1
self.downsample = nn.Sequential(
OrderedDict([('-1', nn.AvgPool2d(stride)),
('0',
nn.Conv2d(
inplanes,
planes * self.expansion,
1,
stride=1,
bias=False)),
('1', nn.BatchNorm2d(planes * self.expansion))]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self,
spacial_dim: int,
embed_dim: int,
num_heads: int,
output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(
torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1],
x.shape[2] * x.shape[3]).permute(2, 0, 1)
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)
x = x + self.positional_embedding[:, None, :].to(x.dtype)
x, _ = F.multi_head_attention_forward(
query=x,
key=x,
value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat(
[self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False)
return x[0]
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self,
d_model: int,
n_head: int,
attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(
OrderedDict([('c_fc', nn.Linear(d_model, d_model * 4)),
('gelu', QuickGELU()),
('c_proj', nn.Linear(d_model * 4, d_model))]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(
dtype=x.dtype,
device=x.device) if self.attn_mask is not None else None
return self.attn(
x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor, idx):
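        # Besides the usual pre-norm attention + MLP computation, collect the
        # pre-attention, attention and MLP activations of this layer so they can
        # be used later for probing.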
features = {}
x_norm = self.ln_1(x)
features['layer_{}_pre_attn'.format(idx)] = x_norm.permute(1, 0, 2)
attn = self.attention(x_norm)
features['layer_{}_attn'.format(idx)] = attn.permute(1, 0, 2)
x = x + attn
mlp = self.mlp(self.ln_2(x))
features['layer_{}_mlp'.format(idx)] = mlp.permute(1, 0, 2)
x = x + mlp
return x, features
class Transformer(nn.Module):
def __init__(self,
width: int,
layers: int,
heads: int,
attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.ModuleList()
for i in range(layers):
block = ResidualAttentionBlock(width, heads, attn_mask)
self.resblocks.append(block)
def forward(self, x: torch.Tensor):
features = {}
for idx, block in enumerate(self.resblocks):
x, block_feats = block(x, idx)
features.update(block_feats)
return x, features
class VisualTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int,
layers: int, heads: int, output_dim: int):
super().__init__()
print(input_resolution, patch_size, width, layers, heads, output_dim)
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(
in_channels=3,
out_channels=width,
kernel_size=patch_size,
stride=patch_size,
bias=False)
scale = width**-0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn(
(input_resolution // patch_size)**2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor, return_all=True):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1],
-1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
zeros = torch.zeros(
x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device)
# shape = [*, grid ** 2 + 1, width]
x = torch.cat([self.class_embedding.to(x.dtype) + zeros, x], dim=1)
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x, features = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if return_all:
features['pre_logits'] = x
return features
if self.proj is not None:
x = x @ self.proj
return x
class CLIPNet(nn.Module):
def __init__(self, arch_name, pretrained, **kwargs):
super(CLIPNet, self).__init__()
if arch_name == 'CLIP_ViTB32':
self.clip = VisualTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512)
elif arch_name in ('CLIP_ViTB16', 'CLIP_ViTB16_FP16'):
self.clip = VisualTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512)
elif arch_name in ('CLIP_ViTL14', 'CLIP_ViTL14_FP16'):
self.clip = VisualTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768)
else:
raise KeyError(f'Unsupported arch_name for CLIP, {arch_name}')
def forward(self, input_data):
output = self.clip(input_data)
return output
def CLIP(arch_name='CLIP_RN50',
use_pretrain=False,
load_from='',
state_dict=None,
**kwargs):
model = CLIPNet(arch_name=arch_name, pretrained=None, **kwargs)
if use_pretrain:
if arch_name.endswith('FP16'):
convert_weights(model.clip)
load_pretrained(model.clip, state_dict, load_from)
return model
class ProbingModel(torch.nn.Module):
def __init__(self, feat_size, num_classes):
super(ProbingModel, self).__init__()
self.linear = torch.nn.Linear(feat_size, num_classes)
def forward(self, x):
return self.linear(x)
| [] |
2024-01-10 | tooniez/ml-search | haystack~nodes~retriever~_embedding_encoder.py | import json
import logging
from abc import abstractmethod
from pathlib import Path
from typing import Optional, TYPE_CHECKING, Any, Callable, Dict, List, Union
import numpy as np
import requests
import torch
from sentence_transformers import InputExample
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SequentialSampler
from tqdm.auto import tqdm
from transformers import AutoModel, AutoTokenizer
from haystack.document_stores.base import BaseDocumentStore
from haystack.errors import OpenAIError, OpenAIRateLimitError, CohereError
from haystack.modeling.data_handler.dataloader import NamedDataLoader
from haystack.modeling.data_handler.dataset import convert_features_to_dataset, flatten_rename
from haystack.modeling.infer import Inferencer
from haystack.nodes.retriever._losses import _TRAINING_LOSSES
from haystack.schema import Document
from haystack.utils.reflection import retry_with_exponential_backoff
if TYPE_CHECKING:
from haystack.nodes.retriever import EmbeddingRetriever
logger = logging.getLogger(__name__)
class _BaseEmbeddingEncoder:
@abstractmethod
def embed_queries(self, queries: List[str]) -> np.ndarray:
"""
Create embeddings for a list of queries.
:param queries: List of queries to embed.
:return: Embeddings, one per input query, shape: (queries, embedding_dim)
"""
pass
@abstractmethod
def embed_documents(self, docs: List[Document]) -> np.ndarray:
"""
Create embeddings for a list of documents.
:param docs: List of documents to embed.
:return: Embeddings, one per input document, shape: (documents, embedding_dim)
"""
pass
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: Optional[int] = None,
batch_size: int = 16,
):
"""
Trains or adapts the underlying embedding model.
Each training data example is a dictionary with the following keys:
* question: The question string.
* pos_doc: Positive document string (the document containing the answer).
* neg_doc: Negative document string (the document that doesn't contain the answer).
* score: The score margin the answer must fall within.
:param training_data: The training data in a dictionary format. Required.
:type training_data: List[Dict[str, Any]]
:param learning_rate: The speed at which the model learns. Required. We recommend that you leave the default `2e-5` value.
:type learning_rate: float
:param n_epochs: The number of epochs (complete passes of the training data through the algorithm) that you want the model to go through. Required.
:type n_epochs: int
:param num_warmup_steps: The number of warmup steps for the model. Warmup steps are epochs when the learning rate is very low. You can use them at the beginning of the training to prevent early overfitting of your model. Required.
:type num_warmup_steps: int
:param batch_size: The batch size to use for the training. Optional. The default values is 16.
:type batch_size: int (optional)
"""
pass
def save(self, save_dir: Union[Path, str]):
"""
Save the model to the directory you specify.
:param save_dir: The directory where the model is saved. Required.
:type save_dir: Union[Path, str]
"""
pass
def _check_docstore_similarity_function(self, document_store: BaseDocumentStore, model_name: str):
"""
Check that document_store uses a similarity function
compatible with the embedding model
"""
if "sentence-transformers" in model_name.lower():
model_similarity = None
if "-cos-" in model_name.lower():
model_similarity = "cosine"
elif "-dot-" in model_name.lower():
model_similarity = "dot_product"
if model_similarity is not None and document_store.similarity != model_similarity:
logger.warning(
f"You seem to be using {model_name} model with the {document_store.similarity} function instead of the recommended {model_similarity}. "
f"This can be set when initializing the DocumentStore"
)
elif "dpr" in model_name.lower() and document_store.similarity != "dot_product":
logger.warning(
f"You seem to be using a DPR model with the {document_store.similarity} function. "
f"We recommend using dot_product instead. "
f"This can be set when initializing the DocumentStore"
)
class _DefaultEmbeddingEncoder(_BaseEmbeddingEncoder):
def __init__(self, retriever: "EmbeddingRetriever"):
self.embedding_model = Inferencer.load(
retriever.embedding_model,
revision=retriever.model_version,
task_type="embeddings",
extraction_strategy=retriever.pooling_strategy,
extraction_layer=retriever.emb_extraction_layer,
gpu=retriever.use_gpu,
batch_size=retriever.batch_size,
max_seq_len=retriever.max_seq_len,
num_processes=0,
use_auth_token=retriever.use_auth_token,
)
if retriever.document_store:
self._check_docstore_similarity_function(
document_store=retriever.document_store, model_name=retriever.embedding_model
)
def embed(self, texts: Union[List[List[str]], List[str], str]) -> np.ndarray:
# TODO: FARM's `sample_to_features_text` need to fix following warning -
# tokenization_utils.py:460: FutureWarning: `is_pretokenized` is deprecated and will be removed in a future version, use `is_split_into_words` instead.
emb = self.embedding_model.inference_from_dicts(dicts=[{"text": t} for t in texts])
emb = np.stack([r["vec"] for r in emb])
return emb
def embed_queries(self, queries: List[str]) -> np.ndarray:
"""
Create embeddings for a list of queries.
:param queries: List of queries to embed.
:return: Embeddings, one per input query, shape: (queries, embedding_dim)
"""
return self.embed(queries)
def embed_documents(self, docs: List[Document]) -> np.ndarray:
"""
Create embeddings for a list of documents.
:param docs: List of documents to embed.
:return: Embeddings, one per input document, shape: (documents, embedding_dim)
"""
passages = [d.content for d in docs]
return self.embed(passages)
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: Optional[int] = None,
batch_size: int = 16,
):
raise NotImplementedError(
"You can't train this retriever. You can only use the `train` method with sentence-transformers EmbeddingRetrievers."
)
def save(self, save_dir: Union[Path, str]):
raise NotImplementedError(
"You can't save your record as `save` only works for sentence-transformers EmbeddingRetrievers."
)
class _SentenceTransformersEmbeddingEncoder(_BaseEmbeddingEncoder):
def __init__(self, retriever: "EmbeddingRetriever"):
# pretrained embedding models coming from: https://github.com/UKPLab/sentence-transformers#pretrained-models
# e.g. 'roberta-base-nli-stsb-mean-tokens'
try:
from sentence_transformers import SentenceTransformer
except (ImportError, ModuleNotFoundError) as ie:
from haystack.utils.import_utils import _optional_component_not_installed
_optional_component_not_installed(__name__, "sentence", ie)
self.embedding_model = SentenceTransformer(
retriever.embedding_model, device=str(retriever.devices[0]), use_auth_token=retriever.use_auth_token
)
self.batch_size = retriever.batch_size
self.embedding_model.max_seq_length = retriever.max_seq_len
self.show_progress_bar = retriever.progress_bar
if retriever.document_store:
self._check_docstore_similarity_function(
document_store=retriever.document_store, model_name=retriever.embedding_model
)
def embed(self, texts: Union[List[str], str]) -> np.ndarray:
# texts can be a list of strings
# get back list of numpy embedding vectors
emb = self.embedding_model.encode(
texts, batch_size=self.batch_size, show_progress_bar=self.show_progress_bar, convert_to_numpy=True
)
return emb
def embed_queries(self, queries: List[str]) -> np.ndarray:
"""
Create embeddings for a list of queries.
:param queries: List of queries to embed.
:return: Embeddings, one per input query, shape: (queries, embedding_dim)
"""
return self.embed(queries)
def embed_documents(self, docs: List[Document]) -> np.ndarray:
"""
Create embeddings for a list of documents.
:param docs: List of documents to embed.
:return: Embeddings, one per input document, shape: (documents, embedding_dim)
"""
passages = [d.content for d in docs]
return self.embed(passages)
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: Optional[int] = None,
batch_size: int = 16,
train_loss: str = "mnrl",
):
if train_loss not in _TRAINING_LOSSES:
raise ValueError(f"Unrecognized train_loss {train_loss}. Should be one of: {_TRAINING_LOSSES.keys()}")
st_loss = _TRAINING_LOSSES[train_loss]
train_examples = []
for train_i in training_data:
missing_attrs = st_loss.required_attrs.difference(set(train_i.keys()))
if len(missing_attrs) > 0:
raise ValueError(
f"Some training examples don't contain the fields {missing_attrs} which are necessary when using the '{train_loss}' loss."
)
texts = [train_i["question"], train_i["pos_doc"]]
if "neg_doc" in train_i:
texts.append(train_i["neg_doc"])
if "score" in train_i:
train_examples.append(InputExample(texts=texts, label=train_i["score"]))
else:
train_examples.append(InputExample(texts=texts))
logger.info("Training/adapting %s with %s examples", self.embedding_model, len(train_examples))
train_dataloader = DataLoader(train_examples, batch_size=batch_size, drop_last=True, shuffle=True)
train_loss = st_loss.loss(self.embedding_model)
# Tune the model
self.embedding_model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=n_epochs,
optimizer_params={"lr": learning_rate},
warmup_steps=int(len(train_dataloader) * 0.1) if num_warmup_steps is None else num_warmup_steps,
)
def save(self, save_dir: Union[Path, str]):
self.embedding_model.save(path=str(save_dir))
class _RetribertEmbeddingEncoder(_BaseEmbeddingEncoder):
def __init__(self, retriever: "EmbeddingRetriever"):
self.progress_bar = retriever.progress_bar
self.batch_size = retriever.batch_size
self.max_length = retriever.max_seq_len
self.embedding_tokenizer = AutoTokenizer.from_pretrained(
retriever.embedding_model, use_auth_token=retriever.use_auth_token
)
self.embedding_model = AutoModel.from_pretrained(
retriever.embedding_model, use_auth_token=retriever.use_auth_token
).to(str(retriever.devices[0]))
def embed_queries(self, queries: List[str]) -> np.ndarray:
"""
Create embeddings for a list of queries.
:param queries: List of queries to embed.
:return: Embeddings, one per input query, shape: (queries, embedding_dim)
"""
query_text = [{"text": q} for q in queries]
dataloader = self._create_dataloader(query_text)
embeddings: List[np.ndarray] = []
disable_tqdm = True if len(dataloader) == 1 else not self.progress_bar
for i, batch in enumerate(tqdm(dataloader, desc=f"Creating Embeddings", unit=" Batches", disable=disable_tqdm)):
batch = {key: batch[key].to(self.embedding_model.device) for key in batch}
with torch.no_grad():
q_reps = (
self.embedding_model.embed_questions(
input_ids=batch["input_ids"], attention_mask=batch["padding_mask"]
)
.cpu()
.numpy()
)
embeddings.append(q_reps)
return np.concatenate(embeddings)
def embed_documents(self, docs: List[Document]) -> np.ndarray:
"""
Create embeddings for a list of documents.
:param docs: List of documents to embed.
:return: Embeddings, one per input document, shape: (documents, embedding_dim)
"""
doc_text = [{"text": d.content} for d in docs]
dataloader = self._create_dataloader(doc_text)
embeddings: List[np.ndarray] = []
disable_tqdm = True if len(dataloader) == 1 else not self.progress_bar
for i, batch in enumerate(tqdm(dataloader, desc=f"Creating Embeddings", unit=" Batches", disable=disable_tqdm)):
batch = {key: batch[key].to(self.embedding_model.device) for key in batch}
with torch.no_grad():
q_reps = (
self.embedding_model.embed_answers(
input_ids=batch["input_ids"], attention_mask=batch["padding_mask"]
)
.cpu()
.numpy()
)
embeddings.append(q_reps)
return np.concatenate(embeddings)
def _create_dataloader(self, text_to_encode: List[dict]) -> NamedDataLoader:
dataset, tensor_names = self.dataset_from_dicts(text_to_encode)
dataloader = NamedDataLoader(
dataset=dataset, sampler=SequentialSampler(dataset), batch_size=self.batch_size, tensor_names=tensor_names
)
return dataloader
def dataset_from_dicts(self, dicts: List[dict]):
texts = [x["text"] for x in dicts]
tokenized_batch = self.embedding_tokenizer(
texts,
return_token_type_ids=True,
return_attention_mask=True,
max_length=self.max_length,
truncation=True,
padding=True,
)
features_flat = flatten_rename(
tokenized_batch,
["input_ids", "token_type_ids", "attention_mask"],
["input_ids", "segment_ids", "padding_mask"],
)
dataset, tensornames = convert_features_to_dataset(features=features_flat)
return dataset, tensornames
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: Optional[int] = None,
batch_size: int = 16,
):
raise NotImplementedError(
"You can't train this retriever. You can only use the `train` method with sentence-transformers EmbeddingRetrievers."
)
def save(self, save_dir: Union[Path, str]):
raise NotImplementedError(
"You can't save your record as `save` only works for sentence-transformers EmbeddingRetrievers."
)
class _OpenAIEmbeddingEncoder(_BaseEmbeddingEncoder):
def __init__(self, retriever: "EmbeddingRetriever"):
# See https://beta.openai.com/docs/guides/embeddings for more details
# OpenAI has a max seq length of 2048 tokens and unknown max batch size
self.max_seq_len = min(2048, retriever.max_seq_len)
self.url = "https://api.openai.com/v1/embeddings"
self.api_key = retriever.api_key
self.batch_size = min(64, retriever.batch_size)
self.progress_bar = retriever.progress_bar
model_class: str = next(
(m for m in ["ada", "babbage", "davinci", "curie"] if m in retriever.embedding_model), "babbage"
)
self.query_model_encoder_engine = f"text-search-{model_class}-query-001"
self.doc_model_encoder_engine = f"text-search-{model_class}-doc-001"
self.tokenizer = AutoTokenizer.from_pretrained("gpt2")
def _ensure_text_limit(self, text: str) -> str:
"""
Ensure that length of the text is within the maximum length of the model.
OpenAI embedding models have a limit of 2048 tokens
"""
tokenized_payload = self.tokenizer(text)
return self.tokenizer.decode(tokenized_payload["input_ids"][: self.max_seq_len])
@retry_with_exponential_backoff(backoff_in_seconds=10, max_retries=5)
def embed(self, model: str, text: List[str]) -> np.ndarray:
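        # Call the OpenAI embeddings endpoint for one batch of texts, raise a typed
        # error on failure, and re-order the returned vectors by their index so they
        # match the order of the input texts.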
payload = {"model": model, "input": text}
headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
response = requests.request("POST", self.url, headers=headers, data=json.dumps(payload), timeout=30)
res = json.loads(response.text)
if response.status_code != 200:
openai_error: OpenAIError
if response.status_code == 429:
openai_error = OpenAIRateLimitError(f"API rate limit exceeded: {response.text}")
else:
openai_error = OpenAIError(
f"OpenAI returned an error.\n"
f"Status code: {response.status_code}\n"
f"Response body: {response.text}",
status_code=response.status_code,
)
raise openai_error
unordered_embeddings = [(ans["index"], ans["embedding"]) for ans in res["data"]]
ordered_embeddings = sorted(unordered_embeddings, key=lambda x: x[0])
generated_embeddings = [emb[1] for emb in ordered_embeddings]
return np.array(generated_embeddings)
def embed_batch(self, model: str, text: List[str]) -> np.ndarray:
all_embeddings = []
for i in tqdm(
range(0, len(text), self.batch_size), disable=not self.progress_bar, desc="Calculating embeddings"
):
batch = text[i : i + self.batch_size]
batch_limited = [self._ensure_text_limit(content) for content in batch]
generated_embeddings = self.embed(model, batch_limited)
all_embeddings.append(generated_embeddings)
return np.concatenate(all_embeddings)
def embed_queries(self, queries: List[str]) -> np.ndarray:
return self.embed_batch(self.query_model_encoder_engine, queries)
def embed_documents(self, docs: List[Document]) -> np.ndarray:
return self.embed_batch(self.doc_model_encoder_engine, [d.content for d in docs])
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: Optional[int] = None,
batch_size: int = 16,
):
raise NotImplementedError(f"Training is not implemented for {self.__class__}")
def save(self, save_dir: Union[Path, str]):
raise NotImplementedError(f"Saving is not implemented for {self.__class__}")
class _CohereEmbeddingEncoder(_BaseEmbeddingEncoder):
def __init__(self, retriever: "EmbeddingRetriever"):
# See https://docs.cohere.ai/embed-reference/ for more details
# Cohere has a max seq length of 4096 tokens and a max batch size of 16
self.max_seq_len = min(4096, retriever.max_seq_len)
self.url = "https://api.cohere.ai/embed"
self.api_key = retriever.api_key
self.batch_size = min(16, retriever.batch_size)
self.progress_bar = retriever.progress_bar
self.model: str = next((m for m in ["small", "medium", "large"] if m in retriever.embedding_model), "large")
self.tokenizer = AutoTokenizer.from_pretrained("gpt2")
def _ensure_text_limit(self, text: str) -> str:
"""
Ensure that length of the text is within the maximum length of the model.
Cohere embedding models have a limit of 4096 tokens
"""
tokenized_payload = self.tokenizer(text)
return self.tokenizer.decode(tokenized_payload["input_ids"][: self.max_seq_len])
@retry_with_exponential_backoff(backoff_in_seconds=10, max_retries=5, errors=(CohereError,))
def embed(self, model: str, text: List[str]) -> np.ndarray:
payload = {"model": model, "texts": text}
headers = {"Authorization": f"BEARER {self.api_key}", "Content-Type": "application/json"}
response = requests.request("POST", self.url, headers=headers, data=json.dumps(payload), timeout=30)
res = json.loads(response.text)
if response.status_code != 200:
raise CohereError(response.text, status_code=response.status_code)
generated_embeddings = [e for e in res["embeddings"]]
return np.array(generated_embeddings)
def embed_batch(self, text: List[str]) -> np.ndarray:
all_embeddings = []
for i in tqdm(
range(0, len(text), self.batch_size), disable=not self.progress_bar, desc="Calculating embeddings"
):
batch = text[i : i + self.batch_size]
batch_limited = [self._ensure_text_limit(content) for content in batch]
generated_embeddings = self.embed(self.model, batch_limited)
all_embeddings.append(generated_embeddings)
return np.concatenate(all_embeddings)
def embed_queries(self, queries: List[str]) -> np.ndarray:
return self.embed_batch(queries)
def embed_documents(self, docs: List[Document]) -> np.ndarray:
return self.embed_batch([d.content for d in docs])
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: Optional[int] = None,
batch_size: int = 16,
):
raise NotImplementedError(f"Training is not implemented for {self.__class__}")
def save(self, save_dir: Union[Path, str]):
raise NotImplementedError(f"Saving is not implemented for {self.__class__}")
_EMBEDDING_ENCODERS: Dict[str, Callable] = {
"farm": _DefaultEmbeddingEncoder,
"transformers": _DefaultEmbeddingEncoder,
"sentence_transformers": _SentenceTransformersEmbeddingEncoder,
"retribert": _RetribertEmbeddingEncoder,
"openai": _OpenAIEmbeddingEncoder,
"cohere": _CohereEmbeddingEncoder,
}
| [] |
2024-01-10 | alxschwrz/dalle2_python | dalle2_python.py | import os
import configparser
import sys
import webbrowser
import urllib.request
import openai
class Dalle:
def __init__(self, img_sz="512", n_images=2):
self._api_keys_location = "./config"
self._generated_image_location = "./output"
self._stream = True
self._img_sz = img_sz
self._n_images = n_images
self._image_urls = []
self._input_prompt = None
self._response = None
self.initialize_openai_api()
def create_template_ini_file(self):
"""
If the ini file does not exist create it and add the organization_id and
secret_key
"""
if not os.path.isfile(self._api_keys_location):
with open(self._api_keys_location, 'w') as f:
f.write('[openai]\n')
f.write('organization_id=\n')
f.write('secret_key=\n')
print('OpenAI API config file created at {}'.format(self._api_keys_location))
print('Please edit it and add your organization ID and secret key')
print('If you do not yet have an organization ID and secret key, you\n'
'need to register for OpenAI Codex: \n'
'https://openai.com/blog/openai-codex/')
sys.exit(1)
def initialize_openai_api(self):
"""
Initialize the OpenAI API
"""
# Check if file at API_KEYS_LOCATION exists
self.create_template_ini_file()
config = configparser.ConfigParser()
config.read(self._api_keys_location)
openai.organization_id = config['openai']['organization_id'].strip('"').strip("'")
openai.api_key = config['openai']['secret_key'].strip('"').strip("'")
del config
def read_from_command_line(self):
self._input_prompt = input("What image should dalle create: ")
def generate_image_from_prompt(self):
self._response = openai.Image.create(
prompt=self._input_prompt,
n=self._n_images,
size=f"{self._img_sz}x{self._img_sz}",
)
def get_urls_from_response(self):
for i in range(self._n_images):
self._image_urls.append(self._response['data'][i]['url'])
def open_urls_in_browser(self, image_urls=None):
if image_urls is None:
image_urls = self._image_urls
for url in image_urls:
webbrowser.open(url)
def save_urls_as_image(self):
if not os.path.isdir(self._generated_image_location):
os.mkdir(self._generated_image_location)
for idx, image_url in enumerate(self._image_urls):
file_name = f"{self._generated_image_location}/{self._input_prompt}_{idx}.png"
urllib.request.urlretrieve(image_url, file_name)
print(f"Generated image stored in: {file_name}")
def generate_and_save_images(self):
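        # End-to-end helper: read a prompt from the command line, generate images,
        # collect the returned URLs and save them locally as PNG files.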
self.read_from_command_line()
self.generate_image_from_prompt()
self.get_urls_from_response()
self.save_urls_as_image()
commandLineDalle = Dalle()
commandLineDalle.generate_and_save_images()
commandLineDalle.open_urls_in_browser() | [] |
2024-01-10 | sarah-4-coder/Ai_prompt | FlaskWithData.py | from flask import Flask, request, send_file, jsonify
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import openai
import tempfile
import os
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
upload_dir = os.path.join(app.root_path, 'uploads.csv')
def plotter(data, plot_type, time_start, time_end, column_name):
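    # Filter the data to the requested year range, draw the requested plot type
    # (point, bar or pie) for the chosen column, and save the figure to a temp PNG.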
req_data = data[(data['Year'] >= time_start) & (data['Year'] <= time_end)]
plt.figure(figsize=(8, 6))
if "point" in plot_type.lower():
sns.pointplot(x=req_data["Year"], y=req_data[column_name])
elif "bar" in plot_type.lower():
sns.barplot(x=req_data["Year"], y=req_data[column_name])
elif "pie" in plot_type.lower():
colors = sns.color_palette('pastel')[0:5]
plt.pie(req_data[column_name], labels=req_data["Year"], colors=colors)
plt.xlabel('Year')
plt.ylabel(column_name)
plt.title(f'{plot_type.capitalize()} of {column_name} ({time_start}-{time_end})')
plt.xticks(rotation=90)
plt.tight_layout()
temp_dir = tempfile.gettempdir()
temp_file = os.path.join(temp_dir, 'temp_figure.png')
plt.savefig(temp_file)
plt.close()
return temp_file
api_key = "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"
openai.api_key = api_key
def extract_categories(prompt_text):
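    # Ask the model to label column_name, time_start, time_end and plot_type, then
    # parse the completion line by line; the slice offsets below assume the model
    # echoes the labels in exactly this order and wording.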
prompt = "Given the following statement, identify the categories for column_name, time_start, time_end, and plot_type:\n\n"\
"\"" + prompt_text + '"'
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=50,
temperature=0.6,
stop=None
)
categories = response.choices[0].text.strip().split('\n')
column_name = categories[0][13:]
column_name = column_name.replace(" ", "")
time_start = int(categories[1][12:])
time_end = int(categories[2][10:])
plot_type = categories[3][11:]
plot_type = plot_type.lower()
plot_type = plot_type.replace(" ", "")
if 'plot' not in plot_type:
plot_type = plot_type + 'plot'
return column_name, time_start, time_end, plot_type
@app.route('/generate_plot', methods=['POST'])
def generate_plot():
try:
print(request.form)
request_data = request.form
prompt_text = request_data.get('prompt_text')
file = request.files['file']
if file and file.filename.endswith('.csv'):
file.save(upload_dir)
else:
return jsonify({'error': 'Invalid or missing CSV file'})
        data = pd.read_csv(upload_dir)
column_name, time_start, time_end, plot_type = extract_categories(prompt_text)
temp_file_path = plotter(data, plot_type, time_start, time_end, column_name)
return send_file(temp_file_path, mimetype='image/png')
except Exception as e:
print(e)
return jsonify({'error': str(e)})
if __name__ == '__main__':
app.run(debug=True)
| [
"Given the following statement, identify the categories for column_name, time_start, time_end, and plot_type:\n\n\"PLACEHOLDER\"",
"prompt_text"
] |
2024-01-10 | sarah-4-coder/Ai_prompt | 123.py | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import openai
data = pd.read_csv("Ai_prompt\data.csv")
def plotter(data, plot_type, time_start, time_end, column_name):
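    # Filter the data to the requested year range, draw the requested plot type
    # (point, bar or pie) for the chosen column, then save and show the figure.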
req_data = data[(data['Year'] >= time_start) & (data['Year'] <= time_end)]
if "point" in plot_type.lower():
sns.pointplot(x=req_data["Year"], y=req_data[column_name])
if "bar" in plot_type.lower():
sns.barplot(x=req_data["Year"], y=req_data[column_name])
if "pie" in plot_type.lower():
colors = sns.color_palette('pastel')[0:5]
plt.pie(req_data["Year"], labels=req_data[column_name], colors=colors)
    plt.xlabel('Year')
    plt.ylabel(column_name)
    plt.title(f'{plot_type.capitalize()} of {column_name}')
plt.xticks(rotation=90) # Rotate x-axis labels for better readability
plt.tight_layout()
plt.savefig('1.png')
plt.show()
# Set your OpenAI API key
api_key = "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"
# Initialize the OpenAI API client
openai.api_key = api_key
# Example prompt (kept for reference; the live prompt is built from user input below)
prompt = "Given the following statement, identify the categories for column_name, time_start, time_end, and plot_type:\n\n"\
         "\"Prepare a bar plot for the column agriculture between the time period of 1985 and 1989 from the data.\""
prompt1 = input("Enter prompt")
prompt_fin = "Given the following statement, identify the categories for column_name, time_start, time_end, and plot_type:\n\n" \
"\"" + prompt1 + '"'
# Call the OpenAI API to get the categories
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt_fin,
max_tokens=50, # Adjust as needed to capture the required information
temperature=0.6,
stop=None
)
categories = response.choices[0].text.strip().split('\n')
column_name = categories[0][13:]
column_name = column_name.replace(" ", "")
time_start = int(categories[1][12:])
time_end = int(categories[2][10:])
plot_type = categories[3][11:]
plot_type = plot_type.lower()
plot_type = plot_type.replace(" ", "")
if 'plot' not in plot_type:
plot_type = plot_type + 'plot'
plotter(data, plot_type, time_start, time_end, column_name) | [
"Given the following statement, identify the categories for column_name, time_start, time_end, and plot_type:\n\n\"PLACEHOLDER\"",
"Enter prompt",
"Given the following statement, identify the categories for column_name, time_start, time_end, and plot_type:\n\n\"Prepare a bar plot for the column agriculture between the time period of 1985 and 1989 from the data.\""
] |
2024-01-10 | sarah-4-coder/Ai_prompt | Flassk.py | from flask import Flask, request, send_file, jsonify
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import openai
import tempfile
import os
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
data = pd.read_csv("data.csv")
def plotter(data, plot_type, time_start, time_end, column_name):
req_data = data[(data['Year'] >= time_start) & (data['Year'] <= time_end)]
plt.figure(figsize=(8, 6))
if "point" in plot_type.lower():
sns.pointplot(x=req_data["Year"], y=req_data[column_name])
elif "bar" in plot_type.lower():
sns.barplot(x=req_data["Year"], y=req_data[column_name])
elif "pie" in plot_type.lower():
colors = sns.color_palette('pastel')[0:5]
plt.pie(req_data[column_name], labels=req_data["Year"], colors=colors)
plt.xlabel('Year')
plt.ylabel(column_name)
plt.title(f'{plot_type.capitalize()} of {column_name} ({time_start}-{time_end})')
plt.xticks(rotation=90)
plt.tight_layout()
temp_dir = tempfile.gettempdir()
temp_file = os.path.join(temp_dir, 'temp_figure.png')
plt.savefig(temp_file)
plt.close()
return temp_file
api_key = "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"
openai.api_key = api_key
def extract_categories(prompt_text):
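    # Ask the model to label column_name, time_start, time_end and plot_type, then
    # parse the completion line by line; the slice offsets below assume the model
    # echoes the labels in exactly this order and wording.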
prompt = "Given the following statement, identify the categories for column_name, time_start, time_end, and plot_type:\n\n"\
"\"" + prompt_text + '"'
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
max_tokens=50,
temperature=0.6,
stop=None
)
categories = response.choices[0].text.strip().split('\n')
column_name = categories[0][13:]
column_name = column_name.replace(" ", "")
time_start = int(categories[1][12:])
time_end = int(categories[2][10:])
plot_type = categories[3][11:]
plot_type = plot_type.lower()
plot_type = plot_type.replace(" ", "")
if 'plot' not in plot_type:
plot_type = plot_type + 'plot'
return column_name, time_start, time_end, plot_type
@app.route('/generate_plot', methods=['POST'])
def generate_plot():
try:
request_data = request.form
prompt_text = request_data.get('prompt_text')
column_name, time_start, time_end, plot_type = extract_categories(prompt_text)
temp_file_path = plotter(data, plot_type, time_start, time_end, column_name)
return send_file(temp_file_path, mimetype='image/png')
except Exception as e:
return jsonify({'error': str(e)})
if __name__ == '__main__':
app.run(debug=True)
| [
"Given the following statement, identify the categories for column_name, time_start, time_end, and plot_type:\n\n\"PLACEHOLDER\"",
"prompt_text"
] |
2024-01-10 | gurugithub/CXChat | datachad~backend~deeplake.py | from glob import glob
from typing import List
import deeplake
from deeplake.client.client import DeepLakeBackendClient
from deeplake.util.bugout_reporter import deeplake_reporter
from langchain.schema import Document
from langchain.vectorstores import DeepLake, VectorStore
from datachad.backend.constants import DATA_PATH, DEFAULT_USER, FORCE_LOCAL_DEEPLAKE
from datachad.backend.io import clean_string_for_storing
from datachad.backend.loader import load_data_source, split_docs
from datachad.backend.logging import logger
from datachad.backend.models import MODES, get_embeddings
from datachad.backend.utils import clean_string_for_storing
SPLIT = "_"
def list_deeplake_datasets(
org_id: str = "",
token: str = None,
) -> None:
"""List all available Deep Lake cloud datasets for a given user / orgnaization.
Removed from deeplake in: https://github.com/activeloopai/deeplake/pull/2182/files
"""
deeplake_reporter.feature_report(
feature_name="list",
parameters={"org_id": org_id},
)
def get_datasets(self, workspace: str):
LIST_DATASETS = "/api/datasets/{}"
suffix_public = LIST_DATASETS.format("public")
suffix_user = LIST_DATASETS.format("all")
if workspace:
res_datasets = self.get_workspace_datasets(
workspace, suffix_public, suffix_user
)
else:
public_datasets = self.request(
"GET",
suffix_public,
endpoint=self.endpoint(),
).json()
user_datasets = self.request(
"GET",
suffix_user,
endpoint=self.endpoint(),
).json()
res_datasets = public_datasets + user_datasets
return [ds["_id"] for ds in res_datasets]
client = DeepLakeBackendClient(token=token)
client.get_datasets = get_datasets
datasets = client.get_datasets(client, workspace=org_id)
return datasets
def get_deeplake_dataset_path(dataset_name: str, options: dict, credentials: dict):
# TODO add user id and dataset size as unique id
if options["mode"] == MODES.LOCAL or FORCE_LOCAL_DEEPLAKE:
dataset_path = str(DATA_PATH / dataset_name)
else:
dataset_path = f"hub://{credentials['activeloop_id']}/{dataset_name}"
return dataset_path
def delete_all_deeplake_datasets(credentials: dict):
datasets = list_deeplake_datasets(
credentials["activeloop_id"], credentials["activeloop_token"]
)
for dataset in datasets:
path = f"hub://{dataset}"
logger.info(f"Deleting dataset: {path}")
deeplake.delete(path, token=credentials["activeloop_token"], force=True)
def get_existing_deeplake_vector_store_paths(
options: str, credentials: dict
) -> list[str]:
if options["mode"] == MODES.LOCAL or FORCE_LOCAL_DEEPLAKE:
return glob(str(DATA_PATH / "*"), recursive=False)
else:
dataset_names = list_deeplake_datasets(
credentials["activeloop_id"], credentials["activeloop_token"]
)
dataset_pahs = [f"hub://{name}" for name in dataset_names]
return dataset_pahs
def get_deeplake_vector_store_paths_for_user(
options: str, credentials: dict
) -> list[str]:
all_paths = get_existing_deeplake_vector_store_paths(options, credentials)
# TODO: replace DEFAULT_USER with user id once supported
user_paths = [p for p in all_paths if p.split(SPLIT)[-1] == DEFAULT_USER]
return user_paths
def get_data_source_from_deeplake_dataset_path(dataset_path):
return dataset_path.split(SPLIT)[-4].split("/")[-1]
def get_deeplake_vector_store_path(
data_source: str, options: dict, credentials: dict
) -> str:
dataset_name = (
f"{clean_string_for_storing(data_source)}"
f"{SPLIT}{options['chunk_size']}-{options['chunk_overlap_pct']}"
f"{SPLIT}{options['model'].embedding}"
# TODO: replace DEFAULT_USER with user id once supported
f"{SPLIT}{DEFAULT_USER}"
)
dataset_path = get_deeplake_dataset_path(dataset_name, options, credentials)
return dataset_path
def get_deeplake_docs_path(data_source: str, options: dict, credentials: dict) -> str:
dataset_name = clean_string_for_storing(data_source)
dataset_name += "-docs"
dataset_path = get_deeplake_dataset_path(dataset_name, options, credentials)
return dataset_path
def load_docs_from_deeplake(docs_path: str, credentials: dict) -> List[Document]:
ds = deeplake.load(docs_path, token=credentials["activeloop_token"])
metadatas = ds["metadata"].data()["value"]
texts = ds["text"].data()["value"]
docs = [
Document(
page_content=text,
metadata=metadata,
)
for text, metadata in zip(texts, metadatas)
]
return docs
def store_docs_to_deeplake(docs: List[Document], docs_path: str, credentials: dict):
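    # Persist the raw documents (text + metadata) to their own Deep Lake dataset so
    # the original data source does not have to be re-loaded and re-parsed on later runs.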
ds = deeplake.empty(docs_path, token=credentials["activeloop_token"])
ds.create_tensor(
"text",
htype="text",
create_id_tensor=False,
create_sample_info_tensor=False,
create_shape_tensor=False,
chunk_compression="lz4",
)
ds.create_tensor(
"metadata",
htype="json",
create_id_tensor=False,
create_sample_info_tensor=False,
create_shape_tensor=False,
chunk_compression="lz4",
)
for doc in docs:
ds.append(
{
"text": doc.page_content,
"metadata": doc.metadata,
}
)
ds.commit()
logger.info(f"Stored docs to: {docs_path}")
def load_data_source_or_docs_from_deeplake(
data_source: str, options: dict, credentials: dict
) -> List[Document]:
if options["store_docs_extra"]:
docs_path = get_deeplake_docs_path(data_source, options, credentials)
if deeplake.exists(docs_path, token=credentials["activeloop_token"]):
logger.info(f"Docs exist -> loading docs: {docs_path}")
docs = load_docs_from_deeplake(docs_path, credentials)
else:
logger.info(
f"Docs do not exist for data source -> loading data source: {data_source}"
)
docs = load_data_source(data_source)
store_docs_to_deeplake(docs, docs_path, credentials)
logger.info(f"Docs {docs_path} loaded!")
else:
docs = load_data_source(data_source)
return docs
def get_deeplake_vector_store(
data_source: str, vector_store_path: str, options: dict, credentials: dict
) -> VectorStore:
# either load existing vector store or upload a new one to the hub
embeddings = get_embeddings(options, credentials)
if not vector_store_path:
vector_store_path = get_deeplake_vector_store_path(
data_source, options, credentials
)
if deeplake.exists(vector_store_path, token=credentials["activeloop_token"]):
logger.info(f"Vector Store '{vector_store_path}' exists -> loading")
vector_store = DeepLake(
dataset_path=vector_store_path,
read_only=True,
embedding_function=embeddings,
token=credentials["activeloop_token"],
)
else:
logger.info(f"Vector Store '{vector_store_path}' does not exist -> uploading")
docs = load_data_source_or_docs_from_deeplake(data_source, options, credentials)
docs = split_docs(docs, options)
vector_store = DeepLake.from_documents(
docs,
embeddings,
dataset_path=vector_store_path,
token=credentials["activeloop_token"],
)
logger.info(f"Vector Store {vector_store_path} loaded!")
return vector_store
| [] |
2024-01-10 | gurugithub/CXChat | datachad~backend~loader.py | import os
import shutil
from pathlib import Path
from typing import List
from langchain.document_loaders import (
CSVLoader,
EverNoteLoader,
GitLoader,
NotebookLoader,
OnlinePDFLoader,
PDFMinerLoader,
PythonLoader,
TextLoader,
UnstructuredEPubLoader,
UnstructuredFileLoader,
UnstructuredHTMLLoader,
UnstructuredMarkdownLoader,
UnstructuredODTLoader,
UnstructuredPowerPointLoader,
UnstructuredWordDocumentLoader,
WebBaseLoader,
)
from langchain.document_loaders.base import BaseLoader
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from tqdm import tqdm
from datachad.backend.constants import DATA_PATH
from datachad.backend.logging import logger
from datachad.backend.models import get_tokenizer
class AutoGitLoader:
def __init__(self, data_source: str) -> None:
self.data_source = data_source
def load(self) -> List[Document]:
# We need to try both common main branches
# Thank you github for the "master" to "main" switch
# we need to make sure the data path exists
if not os.path.exists(DATA_PATH):
os.makedirs(DATA_PATH)
repo_name = self.data_source.split("/")[-1].split(".")[0]
repo_path = str((DATA_PATH / repo_name).absolute())
clone_url = self.data_source
if os.path.exists(repo_path):
clone_url = None
branches = ["main", "master"]
for branch in branches:
try:
docs = GitLoader(repo_path, clone_url, branch).load()
break
except Exception as e:
logger.error(f"Error loading git: {e}")
if os.path.exists(repo_path):
# cleanup repo afterwards
shutil.rmtree(repo_path)
try:
return docs
except:
raise RuntimeError(
"Error loading git. Make sure to use HTTPS GitHub repo links."
)
FILE_LOADER_MAPPING = {
".csv": (CSVLoader, {"encoding": "utf-8"}),
".doc": (UnstructuredWordDocumentLoader, {}),
".docx": (UnstructuredWordDocumentLoader, {}),
".enex": (EverNoteLoader, {}),
".epub": (UnstructuredEPubLoader, {}),
".html": (UnstructuredHTMLLoader, {}),
".md": (UnstructuredMarkdownLoader, {}),
".odt": (UnstructuredODTLoader, {}),
".pdf": (PDFMinerLoader, {}),
".ppt": (UnstructuredPowerPointLoader, {}),
".pptx": (UnstructuredPowerPointLoader, {}),
".txt": (TextLoader, {"encoding": "utf8"}),
".ipynb": (NotebookLoader, {}),
".py": (PythonLoader, {}),
# Add more mappings for other file extensions and loaders as needed
}
WEB_LOADER_MAPPING = {
".git": (AutoGitLoader, {}),
".pdf": (OnlinePDFLoader, {}),
}
def load_document(
file_path: str,
mapping: dict = FILE_LOADER_MAPPING,
default_loader: BaseLoader = UnstructuredFileLoader,
) -> Document:
# Choose loader from mapping, load default if no match found
ext = "." + file_path.rsplit(".", 1)[-1]
if ext in mapping:
loader_class, loader_args = mapping[ext]
loader = loader_class(file_path, **loader_args)
else:
loader = default_loader(file_path)
return loader.load()
def load_directory(path: str, silent_errors=True) -> List[Document]:
# We don't load hidden files starting with "."
all_files = list(Path(path).rglob("**/[!.]*"))
results = []
with tqdm(total=len(all_files), desc="Loading documents", ncols=80) as pbar:
for file in all_files:
try:
results.extend(load_document(str(file)))
except Exception as e:
if silent_errors:
logger.error(f"failed to load {file}")
else:
raise e
pbar.update()
return results
def load_data_source(data_source: str) -> List[Document]:
# Ugly thing that decides how to load data
# It aint much, but it's honest work
is_web = data_source.startswith("http")
is_dir = os.path.isdir(data_source)
is_file = os.path.isfile(data_source)
docs = None
try:
if is_dir:
docs = load_directory(data_source)
elif is_file:
docs = load_document(data_source)
elif is_web:
docs = load_document(data_source, WEB_LOADER_MAPPING, WebBaseLoader)
return docs
except Exception as e:
error_msg = f"Failed to load your data source '{data_source}'."
logger.error(error_msg)
e.args += (error_msg,)
raise e
def split_docs(docs: List[Document], options: dict) -> List[Document]:
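    # Split documents into chunks whose length is measured in embedding-model tokens;
    # the overlap is derived from chunk_size via the chunk_overlap_pct option.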
tokenizer = get_tokenizer(options)
def length_function(text: str) -> int:
# count chunks like the embeddings model tokenizer does
return len(tokenizer.encode(text))
chunk_overlap = int(options["chunk_size"] * options["chunk_overlap_pct"] / 100)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=options["chunk_size"],
chunk_overlap=chunk_overlap,
length_function=length_function,
separators=["\n\n", "\n", " ", ""],
)
splitted_docs = text_splitter.split_documents(docs)
logger.info(f"Loaded: {len(splitted_docs)} document chucks")
return splitted_docs
| [] |
2024-01-10 | gurugithub/CXChat | datachad~streamlit~helper.py | import os
import deeplake
import openai
import streamlit as st
from dotenv import load_dotenv
from langchain.callbacks import OpenAICallbackHandler, get_openai_callback
from datachad.backend.chain import get_qa_chain
from datachad.backend.deeplake import (
get_data_source_from_deeplake_dataset_path,
get_deeplake_vector_store_paths_for_user,
)
from datachad.backend.io import delete_files, save_files
from datachad.backend.logging import logger
from datachad.backend.models import MODELS, MODES
from datachad.streamlit.constants import (
ACTIVELOOP_HELP,
AUTHENTICATION_HELP,
CHUNK_OVERLAP_PCT,
CHUNK_SIZE,
DEFAULT_DATA_SOURCE,
DISTANCE_METRIC,
ENABLE_ADVANCED_OPTIONS,
ENABLE_LOCAL_MODE,
K_FETCH_K_RATIO,
LOCAL_MODE_DISABLED_HELP,
MAX_TOKENS,
MAXIMAL_MARGINAL_RELEVANCE,
MODE_HELP,
MODEL_N_CTX,
OPENAI_HELP,
PAGE_ICON,
PROJECT_URL,
STORE_DOCS_EXTRA,
TEMPERATURE,
)
# loads environment variables
load_dotenv()
def initialize_session_state():
# Initialise all session state variables with defaults
SESSION_DEFAULTS = {
"past": [],
"usage": {},
"chat_history": [],
"generated": [],
"auth_ok": False,
"chain": None,
"openai_api_key": None,
"activeloop_token": None,
"activeloop_id": None,
"uploaded_files": None,
"info_container": None,
"data_source": DEFAULT_DATA_SOURCE,
"mode": MODES.OPENAI,
"model": MODELS.GPT35TURBO,
"k_fetch_k_ratio": K_FETCH_K_RATIO,
"chunk_size": CHUNK_SIZE,
"chunk_overlap_pct": CHUNK_OVERLAP_PCT,
"temperature": TEMPERATURE,
"max_tokens": MAX_TOKENS,
"model_n_ctx": MODEL_N_CTX,
"distance_metric": DISTANCE_METRIC,
"maximal_marginal_relevance": MAXIMAL_MARGINAL_RELEVANCE,
"store_docs_extra": STORE_DOCS_EXTRA,
"vector_store": None,
"existing_vector_stores": [],
}
for k, v in SESSION_DEFAULTS.items():
if k not in st.session_state:
st.session_state[k] = v
def authentication_form() -> None:
# widget for authentication input form
st.title("Authentication", help=AUTHENTICATION_HELP)
with st.form("authentication"):
openai_api_key = st.text_input(
f"{st.session_state['mode']} API Key",
type="password",
help=OPENAI_HELP,
placeholder="This field is mandatory",
)
activeloop_token = st.text_input(
"ActiveLoop Token",
type="password",
help=ACTIVELOOP_HELP,
placeholder="Optional, using ours if empty",
)
activeloop_id = st.text_input(
"ActiveLoop Organisation Name",
type="password",
help=ACTIVELOOP_HELP,
placeholder="Optional, using ours if empty",
)
submitted = st.form_submit_button("Submit")
if submitted:
authenticate(openai_api_key, activeloop_token, activeloop_id)
def advanced_options_form() -> None:
# Input Form that takes advanced options and rebuilds chain with them
advanced_options = st.checkbox(
"Advanced Options", help="Caution! This may break things!"
)
if advanced_options:
with st.form("advanced_options"):
st.selectbox(
"model",
options=MODELS.for_mode(st.session_state["mode"]),
help=f"Learn more about which models are supported [here]({PROJECT_URL})",
key="model",
)
col1, col2 = st.columns(2)
col1.number_input(
"temperature",
min_value=0.0,
max_value=1.0,
value=TEMPERATURE,
help="Controls the randomness of the language model output",
key="temperature",
)
col2.number_input(
"max_tokens",
min_value=1,
max_value=30000,
value=MAX_TOKENS,
help=(
"Limits the documents returned from "
"database based on number of tokens"
),
key="max_tokens",
)
col1.number_input(
"chunk_size",
min_value=1,
max_value=100000,
value=CHUNK_SIZE,
help=(
"The size at which the text is divided into smaller chunks "
"before being embedded.\n\nChanging this parameter makes re-embedding "
"and re-uploading the data to the database necessary "
),
key="chunk_size",
)
col2.number_input(
"chunk_overlap",
min_value=0,
max_value=50,
value=CHUNK_OVERLAP_PCT,
help="The percentage of overlap between splitted document chunks",
key="chunk_overlap_pct",
)
applied = st.form_submit_button("Apply")
if applied:
update_chain()
def app_can_be_started():
# Only start App if authentication is OK or Local Mode
return st.session_state["auth_ok"] or st.session_state["mode"] == MODES.LOCAL
def update_model_on_mode_change():
# callback for mode selectbox
# the default model must be updated for the mode
st.session_state["model"] = MODELS.for_mode(st.session_state["mode"])[0]
# Chain needs to be rebuild if app can be started
if not st.session_state["chain"] is None and app_can_be_started():
update_chain()
def authentication_and_options_side_bar():
# Sidebar with Authentication and Advanced Options
with st.sidebar:
mode = st.selectbox(
"Mode",
MODES.all(),
key="mode",
help=MODE_HELP,
on_change=update_model_on_mode_change,
)
if mode == MODES.LOCAL and not ENABLE_LOCAL_MODE:
st.error(LOCAL_MODE_DISABLED_HELP, icon=PAGE_ICON)
st.stop()
if mode != MODES.LOCAL:
authentication_form()
st.info(f"Learn how it works [here]({PROJECT_URL})")
if not app_can_be_started():
st.stop()
# Advanced Options
if ENABLE_ADVANCED_OPTIONS:
advanced_options_form()
def authenticate(
openai_api_key: str, activeloop_token: str, activeloop_id: str
) -> None:
# Validate all credentials are set and correct
# Check for env variables to enable local dev and deployments with shared credentials
openai_api_key = (
openai_api_key
or os.environ.get("OPENAI_API_KEY")
or st.secrets.get("OPENAI_API_KEY")
)
activeloop_token = (
activeloop_token
or os.environ.get("ACTIVELOOP_TOKEN")
or st.secrets.get("ACTIVELOOP_TOKEN")
)
activeloop_id = (
activeloop_id
or os.environ.get("ACTIVELOOP_ID")
or st.secrets.get("ACTIVELOOP_ID")
)
if not (openai_api_key and activeloop_token and activeloop_id):
st.session_state["auth_ok"] = False
st.error("Credentials neither set nor stored", icon=PAGE_ICON)
return
try:
# Try to access openai and deeplake
with st.spinner("Authentifying..."):
openai.api_key = openai_api_key
openai.Model.list()
deeplake.exists(
f"hub://{activeloop_id}/DataChad-Authentication-Check",
token=activeloop_token,
)
except Exception as e:
logger.error(f"Authentication failed with {e}")
st.session_state["auth_ok"] = False
st.error("Authentication failed", icon=PAGE_ICON)
return
# store credentials in the session state
st.session_state["auth_ok"] = True
st.session_state["openai_api_key"] = openai_api_key
st.session_state["activeloop_token"] = activeloop_token
st.session_state["activeloop_id"] = activeloop_id
logger.info("Authentification successful!")
def update_chain() -> None:
# Build chain with parameters from session state and store it back
# Also delete chat history to not confuse the bot with old context
try:
with st.session_state["info_container"], st.spinner("Building Chain..."):
vector_store_path = None
data_source = st.session_state["data_source"]
if st.session_state["uploaded_files"] == st.session_state["data_source"]:
# Save files uploaded by streamlit to disk and set their path as data source.
# We need to repeat this at every chain update as long as data source is the uploaded file
# as we need to delete the files after each chain build to make sure to not pollute the app
# and to ensure data privacy by not storing user data
data_source = save_files(st.session_state["uploaded_files"])
if st.session_state["vector_store"] == st.session_state["data_source"]:
# Load an existing vector store if it has been chosen
vector_store_path = st.session_state["vector_store"]
data_source = get_data_source_from_deeplake_dataset_path(
vector_store_path
)
options = {
"mode": st.session_state["mode"],
"model": st.session_state["model"],
"k_fetch_k_ratio": st.session_state["k_fetch_k_ratio"],
"chunk_size": st.session_state["chunk_size"],
"chunk_overlap_pct": st.session_state["chunk_overlap_pct"],
"temperature": st.session_state["temperature"],
"max_tokens": st.session_state["max_tokens"],
"model_n_ctx": st.session_state["model_n_ctx"],
"distance_metric": st.session_state["distance_metric"],
"maximal_marginal_relevance": st.session_state[
"maximal_marginal_relevance"
],
"store_docs_extra": st.session_state["store_docs_extra"],
}
credentials = {
"openai_api_key": st.session_state["openai_api_key"],
"activeloop_token": st.session_state["activeloop_token"],
"activeloop_id": st.session_state["activeloop_id"],
}
st.session_state["chain"] = get_qa_chain(
data_source=data_source,
vector_store_path=vector_store_path,
options=options,
credentials=credentials,
)
if st.session_state["uploaded_files"] == st.session_state["data_source"]:
# remove uploaded files from disk
delete_files(st.session_state["uploaded_files"])
# update list of existing vector stores
st.session_state["existing_vector_stores"] = get_existing_vector_stores(
options, credentials
)
st.session_state["chat_history"] = []
print("data_source", data_source, type(data_source))
msg = f"Data source **{data_source}** is ready to go with model **{st.session_state['model']}**!"
logger.info(msg)
st.session_state["info_container"].info(msg, icon=PAGE_ICON)
except Exception as e:
msg = f"Failed to build chain for data source **{data_source}** with model **{st.session_state['model']}**: {e}"
logger.error(msg)
st.session_state["info_container"].error(msg, icon=PAGE_ICON)
def update_usage(cb: OpenAICallbackHandler) -> None:
# Accumulate API call usage via callbacks
logger.info(f"Usage: {cb}")
callback_properties = [
"total_tokens",
"prompt_tokens",
"completion_tokens",
"total_cost",
]
for prop in callback_properties:
value = getattr(cb, prop, 0)
st.session_state["usage"].setdefault(prop, 0)
st.session_state["usage"][prop] += value
def generate_response(prompt: str) -> str:
# call the chain to generate responses and add them to the chat history
with st.spinner("Generating response"), get_openai_callback() as cb:
response = st.session_state["chain"](
{"question": prompt, "chat_history": st.session_state["chat_history"]}
)
update_usage(cb)
logger.info(f"Response: '{response}'")
st.session_state["chat_history"].append((prompt, response["answer"]))
return response["answer"]
def get_existing_vector_stores(options: dict, credentials: dict) -> list[str]:
return [None] + get_deeplake_vector_store_paths_for_user(options, credentials)
def format_vector_stores(option: str) -> str:
if option is not None:
return get_data_source_from_deeplake_dataset_path(option)
return option
| [] |
2024-01-10 | gurugithub/CXChat | datachad~backend~models.py | from dataclasses import dataclass
from typing import Any, List
import streamlit as st
import tiktoken
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.embeddings.openai import Embeddings, OpenAIEmbeddings
from langchain.llms import GPT4All
from transformers import AutoTokenizer
from datachad.backend.constants import GPT4ALL_BINARY, MODEL_PATH
from datachad.backend.logging import logger
class Enum:
@classmethod
def all(cls) -> List[Any]:
return [v for k, v in cls.__dict__.items() if not k.startswith("_")]
@dataclass
class Model:
name: str
mode: str
embedding: str
path: str = None # for local models only
def __str__(self) -> str:
return self.name
class MODES(Enum):
# Add more modes as needed
OPENAI = "OpenAI"
LOCAL = "Local"
class EMBEDDINGS(Enum):
# Add more embeddings as needed
OPENAI = "text-embedding-ada-002"
HUGGINGFACE = "sentence-transformers/all-MiniLM-L6-v2"
class MODELS(Enum):
# Add more models as needed
GPT35TURBO = Model(
name="gpt-3.5-turbo",
mode=MODES.OPENAI,
embedding=EMBEDDINGS.OPENAI,
)
GPT4 = Model(name="gpt-4", mode=MODES.OPENAI, embedding=EMBEDDINGS.OPENAI)
GPT4ALL = Model(
name="GPT4All",
mode=MODES.LOCAL,
embedding=EMBEDDINGS.HUGGINGFACE,
path=str(MODEL_PATH / GPT4ALL_BINARY),
)
@classmethod
def for_mode(cls, mode) -> List[Model]:
return [m for m in cls.all() if isinstance(m, Model) and m.mode == mode]
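# With the defaults defined above, MODELS.for_mode(MODES.OPENAI) yields the
# GPT35TURBO and GPT4 entries and MODELS.for_mode(MODES.LOCAL) yields only GPT4ALL;
# any additional Model attributes added later are picked up the same way via Enum.all().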
def get_model(options: dict, credentials: dict) -> BaseLanguageModel:
match options["model"].name:
case MODELS.GPT35TURBO.name | MODELS.GPT4.name:
model = ChatOpenAI(
model_name=options["model"].name,
temperature=options["temperature"],
openai_api_key=credentials["openai_api_key"],
)
case MODELS.GPT4ALL.name:
model = GPT4All(
model=options["model"].path,
n_ctx=options["model_n_ctx"],
backend="gptj",
temp=options["temperature"],
verbose=True,
callbacks=[StreamingStdOutCallbackHandler()],
)
# Added models need to be cased here
case _default:
msg = f"Model {options['model'].name} not supported!"
logger.error(msg)
st.error(msg)
exit()
return model
def get_embeddings(options: dict, credentials: dict) -> Embeddings:
match options["model"].embedding:
case EMBEDDINGS.OPENAI:
embeddings = OpenAIEmbeddings(
model=EMBEDDINGS.OPENAI,
disallowed_special=(),
openai_api_key=credentials["openai_api_key"],
)
case EMBEDDINGS.HUGGINGFACE:
embeddings = HuggingFaceEmbeddings(
model_name=EMBEDDINGS.HUGGINGFACE, cache_folder=str(MODEL_PATH)
)
# Added embeddings need to be cased here
case _default:
msg = f"Embeddings {options['model'].embedding} not supported!"
logger.error(msg)
st.error(msg)
exit()
return embeddings
def get_tokenizer(options: dict) -> Embeddings:
match options["model"].embedding:
case EMBEDDINGS.OPENAI:
tokenizer = tiktoken.encoding_for_model(EMBEDDINGS.OPENAI)
case EMBEDDINGS.HUGGINGFACE:
tokenizer = AutoTokenizer.from_pretrained(EMBEDDINGS.HUGGINGFACE)
# Added tokenizers need to be cased here
case _default:
msg = f"Tokenizer {options['model'].embedding} not supported!"
logger.error(msg)
st.error(msg)
exit()
return tokenizer
| [] |
2024-01-10 | AngelSanchezAW/Blue | blueapp~analis~utils~new_ai_post.py | from analis.models import ArticuloGenerado
from openai import OpenAI
from consts import APIKEYOAI
client = OpenAI(api_key=APIKEYOAI)
def new_ai_post(nombreSitioWeb, urlSitioWeb, postUrl, titulo_new_post, extracto_texto_new_post):
# Set the OpenAI API key
prompt = f"Crea un articulo original optimizado para SEO con esta información: {titulo_new_post} {extracto_texto_new_post}"
# Configure the language model
modelo = "gpt-3.5-turbo"
mensaje = [
{"role":"system","content":"Eres un experto en redacción de articulos."},
{"role":"user","content":prompt}
]
# Generate the response using the OpenAI API
response = client.chat.completions.create(model=modelo,
messages=mensaje,
temperature=1,
max_tokens=2000)
respuesta = response.choices[0].message.content
# Create an ArticuloGenerado instance and save it to the database
ai_post_instance = ArticuloGenerado.objects.create(
contenido_generado=respuesta,
titulo=titulo_new_post,
nombre_sitio_web=nombreSitioWeb,
url_sitio_web=urlSitioWeb,
post_url=postUrl
)
print("Artículo generado con éxito y guardado en la base de datos")
return ai_post_instance
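# Illustrative call (hypothetical arguments): new_ai_post("My Blog",
# "https://example.com", "https://example.com/post-1", "Post title", "Short excerpt")
# asks gpt-3.5-turbo for an article and returns the saved ArticuloGenerado row.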
| [
"Eres un experto en redacción de articulos.",
"Crea un articulo original optimizado para SEO con esta información: PLACEHOLDER PLACEHOLDER"
] |
2024-01-10 | ransjnr/GPT3-Python--AI | getAccessToGptAPI.py | #api_key = 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
#pip install openai
#npm install openai
import os
import openai
openai.api_key = "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"
#list of names of different models available for OpenAI gpt-3
# print(openai.Model.list())
answer = openai.Completion.create(
model="text-davinci-003",
prompt="Say this is a test",
max_tokens=7,
temperature=0
)
print(answer) | [
"Say this is a test"
] |
2024-01-10 | ransjnr/GPT3-Python--AI | voiceAssistant.py | #GPT-3 Rans AI Voice Assistant
import pyttsx3 #pip install pyttsx3 - python text-to-speech
#GPT-3 powered AGI Chat Application: RANSFORD OPPONG: Aug 4,2023
import os
import openai #pip install openai
import gradio as gr #pip install gradio
import speech_recognition as sr #pip install SpeechRecognition == voice to text
import pyaudio
##sudo apt install espeak
openai.api_key = "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"
#command to tell the model how to arrange the inputs and outputs.
start_sequence = "\nAI:"
restart_sequence = "\Human: "
#initial input
prompt ="The following is a conversation with an AI Assistant. The Assistant is helpful, creative, clever and very friendly. \n\nHuman: Hello, who are you\nAI: I am an AI created by OpenAI. How may I assist you today?\nHuman: ",
#Speak Function: text to voice
engine = pyttsx3.init()
def speak(text):
engine.say(text)
engine.runAndWait()
speak("Hello , I'm Rans AI Voice Assistant, How can I help you?")
#voice to text
def STT():
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
print("Recognizing...")
query = r.recognize_google(audio,language = "en-IN")
print("Human said :" +query)
except Exception as e:
print(e)
speak("Say that again please...")
return "None"
return query
def gpt_output(prompt):
response = openai.Completion.create(
model ="text-davinci-003",
prompt = prompt,
temperature = 0.9,
max_tokens = 150,
top_p = 1,
frequency_penalty = 0,
presence_penalty = 0.6,
stop = ["Human: ","AI: "]
)
data = response.choices[0].text
# return data
print(data)
speak(data)
# a loop to take input from a user when the function is true
while True:
query = STT()
gpt_output(query)
##Solving pyaudio problem:
###### sudo apt-get install portaudio19-dev
###### pip install pyaudio
| [
"The following is a conversation with an AI Assistant. The Assistant is helpful, creative, clever and very friendly. \n\nHuman: Hello, who are you\nAI: I am an AI created by OpenAI. How may I assist you today?\nHuman: "
] |
2024-01-10 | ransjnr/GPT3-Python--AI | ChatApplication.py | #GPT-3 powered AGI Chat Application: RANSFORD OPPONG: Aug 4,2023
import os
import openai
import gradio as gr
openai.api_key = "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"
#command to tell the model how to arrange the inputs and outputs.
start_sequence = "\nAI:"
restart_sequence = "\Human: "
#initial input
prompt ="The following is a conversation with an AI Assistant. The Assistant is helpful, creative, clever and very friendly. \n\nHuman: Hello, who are you\nAI: I am an AI created by OpenAI. How may I assist you today?\nHuman: ",
def gpt_output(prompt):
response = openai.Completion.create(
model ="text-davinci-003",
prompt = prompt,
temperature = 0.9,
max_tokens = 150,
top_p = 1,
frequency_penalty = 0,
presence_penalty = 0.6,
stop = ["Human: ","AI: "]
)
return response.choices[0].text
#a loop to take input from a user when the function is true
# while True:
# query = input("Ask a QUestion to AI:\n")
# gpt_output(query)
#context storage or history
def chatgpt_clone(input,history):
history = history or []
s = list(sum(history,()))
s.append(input)
inp = ''.join(s)
output = gpt_output(inp)
history.append((input,output))
return history,history
#pip install gradio - chat application web interface
block = gr.Blocks()
#builtin gradio functions for the interface
with block:
gr.Markdown("""<h1><center>Rans AI Assistant</center></h1>""")
chatbot = gr.Chatbot()
message = gr.Textbox(placeholder = prompt)
state = gr.State()
# session = gr.File()
submit = gr.Button("SEND")
submit.click(chatgpt_clone,inputs=[message,state],outputs=[chatbot,state])
#set the launch to true
block.launch(debug=True) | [
"The following is a conversation with an AI Assistant. The Assistant is helpful, creative, clever and very friendly. \n\nHuman: Hello, who are you\nAI: I am an AI created by OpenAI. How may I assist you today?\nHuman: "
] |
2024-01-10 | ortmasiu/Galileo | app~galileo.py | import os
import openai
import argparse
from typing import List
import re
MAX_INPUT_LENGTH = 280
def main():
# https://docs.python.org/3/library/argparse.html
parser = argparse.ArgumentParser()
parser.add_argument("--input", "-i", type=str, required=True)
args = parser.parse_args()
user_input = args.input
print(f"User input: {user_input}")
if validate_length(user_input):
text_result = generate_summarized_text(user_input)
keywords_result = generate_keywords(user_input)
print(text_result)
print(keywords_result)
else:
raise ValueError(
f"Input lenght is too long. Must be under {MAX_INPUT_LENGTH}.")
def validate_length(prompt: str) -> bool:
return len(prompt) <= MAX_INPUT_LENGTH
def generate_summarized_text(prompt: str) -> str:
# Load API key from env
openai.api_key = os.getenv("OPENAI_API_KEY")
enriched_prompt = f"Summarize this for a twelve-grade student:\n\n{prompt}"
print(enriched_prompt)
response = openai.Completion.create(
model="text-davinci-002",
prompt=enriched_prompt,
temperature=0.7,
max_tokens=50,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
# Extract output text
summarized_text: str = response["choices"][0]["text"]
last_char = summarized_text[-1]
if last_char not in {".", "!", "?"}:
summarized_text += "..."
print(f"snippet: {summarized_text}")
return summarized_text
def generate_keywords(prompt: str) -> List[str]:
# Load API key from an environment variable
openai.api_key = os.getenv("OPENAI_API_KEY")
enriched_prompt = f"Generate 5 category keywords for {prompt}: "
print(enriched_prompt)
response = openai.Completion.create(
model="text-davinci-002",
prompt=enriched_prompt,
max_tokens=50,
)
# Extract output text
keywords_text: str = response["choices"][0]["text"]
last_char = keywords_text[-1]
if last_char not in {".", "!", "?"}:
keywords_text += "..."
# Remove the unwanted characters
keywords_array = re.split(",|\n|-", keywords_text)
keywords_array = [k.lower().strip() for k in keywords_array]
keywords_array = [k.strip("12345).") for k in keywords_array if len(k) > 0]
# strip whitespace
keywords_array = [k.strip() for k in keywords_array]
print(f"keywords: {keywords_array}")
return keywords_array
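# Rough worked example (hypothetical completion text): if the model returns
# "1) robotics\n2) AI assistants", the cleanup above splits on newlines, lower-cases,
# strips the numbering characters and yields ["robotics", "ai assistants"].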
if __name__ == "__main__":
main()
| [
"Generate 5 category keywords for PLACEHOLDER: ",
"Summarize this for a twelve-grade student:\n\nPLACEHOLDER"
] |
2024-01-10 | ortmasiu/Galileo | cdk.out~asset.3b1e429fc8690bd6b40c7247ba21dfa85b26fce158068b6f473a0d5e4aacdfed~galileo.py | import os
import openai
import argparse
from typing import List
import re
MAX_INPUT_LENGTH = 280
def main():
# https://docs.python.org/3/library/argparse.html
parser = argparse.ArgumentParser()
parser.add_argument("--input", "-i", type=str, required=True)
args = parser.parse_args()
user_input = args.input
print(f"User input: {user_input}")
if validate_length(user_input):
text_result = generate_summarized_text(user_input)
keywords_result = generate_keywords(user_input)
print(text_result)
print(keywords_result)
else:
raise ValueError(
f"Input lenght is too long. Must be under {MAX_INPUT_LENGTH}.")
def validate_length(prompt: str) -> bool:
return len(prompt) <= MAX_INPUT_LENGTH
def generate_summarized_text(prompt: str) -> str:
# Load API key from env
openai.api_key = os.getenv("OPENAI_API_KEY")
enriched_prompt = f"Summarize this for a twelve-grade student:\n\n{prompt}"
print(enriched_prompt)
response = openai.Completion.create(
model="text-davinci-002",
prompt=enriched_prompt,
temperature=0.7,
max_tokens=50,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0
)
# Extract output text
summarized_text: str = response["choices"][0]["text"]
last_char = summarized_text[-1]
if last_char not in {".", "!", "?"}:
summarized_text += "..."
print(f"snippet: {summarized_text}")
return summarized_text
def generate_keywords(prompt: str) -> List[str]:
# Load API key from an environment variable
openai.api_key = os.getenv("OPENAI_API_KEY")
enriched_prompt = f"Generate 5 category keywords for {prompt}: "
print(enriched_prompt)
response = openai.Completion.create(
model="text-davinci-002",
prompt=enriched_prompt,
max_tokens=50,
)
# Extract output text
keywords_text: str = response["choices"][0]["text"]
last_char = keywords_text[-1]
if last_char not in {".", "!", "?"}:
keywords_text += "..."
# Remove the unwanted characters
keywords_array = re.split(",|\n|-", keywords_text)
keywords_array = [k.lower().strip() for k in keywords_array]
keywords_array = [k.strip("12345).") for k in keywords_array if len(k) > 0]
# strip whitespace
keywords_array = [k.strip() for k in keywords_array]
print(f"keywords: {keywords_array}")
return keywords_array
if __name__ == "__main__":
main()
| [
"Generate 5 category keywords for PLACEHOLDER: ",
"Summarize this for a twelve-grade student:\n\nPLACEHOLDER"
] |
2024-01-10 | ai-ld/processor | reporting~analyze.py | import os
import re
import json
import nltk
import openai
import shutil
import logging
import operator
import tarfile
import numpy as np
import pandas as pd
import seaborn as sns
import datetime as dt
import reporting.calc as cal
import reporting.utils as utl
import reporting.vmcolumns as vmc
import reporting.dictionary as dct
import reporting.vendormatrix as vm
import reporting.dictcolumns as dctc
class Analyze(object):
date_col = 'date'
database_cache = 'database_cache'
delivery_col = 'delivery'
under_delivery_col = 'under-delivery'
full_delivery_col = 'full-delivery'
over_delivery_col = 'over-delivery'
unknown_col = 'unknown'
delivery_comp_col = 'delivery_completion'
daily_delivery_col = 'daily_delivery'
over_daily_pace = 'over_daily_pace'
under_daily_pace = 'under_daily_pace'
adserving_alert = 'adserving_alert'
daily_pacing_alert = 'daily_pacing'
raw_file_update_col = 'raw_file_update'
topline_col = 'topline_metrics'
lw_topline_col = 'last_week_topline_metrics'
tw_topline_col = 'two_week_topline_merics'
kpi_col = 'kpi_col'
raw_columns = 'raw_file_columns'
vk_metrics = 'vendor_key_metrics'
vendor_metrics = 'vendor_metrics'
missing_metrics = 'missing_metrics'
flagged_metrics = 'flagged_metrics'
placement_col = 'placement_col'
max_api_length = 'max_api_length'
double_counting_all = 'double_counting_all'
double_counting_partial = 'double_counting_partial'
missing_flat = 'missing_flat'
missing_serving = 'missing_serving'
missing_ad_rate = 'missing_ad_rate'
package_cap = 'package_cap'
package_vendor = 'package_vendor'
package_vendor_good = 'package_vendor_good'
package_vendor_bad = 'package_vendor_bad'
cap_name = 'cap_name'
blank_lines = 'blank_lines'
change_auto_order = 'change_auto_order'
brandtracker_imports = 'brandtracker_imports'
analysis_dict_file_name = 'analysis_dict.json'
analysis_dict_key_col = 'key'
analysis_dict_data_col = 'data'
analysis_dict_msg_col = 'message'
analysis_dict_date_col = 'date'
analysis_dict_param_col = 'parameter'
analysis_dict_param_2_col = 'parameter_2'
analysis_dict_filter_col = 'filter_col'
analysis_dict_filter_val = 'filter_val'
analysis_dict_split_col = 'split_col'
analysis_dict_small_param_2 = 'Smallest'
analysis_dict_large_param_2 = 'Largest'
analysis_dict_only_param_2 = 'Only'
fixes_to_run = False
topline_metrics = [[cal.TOTAL_COST], [cal.NCF],
[vmc.impressions, 'CTR'], [vmc.clicks, 'CPC'],
[vmc.views], [vmc.views100, 'VCR'],
[vmc.landingpage, 'CPLPV'], [vmc.btnclick, 'CPBC'],
[vmc.purchase, 'CPP']]
topline_metrics_final = [vmc.impressions, 'CPM', vmc.clicks, 'CTR', 'CPC',
vmc.views, vmc.views100, 'VCR', 'CPV', 'CPCV',
vmc.landingpage, vmc.btnclick, vmc.purchase,
'CPLPV', 'CPBC', 'CPP', cal.NCF, cal.TOTAL_COST]
def __init__(self, df=pd.DataFrame(), file_name=None, matrix=None,
load_chat=False, chat_path=utl.config_path):
self.analysis_dict = []
self.df = df
self.file_name = file_name
self.matrix = matrix
self.load_chat = load_chat
self.chat_path = chat_path
self.chat = None
self.vc = ValueCalc()
self.class_list = [
CheckRawFileUpdateTime, CheckFirstRow, CheckColumnNames,
FindPlacementNameCol, CheckAutoDictOrder, CheckApiDateLength,
CheckFlatSpends, CheckDoubleCounting, GetPacingAnalysis,
GetDailyDelivery, GetServingAlerts, GetDailyPacingAlerts,
CheckPackageCapping]
if self.df.empty and self.file_name:
self.load_df_from_file()
if self.load_chat:
self.chat = AliChat(config_path=self.chat_path)
def get_base_analysis_dict_format(self):
analysis_dict_format = {
self.analysis_dict_key_col: '',
self.analysis_dict_data_col: {},
self.analysis_dict_msg_col: '',
self.analysis_dict_param_col: '',
self.analysis_dict_param_2_col: '',
self.analysis_dict_split_col: '',
self.analysis_dict_filter_col: '',
self.analysis_dict_filter_val: ''
}
return analysis_dict_format
def load_df_from_file(self):
self.df = utl.import_read_csv(self.file_name)
def add_to_analysis_dict(self, key_col, message='', data='',
param='', param2='', split='',
filter_col='', filter_val=''):
base_dict = self.get_base_analysis_dict_format()
base_dict[self.analysis_dict_key_col] = str(key_col)
base_dict[self.analysis_dict_msg_col] = str(message)
base_dict[self.analysis_dict_param_col] = str(param)
base_dict[self.analysis_dict_param_2_col] = str(param2)
base_dict[self.analysis_dict_split_col] = str(split)
base_dict[self.analysis_dict_filter_col] = str(filter_col)
base_dict[self.analysis_dict_filter_val] = str(filter_val)
base_dict[self.analysis_dict_data_col] = data
self.analysis_dict.append(base_dict)
def check_delivery(self, df):
plan_names = self.matrix.vendor_set(vm.plan_key)
if not plan_names:
logging.warning('VM does not have plan key')
return False
plan_names = plan_names[vmc.fullplacename]
miss_cols = [x for x in plan_names if x not in df.columns]
if miss_cols:
logging.warning('Df does not have cols {}'.format(miss_cols))
return False
df = df.groupby(plan_names).apply(lambda x: 0 if x[dctc.PNC].sum() == 0
else x[vmc.cost].sum() /
x[dctc.PNC].sum())
f_df = df[df > 1]
if f_df.empty:
delivery_msg = 'Nothing has delivered in full.'
logging.info(delivery_msg)
self.add_to_analysis_dict(key_col=self.delivery_col,
param=self.under_delivery_col,
message=delivery_msg)
else:
del_p = f_df.apply(lambda x: "{0:.2f}%".format(x * 100))
delivery_msg = 'The following have delivered in full: '
logging.info('{}\n{}'.format(delivery_msg, del_p))
data = del_p.reset_index().rename(columns={0: 'Delivery'})
self.add_to_analysis_dict(key_col=self.delivery_col,
param=self.full_delivery_col,
message=delivery_msg,
data=data.to_dict())
o_df = f_df[f_df > 1.5]
if not o_df.empty:
del_p = o_df.apply(lambda x: "{0:.2f}%".format(x * 100))
delivery_msg = 'The following have over-delivered:'
logging.info('{}\n{}'.format(delivery_msg, del_p))
data = del_p.reset_index().rename(columns={0: 'Delivery'})
self.add_to_analysis_dict(key_col=self.delivery_col,
param=self.over_delivery_col,
message=delivery_msg,
data=data.to_dict())
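# Delivery ratio sketch (assumed spend figures): a breakout with 12,000 in delivered
# cost against a 10,000 planned net cost has a ratio of 1.2, so it is reported as
# delivered in full (>1) but not flagged as over-delivered, which requires a ratio
# above 1.5.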
@staticmethod
def get_start_end_dates(df, plan_names):
"""
Gets start and end dates at the level of the planned net full placement
name. Dates taken from mediaplan where available, else from
vendormatrix based on which vendorkey has more spend.
:param df: full output df
:param plan_names: planned net full placement columns
:returns: two dfs w/ start and end dates for each unique breakout
"""
matrix = vm.VendorMatrix().vm_df
matrix = matrix[[vmc.vendorkey, vmc.startdate, vmc.enddate]]
matrix = matrix.rename(columns={vmc.startdate: dctc.SD,
vmc.enddate: dctc.ED})
matrix = utl.data_to_type(matrix, date_col=[dctc.SD, dctc.ED],
fill_empty=False)
matrix[[dctc.SD, dctc.ED]] = matrix[[dctc.SD, dctc.ED]].fillna(pd.NaT)
matrix[[dctc.SD, dctc.ED]] = matrix[
[dctc.SD, dctc.ED]].replace([""], pd.NaT)
vm_dates = df[plan_names + [vmc.vendorkey, vmc.cost]]
vm_dates = vm_dates.merge(matrix, how='left', on=vmc.vendorkey)
vm_dates = vm_dates.groupby(
plan_names + [vmc.vendorkey]).agg(
{vmc.cost: 'sum', dctc.SD: 'min', dctc.ED: 'max'}).reset_index()
vm_dates = vm_dates[vm_dates[vmc.cost] > 0]
vm_dates = vm_dates.groupby(plan_names).agg(
{dctc.SD: 'min', dctc.ED: 'max'}).reset_index()
if dctc.SD in df.columns and dctc.ED in df.columns:
start_end_dates = df[plan_names + [dctc.SD, dctc.ED]]
start_end_dates = start_end_dates.groupby(plan_names).agg(
{dctc.SD: 'min', dctc.ED: 'max'})
start_end_dates = start_end_dates.reset_index()
else:
start_end_dates = vm_dates[plan_names + [dctc.SD, dctc.ED]]
start_end_dates = start_end_dates[start_end_dates.apply(
lambda x: (x[dctc.SD] != x[dctc.ED]
and not pd.isnull(x[dctc.SD])
and not pd.isnull(x[dctc.ED])), axis=1)]
vm_dates[plan_names] = vm_dates[plan_names].astype(object)
vm_dates = vm_dates.merge(
start_end_dates, how='left', on=plan_names, indicator=True)
vm_dates = vm_dates[vm_dates['_merge'] == 'left_only']
vm_dates = vm_dates.drop(
columns=['_merge', 'mpStart Date_y', 'mpEnd Date_y'])
vm_dates = vm_dates.rename(columns={'mpStart Date_x': dctc.SD,
'mpEnd Date_x': dctc.ED})
start_end_dates = pd.concat([start_end_dates, vm_dates])
start_end_dates = utl.data_to_type(start_end_dates,
date_col=[dctc.SD, dctc.ED],
fill_empty=False)
start_dates = start_end_dates[plan_names + [dctc.SD]]
end_dates = start_end_dates[plan_names + [dctc.ED]]
return start_dates, end_dates
def get_plan_names(self):
plan_names = self.matrix.vendor_set(vm.plan_key)
if not plan_names:
logging.warning('VM does not have plan key')
plan_names = None
else:
plan_names = plan_names[vmc.fullplacename]
return plan_names
def check_plan_error(self, df):
plan_names = self.get_plan_names()
if not plan_names:
return False
er = self.matrix.vendor_set(vm.plan_key)[vmc.filenameerror]
edf = utl.import_read_csv(er, utl.error_path)
if edf.empty:
plan_error_msg = ('No Planned error - all {} '
'combinations are defined.'.format(plan_names))
logging.info(plan_error_msg)
self.add_to_analysis_dict(key_col=self.unknown_col,
message=plan_error_msg)
return True
if dctc.PFPN not in df.columns:
logging.warning('Df does not have column: {}'.format(dctc.PFPN))
return False
df = df[df[dctc.PFPN].isin(edf[vmc.fullplacename].values)][
plan_names + [vmc.vendorkey]].drop_duplicates()
df = vm.full_placement_creation(df, None, dctc.FPN, plan_names)
df = df[df[dctc.FPN].isin(edf[dctc.FPN].values)]
df = utl.col_removal(df, None, [dctc.FPN])
for col in df.columns:
df[col] = "'" + df[col] + "'"
df = df.dropna()
df_dict = '\n'.join(['{}{}'.format(k, v)
for k, v in df.to_dict(orient='index').items()])
undefined_msg = 'Missing planned spends have the following keys:'
logging.info('{}\n{}'.format(undefined_msg, df_dict))
self.add_to_analysis_dict(key_col=self.unknown_col,
message=undefined_msg,
data=df.to_dict())
def backup_files(self):
bu = os.path.join(utl.backup_path, dt.date.today().strftime('%Y%m%d'))
logging.info('Backing up all files to {}'.format(bu))
dir_to_backup = [utl.config_path, utl.dict_path, utl.raw_path]
for path in [utl.backup_path, bu] + dir_to_backup:
utl.dir_check(path)
file_dicts = {'raw.gzip': self.df}
for file_name, df in file_dicts.items():
file_name = os.path.join(bu, file_name)
df.to_csv(file_name, compression='gzip')
for file_path in dir_to_backup:
file_name = '{}.tar.gz'.format(file_path.replace('/', ''))
file_name = os.path.join(bu, file_name)
tar = tarfile.open(file_name, "w:gz")
tar.add(file_path, arcname=file_path.replace('/', ''))
tar.close()
for file_name in ['logfile.log']:
if os.path.exists(file_name):
new_file_name = os.path.join(bu, file_name)
shutil.copy(file_name, new_file_name)
logging.info('Successfully backed up files to {}'.format(bu))
# noinspection PyUnresolvedReferences
@staticmethod
def make_heat_map(df, cost_cols=None):
fig, axs = sns.plt.subplots(ncols=len(df.columns),
gridspec_kw={'hspace': 0, 'wspace': 0})
for idx, col in enumerate(df.columns):
text_format = ",.0f"
sns.heatmap(df[[col]], annot=True, fmt=text_format, linewidths=.5,
cbar=False, cmap="Blues", ax=axs[idx])
if col in cost_cols:
for t in axs[idx].texts:
t.set_text('$' + t.get_text())
if idx != 0:
axs[idx].set_ylabel('')
axs[idx].get_yaxis().set_ticks([])
else:
labels = [val[:30] for val in reversed(list(df.index))]
axs[idx].set_yticklabels(labels=labels)
axs[idx].xaxis.tick_top()
sns.plt.show()
sns.plt.close()
def generate_table(self, group, metrics, sort=None):
df = self.generate_df_table(group, metrics, sort)
cost_cols = [x for x in metrics if metrics[x]]
self.make_heat_map(df, cost_cols)
def generate_df_table(self, group, metrics, sort=None, data_filter=None,
df=pd.DataFrame()):
base_metrics = [x for x in metrics if x not in self.vc.metric_names]
calc_metrics = [x for x in metrics if x not in base_metrics]
if df.empty:
df = self.df.copy()
if data_filter:
filter_col = data_filter[0]
filter_val = data_filter[1]
if filter_col in df.columns:
df = df[df[filter_col].isin(filter_val)]
else:
logging.warning('{} not in df columns'.format(filter_col))
columns = group + metrics + [filter_col]
return pd.DataFrame({x: [] for x in columns})
for group_col in group:
if group_col not in df.columns:
logging.warning('{} not in df columns'.format(group))
columns = group + metrics
return pd.DataFrame({x: [] for x in columns})
df = df.groupby(group)[base_metrics].sum()
df = self.vc.calculate_all_metrics(calc_metrics, df)
if sort:
df = df.sort_values(sort, ascending=False)
return df
@staticmethod
def give_df_default_format(df, columns=None):
df = utl.give_df_default_format(df, columns)
return df
def get_table_without_format(self, data_filter=None, group=dctc.CAM):
group = [group]
metrics = []
kpis = self.get_kpis()
for metric in self.topline_metrics:
if metric[0] in self.df.columns:
metrics += metric
if kpis:
metrics += list(kpis.keys())
metrics += [value for values in kpis.values() for value in values]
metrics = list(set(metrics))
df = self.generate_df_table(group=group, metrics=metrics,
data_filter=data_filter)
return df
def generate_topline_metrics(self, data_filter=None, group=dctc.CAM):
df = self.get_table_without_format(data_filter, group)
df = self.give_df_default_format(df)
final_cols = [x for x in self.topline_metrics_final if x in df.columns]
df = df[final_cols]
df = df.transpose()
df = df.reindex(final_cols)
df = df.replace([np.inf, -np.inf], np.nan)
df = df.fillna(0)
df = df.reset_index().rename(columns={'index': 'Topline Metrics'})
log_info_text = ('Topline metrics are as follows: \n{}'
''.format(df.to_string()))
if data_filter:
log_info_text = data_filter[2] + log_info_text
logging.info(log_info_text)
return df
def calculate_kpi_trend(self, kpi, group, metrics):
df = self.get_df_based_on_kpi(kpi, group, metrics, split=vmc.date)
if len(df) < 2:
logging.warning('Less than two datapoints for KPI {}'.format(kpi))
return False
df = df.sort_values(vmc.date).reset_index(drop=True).reset_index()
df = df.replace([np.inf, -np.inf], np.nan).fillna(0)
fit = np.polyfit(df['index'], df[kpi], deg=1)
format_map = utl.get_default_format(kpi)
if fit[0] > 0:
trend = 'increasing'
else:
trend = 'decreasing'
msg = ('The KPI {} is {} at a rate of {} per day when given '
'linear fit').format(kpi, trend, format_map(abs(fit[0])))
logging.info(msg)
df['fit'] = fit[0] * df['index'] + fit[1]
df[vmc.date] = df[vmc.date].dt.strftime('%Y-%m-%d')
self.add_to_analysis_dict(
key_col=self.kpi_col, message=msg, data=df.to_dict(),
param=kpi, param2='Trend', split=vmc.date)
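# Note on the fit above: np.polyfit(x, y, deg=1) returns coefficients highest degree
# first, so fit[0] is the per-day slope and fit[1] the intercept; a positive slope is
# reported as an "increasing" KPI trend and the fitted line is stored alongside the
# daily values.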
def explain_lowest_kpi_for_vendor(self, kpi, group, metrics, filter_col):
min_val = self.find_in_analysis_dict(
self.kpi_col, param=kpi, param_2=self.analysis_dict_small_param_2,
split_col=dctc.VEN)
if len(min_val) == 0:
min_val = self.find_in_analysis_dict(
self.kpi_col, param=kpi,
param_2=self.analysis_dict_only_param_2, split_col=dctc.VEN)
if len(min_val) == 0:
return False
min_val = min_val[0][self.analysis_dict_data_col][dctc.VEN].values()
for val in min_val:
for split in [dctc.CRE, dctc.TAR, dctc.PKD, dctc.PLD, dctc.ENV]:
self.evaluate_smallest_largest_kpi(
kpi, group, metrics, split, filter_col, val, number=1)
def get_df_based_on_kpi(self, kpi, group, metrics, split=None,
filter_col=None, filter_val=None, sort=None):
if split:
group = group + [split]
if filter_col:
group = group + [filter_col]
if not sort:
sort = kpi
df = self.generate_df_table(group=group, metrics=metrics, sort=sort)
df = df.reset_index().replace([np.inf, -np.inf], np.nan).fillna(0)
df = df.loc[(df[dctc.KPI] == kpi) & (df[kpi].notnull()) & (df[kpi] > 0)]
if filter_col:
df = df.loc[(df[filter_col] == filter_val)]
return df
def evaluate_df_kpi_smallest_largest(self, df, kpi, split, filter_col,
filter_val, small_large='Smallest'):
format_df = self.give_df_default_format(df, columns=[kpi])
if split == vmc.date:
df[split] = df[split].dt.strftime('%Y-%m-%d')
split_values = ['{} ({})'.format(x, y) for x, y in
format_df[[split, kpi]].values]
split_values = ', '.join(split_values)
msg = '{} value(s) for KPI {} broken out by {} are {}'.format(
small_large, kpi, split, split_values)
if filter_col:
msg = '{} when filtered by the {} {}'.format(
msg, filter_col, filter_val)
log_info_text = ('{}\n{}'.format(msg, format_df.to_string()))
logging.info(log_info_text)
self.add_to_analysis_dict(
key_col=self.kpi_col, message=msg, data=df.to_dict(),
param=kpi, param2=small_large, split=split,
filter_col=filter_col, filter_val=filter_val)
def evaluate_smallest_largest_kpi(self, kpi, group, metrics, split=None,
filter_col=None, filter_val=None,
number=3):
df = self.get_df_based_on_kpi(kpi, group, metrics, split, filter_col,
filter_val)
if df.empty:
msg = ('Value(s) for KPI {} broken out by {} could '
'not be calculated'.format(kpi, split))
if filter_col:
msg = '{} when filtered by the {} {}'.format(
msg, filter_col, filter_val)
logging.warning(msg)
return False
if len(df) < 2:
df_list = [[df, self.analysis_dict_only_param_2]]
else:
smallest_df = df.nsmallest(n=number, columns=[kpi])
largest_df = df.nlargest(n=number, columns=[kpi])
df_list = [[smallest_df, self.analysis_dict_small_param_2],
[largest_df, self.analysis_dict_large_param_2]]
for df in df_list:
self.evaluate_df_kpi_smallest_largest(df[0], kpi, split, filter_col,
filter_val, df[1])
def evaluate_on_kpi(self, kpi, formula):
metrics = [kpi] + formula
group = [dctc.CAM, dctc.KPI]
self.evaluate_smallest_largest_kpi(kpi, group, metrics, split=dctc.VEN)
self.explain_lowest_kpi_for_vendor(
kpi=kpi, group=group, metrics=metrics, filter_col=dctc.VEN)
self.evaluate_smallest_largest_kpi(kpi, group, metrics, split=vmc.date)
self.calculate_kpi_trend(kpi, group, metrics)
def get_kpi(self, kpi, write=False):
kpi_cols = []
kpi_formula = [
self.vc.calculations[x] for x in self.vc.calculations
if self.vc.calculations[x][self.vc.metric_name] == kpi]
if kpi_formula:
kpi_cols = kpi_formula[0][self.vc.formula][::2]
missing_cols = [x for x in kpi_cols if x not in self.df.columns]
if missing_cols:
msg = 'Missing columns, could not evaluate {}'.format(kpi)
logging.warning(msg)
if write:
self.add_to_analysis_dict(key_col=self.kpi_col,
message=msg, param=kpi)
kpi = False
elif kpi not in self.df.columns:
msg = 'Unknown KPI: {}'.format(kpi)
logging.warning(msg)
kpi = False
return kpi, kpi_cols
def get_kpis(self, write=False):
kpis = {}
if dctc.KPI in self.df.columns:
for kpi in self.df[dctc.KPI].unique():
kpi, kpi_cols = self.get_kpi(kpi, write)
if kpi:
kpis[kpi] = kpi_cols
return kpis
def evaluate_on_kpis(self):
kpis = self.get_kpis(write=True)
if kpis:
for kpi, formula in kpis.items():
self.evaluate_on_kpi(kpi, formula)
def generate_topline_and_weekly_metrics(self, group=dctc.CAM):
df = self.generate_topline_metrics(group=group)
last_week_filter = [
dt.datetime.strftime(
(dt.datetime.today() - dt.timedelta(days=x)), '%Y-%m-%d')
for x in range(1, 8)]
tdf = self.generate_topline_metrics(
data_filter=[vmc.date, last_week_filter, 'Last Weeks '],
group=group)
two_week_filter = [
dt.datetime.strftime(
(dt.datetime.today() - dt.timedelta(days=x)), '%Y-%m-%d')
for x in range(8, 15)]
twdf = self.generate_topline_metrics(
data_filter=[vmc.date, two_week_filter, '2 Weeks Ago '],
group=group)
for val in [(self.topline_col, df), (self.lw_topline_col, tdf),
(self.tw_topline_col, twdf)]:
msg = '{} as follows:'.format(val[0].replace('_', ' '))
self.add_to_analysis_dict(key_col=self.topline_col,
message=msg,
data=val[1].to_dict(),
param=val[0])
return df, tdf, twdf
def get_metrics_by_vendor_key(self):
data_sources = self.matrix.get_all_data_sources()
df = self.df.copy()
if df.empty:
logging.warning('Dataframe empty could not get metrics.')
return False
metrics = []
for source in data_sources:
metrics.extend(source.get_active_metrics())
metrics = list(set(metrics))
metrics = [x for x in metrics if x in df.columns]
agg_map = {x: [np.min, np.max] if (x == vmc.date) else np.sum
for x in metrics}
df = df.groupby([vmc.vendorkey]).agg(agg_map)
df.columns = [' - '.join(col).strip() for col in df.columns]
df.columns = [x[:-6] if x[-6:] == ' - sum' else x for x in df.columns]
df = df.reset_index()
for col in [' - amin', ' - amax']:
df[vmc.date + col] = df[vmc.date + col].astype('U')
update_msg = 'Metrics by vendor key are as follows:'
logging.info('{}\n{}'.format(update_msg, df.to_string()))
self.add_to_analysis_dict(key_col=self.vk_metrics,
message=update_msg, data=df.to_dict())
return True
def find_missing_metrics(self):
df = self.get_table_without_format(group=dctc.VEN)
format_df = self.give_df_default_format(df.copy())
df = df.T
update_msg = 'Metrics by vendor are as follows:'
logging.info('{}\n{}'.format(update_msg, format_df.to_string()))
self.add_to_analysis_dict(key_col=self.vendor_metrics,
message=update_msg,
data=format_df.T.to_dict())
mdf = []
for col in df.columns:
missing_metrics = df[df[col] == 0][col].index.to_list()
if missing_metrics:
miss_dict = {dctc.VEN: col,
self.missing_metrics: missing_metrics}
if mdf is None:
mdf = []
mdf.append(miss_dict)
mdf = pd.DataFrame(mdf)
if mdf.empty:
missing_msg = 'No vendors have missing metrics.'
logging.info('{}'.format(missing_msg))
else:
missing_msg = 'The following vendors have missing metrics:'
logging.info('{}\n{}'.format(missing_msg, mdf.to_string()))
self.add_to_analysis_dict(key_col=self.missing_metrics,
message=missing_msg, data=mdf.to_dict())
def flag_errant_metrics(self):
metrics = [vmc.impressions, vmc.clicks, 'CTR']
if [metric for metric in metrics[:2] if metric not in self.df.columns]:
logging.warning('Missing metric, could not determine flags.')
return False
df = self.generate_df_table(group=[dctc.VEN, dctc.CAM], metrics=metrics)
if df.empty:
logging.warning('Dataframe empty, could not determine flags.')
return False
all_threshold = 'All'
threshold_col = 'threshold'
thresholds = {'CTR': {'Google SEM': 0.2, all_threshold: 0.06}}
for metric_name, threshold_dict in thresholds.items():
edf = df.copy()
edf = edf.reset_index().set_index(dctc.VEN)
edf[threshold_col] = edf.index.map(threshold_dict).fillna(
threshold_dict[all_threshold])
edf = edf[edf['CTR'] > edf[threshold_col]]
if not edf.empty:
edf = edf[[metric_name, threshold_col]]
edf = edf.replace([np.inf, -np.inf], np.nan).fillna(0)
flagged_msg = ('The following vendors have unusually high {}s'
'.'.format(metric_name))
logging.info('{}\n{}'.format(
flagged_msg, edf.to_string()))
self.add_to_analysis_dict(
key_col=self.flagged_metrics, param=metric_name,
message=flagged_msg, data=edf.to_dict())
return True
@staticmethod
def processor_clean_functions(df, cd, cds_name, clean_functions):
success = True
for text, clean_func in clean_functions.items():
if not success:
msg = 'A previous step failed to process.'
cd[text][cds_name] = (False, msg)
continue
try:
df = clean_func(df)
msg = 'Successfully able to {}'.format(text)
cd[text][cds_name] = (True, msg)
except Exception as e:
msg = 'Could not {} with error: {}'.format(text, e)
cd[text][cds_name] = (False, msg)
success = False
return df, cd, success
@staticmethod
def compare_start_end_date_raw(df, cd, cds_name, cds, vk='vk'):
df = df.copy()
df[vmc.vendorkey] = vk
date_col_name = cds.p[vmc.date][0]
if str(date_col_name) == 'nan' or date_col_name not in df.columns:
msg = 'Date not specified or not column names.'
msg = (False, msg)
else:
df[vmc.date] = df[cds.p[vmc.date][0]]
df = utl.data_to_type(df=df, date_col=vmc.datadatecol)
df = df[[vmc.vendorkey, vmc.date]].groupby([vmc.vendorkey]).agg(
{vmc.date: [np.min, np.max]})
df.columns = [' - '.join(col).strip() for col in df.columns]
tdf = df.reset_index()
max_date = tdf['{} - amax'.format(vmc.date)][0].date()
min_date = tdf['{} - amin'.format(vmc.date)][0].date()
sd = cds.p[vmc.startdate].date()
ed = cds.p[vmc.enddate].date()
if max_date < sd:
msg = ('Last day in raw file {} is less than start date {}.\n'
'Result will be blank. Change start date.'.format(
max_date, sd))
msg = (False, msg)
elif min_date > ed:
msg = ('First day in raw file {} is greater than end date {}.\n'
'Result will be blank. Change end date.'.format(
min_date, ed))
msg = (False, msg)
else:
msg = ('Some or all data in raw file with date range {} - {} '
'falls between start and end dates {} - {}'.format(
sd, ed, min_date, max_date))
msg = (True, msg)
cd[vmc.startdate][cds_name] = msg
return cd
def check_raw_file_against_plan_net(self, df, cd, cds_name):
plan_df = self.matrix.vendor_get(vm.plan_key)
if plan_df.empty:
msg = (False, 'Plan net is empty could not check.')
else:
plan_names = self.matrix.vendor_set(vm.plan_key)[vmc.fullplacename]
df = vm.full_placement_creation(df, None, dctc.FPN, plan_names)
missing = [x for x in df[dctc.FPN].unique()
if x not in plan_df[dctc.FPN].unique()]
if not missing:
msg = (True, 'All values defined in plan net.')
else:
missing = ', '.join(missing)
msg = (False, 'The following values were not in the plan net '
'dictionary: {}'.format(missing))
cd[vm.plan_key][cds_name] = msg
return cd
@staticmethod
def write_raw_file_dict(vk, cd):
utl.dir_check(utl.tmp_file_suffix)
file_name = '{}.json'.format(vk)
file_name = os.path.join(utl.tmp_file_suffix, file_name)
with open(file_name, 'w') as fp:
json.dump(cd, fp, cls=utl.NpEncoder)
@staticmethod
def check_combine_col_totals(cd, df, cds_name, c_cols):
for col in c_cols:
if col in df.columns:
total = df[col].sum()
if total <= 0:
msg = (False, 'Sum of column {} was {}'.format(col, total))
else:
msg = (True, int(total))
if cds_name == 'New':
if 'Old' not in cd[col]:
old_total = 0
else:
old_total = cd[col]['Old'][1]
if (not isinstance(old_total, str) and
not isinstance(total, str) and old_total > total):
msg = (
False, 'Old file total {} was greater than new '
'file total {} for col {}'.format(
old_total, total, col))
cd[col][cds_name] = msg
return cd
@staticmethod
def get_base_raw_file_dict(ds):
cd = {'file_load': {},
vmc.fullplacename: {},
vmc.placement: {},
vmc.date: {},
'empty': {},
vmc.startdate: {}}
c_cols = [x for x in vmc.datafloatcol if ds.p[x] != ['nan']]
clean_functions = {
'get and merge dictionary': ds.get_and_merge_dictionary,
'combine data': ds.combine_data,
'remove cols and make calculations':
ds.remove_cols_and_make_calculations}
for x in c_cols + list(clean_functions.keys()) + [vm.plan_key]:
cd[x] = {}
return cd, clean_functions, c_cols
@staticmethod
def check_sheet_names(tds, sheet_names):
missing_sheets = []
xl = pd.read_excel(tds.p[vmc.filename], None)
sheet_lists = list(xl.keys())
for sheet_name in sheet_names:
if sheet_name not in sheet_lists:
missing_sheets.append(sheet_name)
return missing_sheets
def compare_raw_files(self, vk):
ds = self.matrix.get_data_source(vk)
tds = self.matrix.get_data_source(vk)
file_type = os.path.splitext(ds.p[vmc.filename_true])[1]
tmp_file = ds.p[vmc.filename_true].replace(
file_type, '{}{}'.format(utl.tmp_file_suffix, file_type))
tds.p[vmc.filename_true] = tmp_file
tds.p[vmc.filename] = tmp_file
missing_sheets = []
if ':::' in ds.p[vmc.filename]:
sheet_names = ds.p[vmc.filename].split(':::')[1:]
sheet_info = ':::' + ':::'.join(sheet_names)
missing_sheets = self.check_sheet_names(tds, sheet_names)
tds.p[vmc.filename] += sheet_info
cd, clean_functions, c_cols = self.get_base_raw_file_dict(ds)
for cds_name, cds in {'Old': ds, 'New': tds}.items():
try:
df = cds.get_raw_df()
except Exception as e:
logging.warning('Unknown exception: {}'.format(e))
if cds_name == 'New':
if missing_sheets:
missing_sheets = ', '.join(missing_sheets).upper()
msg = ('Xlsx file is missing the following sheets: '
'{}. Rename sheets if naming is wrong. Else, '
'check w/ vendor to get all needed sheets.'
).format(missing_sheets)
else:
msg = ('Please open the file in Excel, select all '
'columns, select General in the Number format '
'dropdown, save as a csv and retry.')
else:
msg = ('The old file may not exist. '
'Please save the new file.')
cd['file_load'][cds_name] = (
False,
'{} file could not be loaded. {}'.format(cds_name, msg))
continue
cd['file_load'][cds_name] = (True, 'File was successfully read.')
for col in [vmc.fullplacename, vmc.placement, vmc.date] + c_cols:
cols_to_check = ds.p[col]
if col == vmc.placement:
cols_to_check = [ds.p[col]]
missing_cols = [x for x in cols_to_check
if x.replace('::', '') not in df.columns]
if missing_cols:
msg = (False,
'Columns specified in the {} are not in the'
' new file those columns are: '
'{}'.format(col, ','.join(missing_cols)))
else:
msg = (True, '{} columns are in the raw file.'.format(col))
cd[col][cds_name] = msg
if df is None or df.empty:
msg = '{} file is empty skipping checks.'.format(cds_name)
cd['empty'][cds_name] = (False, msg)
continue
total_mb = int(round(df.memory_usage(index=True).sum() / 1000000))
msg = '{} file has {} rows and is {}MB.'.format(
cds_name, len(df.index), total_mb)
cd['empty'][cds_name] = (True, msg)
cd = self.compare_start_end_date_raw(df, cd, cds_name, cds, vk)
df, cd, success = self.processor_clean_functions(
df, cd, cds_name, clean_functions)
if not success:
for col in [vm.plan_key]:
msg = ('Could not fully process files so no '
'additional checks could be made.')
cd[col][cds_name] = (False, msg)
cd = self.check_combine_col_totals(cd, df, cds_name, c_cols)
cd = self.check_raw_file_against_plan_net(df, cd, cds_name)
cds.df = df
self.write_raw_file_dict(vk, cd)
def find_missing_serving(self):
groups = [vmc.vendorkey, dctc.SRV, dctc.AM, dctc.PN]
metrics = []
serving_vals = ['1x1 Click & Imp', '1x1 Click Only', 'In-Banner',
'In-Stream Video', 'No Tracking', 'Rich Media',
'Standard', 'VAST', 'VPAID']
df = self.generate_df_table(groups, metrics, sort=None,
data_filter=None)
df = df.reset_index()
if df.empty:
logging.warning('Dataframe empty, '
'could not determine missing serving.')
return False
df = df[(df[vmc.vendorkey].str.contains(vmc.api_dc_key)) |
(df[vmc.vendorkey].str.contains(vmc.api_szk_key))]
df = df[(df[dctc.AM] == 'nan') | (df[dctc.AM] == 0) |
(df[dctc.AM].isnull())]
df = df[~df[dctc.SRV].isin(serving_vals)]
df = df.astype({dctc.SRV: str, dctc.AM: str})
if not df.empty:
msg = ('The following placements are under an adserver w/o '
'a recognized serving model. Add via Edit Processor Files -> '
'Translate or in platform:')
logging.info('{}\n{}'.format(msg, df.to_string()))
else:
msg = ('All placements under an adserver have an associated '
'serving model.')
logging.info('{}'.format(msg))
self.add_to_analysis_dict(key_col=self.missing_serving,
message=msg, data=df.to_dict())
return True
def find_missing_ad_rate(self):
groups = [vmc.vendorkey, dctc.SRV, dctc.AM, dctc.AR]
metrics = []
df = self.generate_df_table(groups, metrics, sort=None,
data_filter=None)
df = df.reset_index()
if df.empty:
logging.warning(
'Dataframe empty, could not determine missing ad rate.')
return False
df = df[((df[vmc.vendorkey].str.contains(vmc.api_dc_key)) |
(df[vmc.vendorkey].str.contains(vmc.api_szk_key)))
& (df[dctc.SRV] != 'No Tracking')]
df = df[(df[dctc.AR] == 0) | (df[dctc.AR].isnull()) |
(df[dctc.AR] == 'nan')]
df = df.astype({dctc.SRV: str, dctc.AM: str, dctc.AR: str})
df = df.drop(columns=vmc.vendorkey)
if not df.empty:
msg = ('The following Adserving Models are missing associated '
'rates. Add via Edit Processor Files -> Edit Relation '
'Dictionaries -> Relation - Serving:')
logging.info('{}\n{}'.format(msg, df.to_string()))
else:
msg = ('All placements w/ Adserving Models have associated '
'adserving rates.')
logging.info('{}'.format(msg))
self.add_to_analysis_dict(key_col=self.missing_ad_rate,
message=msg, data=df.to_dict())
return True
def find_in_analysis_dict(self, key, param=None, param_2=None,
split_col=None, filter_col=None, filter_val=None,
analysis_dict=None):
if not analysis_dict:
analysis_dict = self.analysis_dict
item = [x for x in analysis_dict
if x[self.analysis_dict_key_col] == key]
if param:
item = [x for x in item if x[self.analysis_dict_param_col] == param]
if param_2:
item = [x for x in item if
x[self.analysis_dict_param_2_col] == param_2]
if split_col:
item = [x for x in item if
x[self.analysis_dict_split_col] == split_col]
if filter_col:
item = [x for x in item if
x[self.analysis_dict_filter_col] == filter_col]
if filter_val:
item = [x for x in item if
x[self.analysis_dict_filter_val] == filter_val]
return item
def write_analysis_dict(self):
with open(self.analysis_dict_file_name, 'w') as fp:
json.dump(self.analysis_dict, fp)
def do_all_analysis(self):
self.backup_files()
self.check_delivery(self.df)
self.check_plan_error(self.df)
self.generate_topline_and_weekly_metrics()
self.evaluate_on_kpis()
self.get_metrics_by_vendor_key()
self.find_missing_metrics()
self.flag_errant_metrics()
self.find_missing_serving()
self.find_missing_ad_rate()
for analysis_class in self.class_list:
analysis_class(self).do_analysis()
self.write_analysis_dict()
def load_old_raw_file_dict(self, new, cu):
old = None
if os.path.exists(self.analysis_dict_file_name):
try:
with open(self.analysis_dict_file_name, 'r') as f:
old = json.load(f)
except json.decoder.JSONDecodeError as e:
logging.warning('Json error assuming new sources: {}'.format(e))
if old:
old = self.find_in_analysis_dict(key=self.raw_file_update_col,
analysis_dict=old)
old = pd.DataFrame(old[0]['data'])
else:
logging.warning('No analysis dict assuming all new sources.')
old = new.copy()
old[cu.update_tier_col] = cu.update_tier_never
return old
def get_new_files(self):
cu = CheckRawFileUpdateTime(self)
cu.do_analysis()
new = self.find_in_analysis_dict(key=self.raw_file_update_col)
if not new:
logging.warning('Could not find update times.')
return False
new = pd.DataFrame(new[0]['data'])
old = self.load_old_raw_file_dict(new, cu)
if vmc.vendorkey not in old.columns:
logging.warning('Old df missing vendor key column.')
return []
df = new.merge(old, how='left', on=vmc.vendorkey)
df = df[df['{}_y'.format(cu.update_tier_col)] == cu.update_tier_never]
df = df[df['{}_x'.format(cu.update_tier_col)] != cu.update_tier_never]
new_sources = df[vmc.vendorkey].to_list()
return new_sources
def do_analysis_and_fix_processor(self, pre_run=False, first_run=False,
new_files=False):
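        """
        Runs the fixable analysis classes and applies their fixes.  The
        pre_run, first_run and new_files flags control which classes run.
        Returns the fixes_to_run flag, set when any class had data to fix.
        """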
new_file_check = []
if new_files:
new_file_check = self.get_new_files()
kwargs = {'only_new_files': new_files,
'new_file_list': new_file_check}
for analysis_class in self.class_list:
if analysis_class.fix:
is_pre_run = pre_run and analysis_class.pre_run
is_new_file = new_files and analysis_class.new_files
is_all_files = analysis_class.all_files
if new_files and is_all_files:
kwargs['only_new_files'] = False
kwargs['new_file_list'] = []
if is_pre_run or first_run or is_new_file:
analysis_class(self).do_and_fix_analysis(**kwargs)
self.matrix = vm.VendorMatrix(display_log=False)
return self.fixes_to_run
class AnalyzeBase(object):
name = ''
fix = False
pre_run = False
new_files = False
all_files = False
def __init__(self, analyze_class=None):
self.aly = analyze_class
self.matrix = self.aly.matrix
def do_analysis(self):
self.not_implemented_warning('do_analysis')
def fix_analysis(self, aly_dict, write=True):
self.not_implemented_warning('fix_analysis')
return None
def not_implemented_warning(self, func_name):
logging.warning('{} function not implemented for: {}'.format(
func_name, self.name))
def do_and_fix_analysis(self, only_new_files=False, new_file_list=None):
self.do_analysis()
aly_dict = self.aly.find_in_analysis_dict(self.name)
if (len(aly_dict) > 0 and 'data' in aly_dict[0]
and len(aly_dict[0]['data']) > 0):
aly_dict = aly_dict[0]['data']
if only_new_files:
df = pd.DataFrame(aly_dict)
df = df[df[vmc.vendorkey].isin(new_file_list)]
aly_dict = df.to_dict(orient='records')
self.aly.fixes_to_run = True
self.fix_analysis(pd.DataFrame(aly_dict))
def add_to_analysis_dict(self, df, msg):
self.aly.add_to_analysis_dict(
key_col=self.name, message=msg, data=df.to_dict())
class CheckAutoDictOrder(AnalyzeBase):
name = Analyze.change_auto_order
fix = True
new_files = True
@staticmethod
def get_vendor_list(col=dctc.VEN):
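        """
        Returns unique values for the given dictionary column from the
        translation config, excluding 'nan', '0' and 'None' placeholders.
        """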
tc = dct.DictTranslationConfig()
tc.read(dctc.filename_tran_config)
ven_list = []
if dctc.DICT_COL_NAME not in tc.df.columns:
return ven_list
tdf = tc.df[tc.df[dctc.DICT_COL_NAME] == col]
for col in [dctc.DICT_COL_VALUE, dctc.DICT_COL_NVALUE]:
new_ven_list = tdf[col].unique().tolist()
ven_list = list(set(ven_list + new_ven_list))
ven_list = [x for x in ven_list if x not in ['nan', '0', 'None']]
return ven_list
def do_analysis_on_data_source(self, source, df, ven_list=None,
cou_list=None):
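        """
        Checks one data source's auto dictionary order for a misplaced
        vendor position.  Only applies when country directly follows vendor
        in the current order.  Splits placements on underscores, scores each
        position by vendor/country name matches, and appends a proposed new
        order to df when the best scoring position differs.
        """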
if vmc.autodicord not in source.p:
return df
if not ven_list:
ven_list = self.get_vendor_list()
if not cou_list:
cou_list = self.get_vendor_list(dctc.COU)
auto_dict_idx = (source.p[vmc.autodicord].index(dctc.VEN)
if dctc.VEN in source.p[vmc.autodicord] else None)
auto_order = source.p[vmc.autodicord]
if not auto_dict_idx or (len(auto_order) <= (auto_dict_idx + 1)):
return df
cou_after_ven = auto_order[auto_dict_idx + 1] == dctc.COU
if not cou_after_ven:
return df
tdf = source.get_raw_df()
if dctc.FPN not in tdf.columns or tdf.empty:
return df
auto_place = source.p[vmc.autodicplace]
if auto_place == dctc.PN:
auto_place = source.p[vmc.placement]
tdf = pd.DataFrame(tdf[auto_place].str.split('_').to_list())
max_idx = 0
max_val = 0
ven_counts = 0
for col in tdf.columns:
cou_counts = tdf[col].isin(cou_list).sum()
total = ven_counts + cou_counts
if total > max_val:
max_val = total
max_idx = col - 1
ven_counts = tdf[col].isin(ven_list).sum()
if auto_dict_idx and max_idx != auto_dict_idx and max_val > 0:
diff = auto_dict_idx - max_idx
if diff > 0:
new_order = auto_order[diff:]
else:
new_order = (diff * -1) * [dctc.MIS] + auto_order
data_dict = {vmc.vendorkey: source.key, self.name: new_order}
if df is None:
df = []
df.append(data_dict)
return df
def do_analysis(self):
data_sources = self.matrix.get_all_data_sources()
df = []
ven_list = self.get_vendor_list()
cou_list = self.get_vendor_list(dctc.COU)
for ds in data_sources:
df = self.do_analysis_on_data_source(ds, df, ven_list, cou_list)
df = pd.DataFrame(df)
if df.empty:
msg = 'No new proposed order.'
else:
msg = 'Proposed new order by key as follows:'
logging.info('{}\n{}'.format(msg, df.to_string()))
self.aly.add_to_analysis_dict(key_col=self.name,
message=msg, data=df.to_dict())
def fix_analysis_for_data_source(self, source_aly_dict, write=True):
vk = source_aly_dict[vmc.vendorkey]
new_order = '|'.join(source_aly_dict[self.name])
logging.info('Changing order for {} to {}'.format(vk, new_order))
data_source = self.aly.matrix.get_data_source(vk)
try:
os.remove(os.path.join(utl.dict_path,
data_source.p[vmc.filenamedict]))
except FileNotFoundError as e:
logging.warning('File not found error: {}'.format(e))
self.aly.matrix.vm_change_on_key(vk, vmc.autodicord, new_order)
if write:
self.aly.matrix.write()
def fix_analysis(self, aly_dict, write=True):
aly_dict = aly_dict.to_dict(orient='records')
for x in aly_dict:
self.fix_analysis_for_data_source(x, write=write)
if write:
self.aly.matrix.write()
return self.aly.matrix.vm_df
class CheckFirstRow(AnalyzeBase):
name = Analyze.blank_lines
fix = True
new_files = True
all_files = True
new_first_line = 'new_first_line'
def find_first_row(self, source, df):
"""
finds the first row in a raw file where any column in FPN appears
loops through only first 10 rows in case of major error
source -> an item from the VM
new_first_row -> row to be found
If first row is incorrect, returns a data frame containing:
vendor key and new_first_row
returns empty df otherwise
"""
l_df = df
if vmc.filename not in source.p:
return l_df
raw_file = source.p[vmc.filename]
place_cols = source.p[dctc.FPN]
place_cols = [s.strip('::') if s.startswith('::')
else s for s in place_cols]
old_first_row = int(source.p[vmc.firstrow])
df = utl.import_read_csv(raw_file, nrows=10)
if df.empty:
return l_df
for idx in range(len(df)):
tdf = utl.first_last_adj(df, idx, 0)
check = [x for x in place_cols if x in tdf.columns]
if check:
if idx == old_first_row:
break
new_first_row = str(idx)
data_dict = pd.DataFrame({vmc.vendorkey: [source.key],
self.new_first_line: [new_first_row]})
l_df = pd.concat([data_dict, l_df], ignore_index=True)
break
return l_df
def do_analysis(self):
data_sources = self.matrix.get_all_data_sources()
df = pd.DataFrame()
for source in data_sources:
df = self.find_first_row(source, df)
if df.empty:
msg = 'All first and last rows seem correct'
else:
msg = 'Suggested new row adjustments:'
logging.info('{}\n{}'.format(msg, df.to_string()))
self.aly.add_to_analysis_dict(key_col=self.name,
message=msg, data=df.to_dict())
def fix_analysis_for_data_source(self, source, write=True):
"""
Plugs in new first line from aly dict to the VM
source -> data source from aly dict (created from find_first_row)
"""
vk = source[vmc.vendorkey]
new_first_line = source[self.new_first_line]
if int(new_first_line) > 0:
logging.info('Changing {} {} to {}'.format(
vk, vmc.firstrow, new_first_line))
self.aly.matrix.vm_change_on_key(vk, vmc.firstrow, new_first_line)
if write:
self.aly.matrix.write()
self.matrix = vm.VendorMatrix(display_log=False)
def fix_analysis(self, aly_dict, write=True):
aly_dict = aly_dict.to_dict(orient='records')
for x in aly_dict:
self.fix_analysis_for_data_source(x, write=write)
if write:
self.aly.matrix.write()
self.matrix = vm.VendorMatrix(display_log=False)
return self.aly.matrix.vm_df
class CheckPackageCapping(AnalyzeBase):
name = Analyze.package_cap
cap_name = Analyze.cap_name
package_vendor_good = 'Package_Vendor_Good'
package_vendor_bad = 'Package_Vendor_Bad'
plan_net_temp = 'Planned Net Cost - TEMP'
net_cost_capped = 'Net Cost (Capped)'
pre_run = True
fix = False
def initialize_cap_file(self):
"""
gets Cap Config file and the file to be capped
        df -> raw data appended with cap file (will later be grouped more cleanly)
temp_package_cap -> the column name to be capped on, stated in cap file
c -> config file
pdf -> cap file data
cap_file -> MetricCap() object
"""
df = self.aly.df
df = cal.net_cost_calculation(df).reset_index(drop=True)
cap_file = cal.MetricCap()
df = cap_file.apply_all_caps(df, final_calculation=False)
temp_package_cap = cap_file.c[cap_file.proc_dim]
return df, temp_package_cap, cap_file.c, cap_file.pdf, cap_file
def check_package_cap(self, df, temp_package_cap):
"""
Checks if a package used for capping has reached or exceeded its cap
Prints to logfile
Make sure cap file exists, set as pdf and append to our dataframe
temp_package_cap -> column name we are capping on from raw file
'plan_net_temp -> how much we cap,taken from raw file
"""
cols = [temp_package_cap, self.plan_net_temp, vmc.cost]
missing_cols = [x for x in cols if x not in df.columns]
if any(missing_cols):
logging.warning('Missing columns: {}'.format(missing_cols))
return pd.DataFrame()
df = df[cols]
df = df.groupby([temp_package_cap])
df = df.apply(lambda x:
0 if x[self.plan_net_temp].sum() == 0
else x[vmc.cost].sum() / x[self.plan_net_temp].sum())
f_df = df[df >= 1]
if f_df.empty:
delivery_msg = 'No Packages have exceeded their cap'
logging.info(delivery_msg)
self.aly.add_to_analysis_dict(
key_col=self.cap_name,
param=self.aly.under_delivery_col,
message=delivery_msg)
return f_df
else:
del_p = f_df.apply(lambda x: "{0:.2f}%".format(x * 100))
delivery_msg = 'The following packages have delivered in full: '
logging.info('{}\n{}'.format(delivery_msg, del_p))
data = del_p.reset_index().rename(columns={0: 'Cap'})
self.aly.add_to_analysis_dict(
key_col=self.cap_name,
param=self.aly.full_delivery_col,
message=delivery_msg,
data=data.to_dict())
o_df = f_df[f_df > 1.5]
if not o_df.empty:
del_p = o_df.apply(lambda x:
"{0:.2f}%".format(x * 100))
delivery_msg = 'The following packages have over-delivered:'
logging.info('{}\n{}'.format(delivery_msg, del_p))
data = del_p.reset_index().rename(columns={0: 'Cap'})
self.aly.add_to_analysis_dict(
key_col=self.cap_name,
param=self.aly.over_delivery_col,
message=delivery_msg,
data=data.to_dict())
return data
def check_package_vendor(self, df, temp_package_cap, pdf):
"""
Warns if the package cap file will affect multiple vendors
creates dataframe grouped by cap and vendor
counts unique members,
if there are more vendors than there are caps, raise a warning
return df of packages with multiple vendors associated
"""
cols = [dctc.VEN, vmc.vendorkey, dctc.PN, temp_package_cap,
self.plan_net_temp, vmc.cost]
missing_cols = [x for x in cols if x not in df.columns]
if any(missing_cols) or df.empty:
logging.warning('Missing columns: {}'.format(missing_cols))
return pd.DataFrame()
df = df[cols]
try:
df = df.groupby([temp_package_cap, dctc.VEN])
except ValueError as e:
logging.warning('ValueError as follows: {}'.format(e))
return pd.DataFrame()
try:
df = df.size().reset_index(name='count')
except ValueError as e:
logging.warning('ValueError as follows: {}'.format(e))
return pd.DataFrame()
df = df[[temp_package_cap, dctc.VEN]]
if (temp_package_cap not in df.columns or
temp_package_cap not in pdf.columns):
return pd.DataFrame()
df = df[df[temp_package_cap].isin(pdf[temp_package_cap])]
df = df[df.duplicated(subset=temp_package_cap, keep=False)]
if not df.empty:
delivery_msg = ('One or more of the packages you are capping on is '
'associated with multiple vendors')
logging.warning('{}\n{}'.format(delivery_msg, df))
self.aly.add_to_analysis_dict(key_col=self.name,
param=self.aly.package_vendor_bad,
message=delivery_msg,
data=df.to_dict())
return df
else:
delivery_msg = "All packages are capping on a single vendor"
logging.info('{}\n{}'.format(delivery_msg, df))
self.aly.add_to_analysis_dict(key_col=self.name,
param=self.aly.package_vendor_good,
message=delivery_msg,
data=df.to_dict())
return df
def fix_package_vendor(self, temp_package_cap, c, pdf, cap_file,
write=None, aly_dict=None):
"""
Takes in capped packages that are associated with more than one vendor
Changes their names to be unique
Translates all instances in dictionaries to match
"""
df = aly_dict
if not df.empty:
t_df = pd.DataFrame({dctc.DICT_COL_NAME: [],
dctc.DICT_COL_VALUE: [],
dctc.DICT_COL_NVALUE: [],
dctc.DICT_COL_FNC: [],
dctc.DICT_COL_SEL: [], 'index': []})
t_df[dctc.DICT_COL_SEL] = df[dctc.VEN]
t_df[dctc.DICT_COL_NAME] = temp_package_cap
t_df[dctc.DICT_COL_VALUE] = df[temp_package_cap]
for temp_package_cap in df[[temp_package_cap]]:
df[temp_package_cap] = df[temp_package_cap] + '-' + df[dctc.VEN]
df[c[cap_file.file_metric]] = pdf[self.plan_net_temp]
df = df[[temp_package_cap, c[cap_file.file_metric]]]
df = df.fillna(0)
path = c[cap_file.file_name]
df = pd.concat([pdf, df])
df.to_csv(path, index=False, encoding='utf-8')
t_df[dctc.DICT_COL_NVALUE] = df[temp_package_cap].copy()
t_df[dctc.DICT_COL_FNC] = 'Select::mpVendor'
t_df = t_df[[dctc.DICT_COL_NAME, dctc.DICT_COL_VALUE,
dctc.DICT_COL_NVALUE, dctc.DICT_COL_FNC,
dctc.DICT_COL_SEL]]
if write:
translation = dct.DictTranslationConfig()
translation.read(dctc.filename_tran_config)
translation.df = pd.concat([translation.df, t_df])
translation.write(translation.df, dctc.filename_tran_config)
fix_msg = 'Automatically changing capped package names:'
logging.info('{}\n{}'.format(fix_msg, t_df))
return t_df
def do_analysis(self):
try:
df, temp_package_cap, c, pdf, cap_file = self.initialize_cap_file()
except TypeError:
logging.debug("cap config file is missing")
return None
except AttributeError:
logging.debug("one of the files may be empty")
return None
except KeyError:
logging.debug("mpPlacement name does not exist")
return None
self.check_package_cap(df, temp_package_cap)
self.check_package_vendor(df, temp_package_cap, pdf)
def fix_analysis(self, aly_dict, write=True):
try:
df, temp_package_cap, c, pdf, cap_file = self.initialize_cap_file()
except TypeError:
logging.debug("cap config file is missing")
return None
except AttributeError:
logging.debug("one of the files may be empty")
return None
except KeyError:
logging.debug("mpPlacement name does not exist")
return None
self.fix_package_vendor(temp_package_cap, c, pdf, cap_file,
write=write, aly_dict=aly_dict)
class FindPlacementNameCol(AnalyzeBase):
name = Analyze.placement_col
fix = True
new_files = True
@staticmethod
def do_analysis_on_data_source(source, df):
if vmc.filename not in source.p:
return pd.DataFrame()
file_name = source.p[vmc.filename]
first_row = source.p[vmc.firstrow]
transforms = str(source.p[vmc.transform]).split(':::')
transforms = [x for x in transforms if x.split('::')[0]
in ['FilterCol', 'MergeReplaceExclude']]
p_col = source.p[vmc.placement]
if os.path.exists(file_name):
tdf = source.get_raw_df(nrows=first_row + 3)
if tdf.empty and transforms:
tdf = source.get_raw_df()
tdf = tdf.drop([vmc.fullplacename], axis=1, errors='ignore')
if tdf.empty:
return df
tdf = tdf.applymap(
lambda x: str(x).count('_')).apply(lambda x: sum(x))
max_col = tdf.idxmax()
max_exists = max_col in tdf
p_exists = p_col in tdf
no_p_check = (not p_exists and max_exists)
p_check = (max_exists and p_exists and
tdf[max_col] >= (tdf[p_col] + 9)
and 75 <= tdf[max_col] <= 105)
if no_p_check or p_check:
data_dict = {vmc.vendorkey: source.key,
'Current Placement Col': p_col,
'Suggested Col': max_col}
df.append(data_dict)
return df
def do_analysis(self):
self.matrix = vm.VendorMatrix(display_log=False)
data_sources = self.matrix.get_all_data_sources()
df = []
for source in data_sources:
df = self.do_analysis_on_data_source(source, df)
df = pd.DataFrame(df)
if df.empty:
msg = ('Placement Name columns look correct. '
'No columns w/ more breakouts.')
logging.info('{}'.format(msg))
else:
msg = ('The following data sources have more breakouts in '
'another column. Consider changing placement name '
'source:')
logging.info('{}\n{}'.format(msg, df.to_string()))
self.aly.add_to_analysis_dict(key_col=self.name,
message=msg, data=df.to_dict())
def fix_analysis_for_data_source(self, source_aly_dict, write=True,
col=vmc.placement):
vk = source_aly_dict[vmc.vendorkey]
new_col = source_aly_dict['Suggested Col']
logging.info('Changing {} {} to {}'.format(vk, col, new_col))
self.aly.matrix.vm_change_on_key(vk, col, new_col)
if write:
self.aly.matrix.write()
def fix_analysis(self, aly_dict, write=True):
aly_dict = aly_dict.to_dict(orient='records')
for x in aly_dict:
self.fix_analysis_for_data_source(x, False)
if write:
self.aly.matrix.write()
return self.aly.matrix.vm_df
class CheckApiDateLength(AnalyzeBase):
"""Checks APIs for max date length and splits data sources if necessary."""
name = Analyze.max_api_length
fix = True
pre_run = True
def do_analysis(self):
"""
Loops through all data sources checking and flagging through those that
are too long. Those sources are added to a df and the analysis dict
"""
vk_list = []
data_sources = self.matrix.get_all_data_sources()
max_date_dict = {
vmc.api_amz_key: 60, vmc.api_szk_key: 60, vmc.api_db_key: 60,
vmc.api_tik_key: 30, vmc.api_ttd_key: 80, vmc.api_sc_key: 30,
vmc.api_amd_key: 30}
data_sources = [x for x in data_sources if 'API_' in x.key]
for ds in data_sources:
if 'API_' in ds.key:
key = ds.key.split('_')[1]
if key in max_date_dict.keys():
max_date = max_date_dict[key]
date_range = (ds.p[vmc.enddate] - ds.p[vmc.startdate]).days
if date_range > (max_date - 3):
vk_list.append(ds.key)
mdf = pd.DataFrame({vmc.vendorkey: vk_list})
mdf[self.name] = ''
if vk_list:
msg = 'The following APIs are within 3 days of their max length:'
logging.info('{}\n{}'.format(msg, vk_list))
mdf[self.name] = mdf[vmc.vendorkey].str.split(
'_').str[1].replace(max_date_dict)
else:
msg = 'No APIs within 3 days of max length.'
logging.info('{}'.format(msg))
self.add_to_analysis_dict(df=mdf, msg=msg)
def fix_analysis(self, aly_dict, write=True):
"""
Takes data sources that are too long and splits them based on date.
:param aly_dict: a df containing items to fix
:param write: boolean will write the vm as csv when true
:returns: the vm as a df
"""
tdf = aly_dict.to_dict(orient='records')
df = self.aly.matrix.vm_df
for x in tdf:
vk = x[vmc.vendorkey]
logging.info('Duplicating vendor key {}'.format(vk))
max_date_length = x[self.name]
ndf = df[df[vmc.vendorkey] == vk].reset_index(drop=True)
ndf = utl.data_to_type(ndf, date_col=[vmc.startdate])
new_sd = ndf[vmc.startdate][0] + dt.timedelta(
days=max_date_length - 3)
if new_sd.date() >= dt.datetime.today().date():
new_sd = dt.datetime.today() - dt.timedelta(days=3)
new_str_sd = new_sd.strftime('%Y-%m-%d')
ndf.loc[0, vmc.startdate] = new_str_sd
ndf.loc[0, vmc.enddate] = ''
new_vk = '{}_{}'.format(vk, new_str_sd)
ndf.loc[0, vmc.vendorkey] = new_vk
file_type = os.path.splitext(ndf[vmc.filename][0])[1].lower()
new_fn = '{}{}'.format(new_vk.replace('API_', '').lower(),
file_type)
ndf.loc[0, vmc.filename] = new_fn
idx = df[df[vmc.vendorkey] == vk].index
df.loc[idx, vmc.vendorkey] = df.loc[
idx, vmc.vendorkey][idx[0]].replace('API_', '')
old_ed = new_sd - dt.timedelta(days=1)
df.loc[idx, vmc.enddate] = old_ed.strftime('%Y-%m-%d')
df = pd.concat([df, ndf]).reset_index(drop=True)
self.aly.matrix.vm_df = df
if write:
self.aly.matrix.write()
return self.aly.matrix.vm_df
class CheckColumnNames(AnalyzeBase):
"""Checks raw data for column names and reassigns if necessary."""
name = Analyze.raw_columns
fix = True
new_files = True
all_files = True
def do_analysis(self):
"""
Loops through all data sources adds column names and flags if
missing active metrics.
"""
self.matrix = vm.VendorMatrix(display_log=False)
data_sources = self.matrix.get_all_data_sources()
data = []
for source in data_sources:
if vmc.firstrow not in source.p:
continue
first_row = source.p[vmc.firstrow]
transforms = str(source.p[vmc.transform]).split(':::')
transforms = [x for x in transforms if x.split('::')[0]
in ['FilterCol', 'MergeReplaceExclude']]
missing_cols = []
tdf = source.get_raw_df(nrows=first_row+5)
if tdf.empty and transforms:
tdf = source.get_raw_df()
cols = [str(x) for x in tdf.columns if str(x) != 'nan']
active_metrics = source.get_active_metrics()
active_metrics[vmc.placement] = [source.p[vmc.placement]]
for k, v in active_metrics.items():
for c in v:
if c not in cols:
missing_cols.append({k: c})
data_dict = {vmc.vendorkey: source.key, self.name: cols,
'missing': missing_cols}
data.append(data_dict)
df = pd.DataFrame(data)
df = df.fillna('')
update_msg = 'Columns and missing columns by key as follows:'
logging.info('{}\n{}'.format(update_msg, df))
self.add_to_analysis_dict(df=df, msg=update_msg)
def fix_analysis(self, aly_dict, write=True):
"""
Adjusts placement name and auto dict order of data sources when those
values are missing.
:param aly_dict: a df containing items to fix
:param write: boolean will write the vm as csv when true
:returns: the vm as a df
"""
aly_dicts = aly_dict.to_dict(orient='records')
self.matrix = vm.VendorMatrix(display_log=False)
df = self.aly.matrix.vm_df
aly_dicts = [x for x in aly_dicts
if x['missing'] and x[Analyze.raw_columns]]
for aly_dict in aly_dicts:
vk = aly_dict[vmc.vendorkey]
source = self.matrix.get_data_source(vk)
placement_missing = [x for x in aly_dict['missing'] if
vmc.placement in x.keys()]
if placement_missing:
logging.info('Placement name missing for {}. '
'Attempting to find.'.format(vk))
fnc = FindPlacementNameCol(self.aly)
tdf = fnc.do_analysis_on_data_source(source, [])
if tdf:
tdf = tdf[0]
for col in [vmc.placement, vmc.fullplacename]:
fnc.fix_analysis_for_data_source(tdf, True, col)
self.matrix = vm.VendorMatrix(display_log=False)
source = self.matrix.get_data_source(vk)
cad = CheckAutoDictOrder(self.aly)
tdf = cad.do_analysis_on_data_source(source, [])
tdf = pd.DataFrame(tdf)
if not tdf.empty:
tdf = tdf.to_dict(orient='records')[0]
cad.fix_analysis_for_data_source(tdf, True)
self.matrix = vm.VendorMatrix(display_log=False)
date_missing = [x for x in aly_dict['missing'] if
vmc.date in x.keys()]
if date_missing:
logging.info('Date col missing for {}. '
'Attempting to find.'.format(vk))
tdf = source.get_raw_df()
for col in tdf.columns:
try:
tdf[col] = utl.data_to_type(tdf[col].reset_index(),
date_col=[col])[col]
                    except Exception:
tdf[col] = pd.NaT
date_col = (tdf.isnull().sum() * 100 / len(tdf)).idxmin()
logging.info('Changing {} date col to {} '.format(vk, date_col))
self.aly.matrix.vm_change_on_key(vk, vmc.date, date_col)
self.aly.matrix.write()
self.matrix = vm.VendorMatrix(display_log=False)
self.aly.matrix.vm_df = df
if write:
self.aly.matrix.write()
return self.aly.matrix.vm_df
class CheckFlatSpends(AnalyzeBase):
"""Checks for past flat packages reassigns placement date if necessary."""
name = Analyze.missing_flat
first_click_col = 'First Click Date'
error_col = 'Error'
missing_clicks_error = 'No Clicks'
placement_date_error = 'Incorrect Placement Date'
missing_rate_error = 'Missing Buy Rate'
fix = True
pre_run = True
def merge_first_click_date(self, df, tdf, groups):
df = df.merge(tdf.drop_duplicates(),
on=groups,
how='left', indicator=True)
df = df.drop(columns=['Clicks_y'])
df = df.rename(columns={vmc.date: self.first_click_col,
'Clicks_x': vmc.clicks})
df = df.astype({vmc.clicks: str})
df[dctc.PD] = df[dctc.PD].dt.strftime('%Y-%m-%d %H:%M:%S')
df[self.first_click_col] = df[
self.first_click_col].dt.strftime('%Y-%m-%d %H:%M:%S')
return df
def find_missing_flat_spend(self, df):
"""
Checks for flat packages w/ no attributed cost past placement date.
Sorts into missing clicks, no buy model, or wrong placement date.
"""
pn_groups = [dctc.VEN, dctc.COU, dctc.PN, dctc.PKD, dctc.PD, dctc.BM,
dctc.BR, vmc.date]
metrics = [cal.NCF, vmc.clicks]
metrics = [metric for metric in metrics if metric in df.columns]
df = self.aly.generate_df_table(pn_groups, metrics, sort=None,
data_filter=None, df=df)
df.reset_index(inplace=True)
if dctc.BM in df.columns:
df = df[(df[dctc.BM] == cal.BM_FLAT) |
(df[dctc.BM] == cal.BM_FLAT2)]
if not df.empty:
pk_groups = [dctc.VEN, dctc.COU, dctc.PKD]
tdf = df.groupby(pk_groups).sum(numeric_only=True)
tdf.reset_index(inplace=True)
tdf = tdf[tdf[cal.NCF] == 0]
df = df.merge(tdf[pk_groups], how='right')
if not df.empty:
pn_groups.remove(vmc.date)
tdf = df[df[vmc.clicks] > 0]
tdf = tdf.groupby(pn_groups).min()
tdf.reset_index(inplace=True)
if cal.NCF not in tdf:
return pd.DataFrame()
tdf = tdf.drop(columns=[cal.NCF])
tdf = utl.data_to_type(tdf, date_col=[dctc.PD, vmc.date])
df = df.groupby(pn_groups).sum(numeric_only=True)
df.reset_index(inplace=True)
df = utl.data_to_type(df, date_col=[dctc.PD])
df = self.merge_first_click_date(df, tdf, pn_groups)
df = utl.data_to_type(df, date_col=[dctc.PD])
rdf = df[df[dctc.BR] == 0]
if not rdf.empty:
rdf = rdf.drop(columns='_merge')
rdf[self.error_col] = self.missing_rate_error
df = df[df[dctc.PD] <= dt.datetime.today()]
if not df.empty:
cdf = df[df['_merge'] == 'both']
cdf = cdf.iloc[:, :-1]
cdf = cdf[cdf[self.first_click_col] != cdf[dctc.PD]]
cdf[self.error_col] = self.placement_date_error
ndf = df[df['_merge'] == 'left_only']
ndf = ndf.drop(columns=['_merge'])
ndf[self.error_col] = self.missing_clicks_error
df = pd.concat([cdf, rdf], ignore_index=True)
df = pd.concat([df, ndf], ignore_index=True)
df = df.reset_index(drop=True)
df = df.dropna(how='all')
df_cols = [x for x in df.columns if x != '_merge']
df = df[df_cols]
for col in df.columns:
try:
df[col] = df[col].fillna('')
except TypeError as e:
logging.warning('Error for {}: {}'.format(col, e))
df = utl.data_to_type(df, str_col=[dctc.PD, self.first_click_col])
return df
def do_analysis(self):
df = self.aly.df
rdf = self.find_missing_flat_spend(df)
if rdf.empty:
msg = ('All flat packages with clicks past their placement date '
'have associated net cost.')
logging.info('{}'.format(msg))
else:
msg = ('The following flat packages are not calculating net cost '
'for the following reasons:')
logging.info('{}\n{}'.format(msg, rdf.to_string()))
self.add_to_analysis_dict(df=rdf, msg=msg)
def fix_analysis(self, aly_dict, write=True):
"""
Translates flat packages w/ missing spends placement date to first w/
clicks.
:param aly_dict: a df containing items to fix
:param write: boolean will write the translational_dict as csv when true
:returns: the lines added to translational_dict
"""
if (aly_dict.empty or self.placement_date_error
not in aly_dict[self.error_col].values):
return pd.DataFrame()
translation = dct.DictTranslationConfig()
translation.read(dctc.filename_tran_config)
translation_df = translation.get()
aly_dicts = aly_dict.to_dict(orient='records')
tdf = pd.DataFrame(columns=translation_df.columns)
for aly_dict in aly_dicts:
if aly_dict[self.error_col] == self.placement_date_error:
old_val = aly_dict[dctc.PD].strip('00:00:00').strip()
new_val = aly_dict[
self.first_click_col].strip('00:00:00').strip()
try:
trans = [[dctc.PD, old_val, new_val,
'Select::' + dctc.PN,
aly_dict[dctc.PN]]]
row = pd.DataFrame(trans, columns=translation_df.columns)
tdf = pd.concat([tdf, row], ignore_index=True)
except ValueError:
trans = [[dctc.PD, old_val, new_val,
'Select::' + dctc.PN,
aly_dict[dctc.PN], 0]]
row = pd.DataFrame(trans, columns=translation_df.columns)
tdf = pd.concat([tdf, row], ignore_index=True)
translation_df = pd.concat([translation_df, tdf], ignore_index=True)
if write:
translation.write(translation_df, dctc.filename_tran_config)
return tdf
class CheckDoubleCounting(AnalyzeBase):
"""
Checks for double counting datasources.
If double counting all placements, removes metric from one of the
datasources.
"""
name = Analyze.double_counting_all
error_col = 'Error'
double_counting_all = 'All'
double_counting_partial = 'Partial'
tmp_col = 'temp'
metric_col = 'Metric'
total_placement_count = 'Total Num Placements'
num_duplicates = 'Num Duplicates'
fix = True
pre_run = True
def count_unique_placements(self, df, col):
df = df.groupby([dctc.VEN, vmc.vendorkey, dctc.PN]).size()
df = df.reset_index().rename(columns={0: self.tmp_col})
df = df.groupby([dctc.VEN, vmc.vendorkey]).size()
df = df.reset_index().rename(columns={0: col})
return df
def find_metric_double_counting(self, df):
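        """
        Finds vendor/metric combinations where the same placement and date
        report a metric from more than one data source.  Returns a df of
        the vendor keys involved flagged as duplicating all or only some
        placements.
        """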
rdf = pd.DataFrame()
groups = [dctc.VEN, vmc.vendorkey, dctc.PN, vmc.date]
metrics = [cal.NCF, vmc.impressions, vmc.clicks, vmc.video_plays,
vmc.views, vmc.views25, vmc.views50, vmc.views75,
vmc.views100]
metrics = [metric for metric in metrics if metric in df.columns]
df = self.aly.generate_df_table(groups, metrics, sort=None,
data_filter=None, df=df)
df.reset_index(inplace=True)
if df.empty:
return df
sdf = self.count_unique_placements(df, self.total_placement_count)
sdf = sdf.groupby(dctc.VEN).max().reset_index()
df = df[df.duplicated(subset=[dctc.VEN, dctc.PN, vmc.date], keep=False)]
if not df.empty:
for metric in metrics:
tdf = df[df[metric] > 0]
tdf = tdf[tdf.duplicated(
subset=[dctc.PN, vmc.date], keep=False)]
if not tdf.empty:
tdf = self.count_unique_placements(tdf, self.num_duplicates)
tdf[self.metric_col] = metric
rdf = pd.concat([rdf, tdf], ignore_index=True)
if not rdf.empty:
rdf = sdf[[dctc.VEN, self.total_placement_count]].merge(
rdf, how='inner', on=dctc.VEN)
rdf = rdf.groupby([dctc.VEN, self.metric_col,
self.total_placement_count,
self.num_duplicates])[vmc.vendorkey].apply(
lambda x: ','.join(x)).reset_index()
rdf = rdf.groupby([dctc.VEN, self.metric_col, vmc.vendorkey,
self.num_duplicates]).max().reset_index()
rdf[self.error_col] = np.where(
rdf[self.total_placement_count] == rdf[self.num_duplicates],
self.double_counting_all, self.double_counting_partial)
return rdf
def do_analysis(self):
df = self.aly.df
rdf = self.find_metric_double_counting(df)
if rdf.empty:
msg = ('No datasources are double counting placements for any '
'metric.')
logging.info('{}'.format(msg))
else:
msg = ('The following datasources are double counting the following'
' metrics on all or some placements:')
logging.info('{}\n{}'.format(msg, rdf.to_string()))
self.add_to_analysis_dict(df=rdf, msg=msg)
@staticmethod
def remove_metric(vm_df, vk, metric):
if metric == cal.NCF:
metric = vmc.cost
idx = vm_df[vm_df[vmc.vendorkey] == vk].index
vm_df.loc[idx, metric] = ''
logging.info('Removing {} from {}.'.format(metric, vk))
return vm_df
@staticmethod
def update_rule(vm_df, vk, metric, vendor, idx, query_str, metric_str):
if metric == cal.NCF:
metric = vmc.cost
if vendor not in str(vm_df.loc[idx, query_str].values):
vm_df.loc[idx, query_str] = (
vm_df.loc[idx, query_str][idx[0]] + ',' + vendor)
if not (metric in str(vm_df.loc[idx, metric_str].values)):
vm_df.loc[idx, metric_str] = (
vm_df.loc[idx, metric_str][idx[0]] +
'|' + metric)
logging.info('Adding rule for {} to remove {} {}.'.format(
vk, vendor, metric))
return vm_df
@staticmethod
def add_rule(vm_df, vk, rule_num, idx, metric, vendor):
if metric == cal.NCF:
metric = vmc.cost
metric_str = "_".join([utl.RULE_PREF, str(rule_num), utl.RULE_METRIC])
query_str = "_".join([utl.RULE_PREF, str(rule_num), utl.RULE_QUERY])
factor_str = "_".join([utl.RULE_PREF, str(rule_num), utl.RULE_FACTOR])
vm_df.loc[idx, factor_str] = 0.0
vm_df.loc[idx, metric_str] = ('POST' + '::' + metric)
vm_df.loc[idx, query_str] = (dctc.VEN + '::' + vendor)
        logging.info('Adding rule for {} to remove {} {}.'.format(
            vk, vendor, metric))
return vm_df
def fix_all(self, aly_dict):
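        """
        Removes double counting for rows where every placement is duplicated.
        Raw style sources (api_raw_key / api_gs_key) have the metric blanked
        in the vendormatrix; ad server sources (api_szk_key / api_dc_key)
        instead get a post rule zeroing the metric for that vendor.
        """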
aly_dict = aly_dict.sort_values(by=[dctc.VEN, self.metric_col])
metric_buckets = {
'ctr_metrics': [vmc.impressions, vmc.clicks],
'vtr_metrics': [
vmc.views25, vmc.views50, vmc.views75, vmc.views100],
'video_play_metrics': [vmc.video_plays, vmc.views],
'net_cost_metrics': [cal.NCF]
}
vm_df = self.aly.matrix.vm_df
logging.info('Attempting to remove double counting.')
for index, row in aly_dict.iterrows():
vks = row[vmc.vendorkey].split(',')
raw_vks = [x for x in vks if vmc.api_raw_key in x
or vmc.api_gs_key in x]
serve_vks = [x for x in vks if vmc.api_szk_key in x
or vmc.api_dc_key in x]
first_empty = None
added = False
bucket = [k for k, v in metric_buckets.items()
if row[self.metric_col] in v]
if not bucket:
bucket = row[self.metric_col]
else:
bucket = bucket[0]
for vk in raw_vks:
if len(vks) > 1:
vm_df = self.remove_metric(vm_df, vk, row[self.metric_col])
vks.remove(vk)
for vk in serve_vks:
if len(vks) > 1:
idx = vm_df[vm_df[vmc.vendorkey] == vk].index
for i in range(1, 7):
metric_str = "_".join(
[utl.RULE_PREF, str(i), utl.RULE_METRIC])
query_str = "_".join(
[utl.RULE_PREF, str(i), utl.RULE_QUERY])
if ([x for x in metric_buckets[bucket]
if x in str(vm_df.loc[idx, metric_str].values)]):
vm_df = self.update_rule(
vm_df, vk, row[self.metric_col],
row[dctc.VEN], idx, query_str, metric_str)
added = True
break
if not vm_df.loc[idx, query_str].any():
if not first_empty:
first_empty = i
continue
if not added:
if first_empty:
self.add_rule(vm_df, vk, first_empty, idx,
row[self.metric_col], row[dctc.VEN])
else:
logging.warning('No empty rules for {}. Could not '
'auto-fix double counting.'
.format(vk))
vks.remove(vk)
self.aly.matrix.vm_df = vm_df
return vm_df
def fix_analysis(self, aly_dict, write=True):
"""
Removes duplicate metrics if all placements duplicated.
Prioritizes removal from rawfiles first, adservers otherwise.
:param aly_dict: a df containing items to fix
:param write: boolean will write the vendormatrix as csv when true
:returns: the vendormatrix as a df
"""
if aly_dict.empty:
return pd.DataFrame()
self.fix_all(
aly_dict[aly_dict[self.error_col] == self.double_counting_all])
if write:
self.aly.matrix.write()
return self.aly.matrix.vm_df
class GetPacingAnalysis(AnalyzeBase):
name = Analyze.delivery_comp_col
fix = False
pre_run = False
delivery_col = 'Delivery'
proj_completion_col = 'Projected Full Delivery'
pacing_goal_col = '% Through Campaign'
@staticmethod
def get_rolling_mean_df(df, value_col, group_cols):
"""
Gets rolling means to project delivery from
:param df: a df containing dates and desired values/groups
:param value_col: values to calculate rolling means of
:param group_cols: column breakouts to base rolling means on
:returns: df w/ groups cols, value_cols, and 3,7,30 day rolling means
"""
if df.empty:
logging.warning('Dataframe empty, could not get rolling mean.')
return df
pdf = pd.pivot_table(df, index=vmc.date, columns=group_cols,
values=value_col, aggfunc=np.sum)
if len(pdf.columns) > 10000:
logging.warning('Maximum 10K combos for calculation, data set '
'has {}'.format(len(pdf.columns)))
return pd.DataFrame()
df = pdf.unstack().reset_index().rename(columns={0: value_col})
for x in [3, 7, 30]:
ndf = pdf.rolling(
window=x, min_periods=1).mean().unstack().reset_index().rename(
columns={0: '{} rolling {}'.format(value_col, x)})
df = df.merge(ndf, on=group_cols + [vmc.date])
return df
def project_delivery_completion(self, df, average_df, plan_names,
final_cols):
"""
Use rolling means to project delivery completion date.
:param df: df where planned costs greater than net
:param average_df: return df from get_rolling_mean_df
:param plan_names: planned net full placement columns
:param final_cols: desired columns in final df
:returns: original df w/ added projected completion column
"""
df = df.merge(average_df, how='left', on=plan_names)
df['days'] = (df[dctc.PNC] - df[vmc.cost]) / df[
'{} rolling {}'.format(vmc.cost, 3)]
df['days'] = df['days'].replace(
[np.inf, -np.inf], np.nan).fillna(10000)
df['days'] = np.where(df['days'] > 10000, 10000, df['days'])
df[self.proj_completion_col] = pd.to_datetime(
df[vmc.date]) + pd.to_timedelta(
np.ceil(df['days']).astype(int), unit='D')
no_date_map = ((df[self.proj_completion_col] >
dt.datetime.today() + dt.timedelta(days=365)) |
(df[self.proj_completion_col] <
dt.datetime.today() - dt.timedelta(days=365)))
df[self.proj_completion_col] = df[
self.proj_completion_col].dt.strftime('%Y-%m-%d')
df.loc[
no_date_map, self.proj_completion_col] = 'Greater than 1 Year'
df[self.proj_completion_col] = df[
self.proj_completion_col].replace(
[np.inf, -np.inf, np.datetime64('NaT'), 'NaT'], np.nan
).fillna('Greater than 1 Year')
df = df[final_cols]
return df
def get_actual_delivery(self, df):
"""
Calculate delivery metrics
:param df: df w/ topline planned and actual spend metrics
:returns: original df w/ delivery and pacing metrics
"""
df[self.delivery_col] = (df[vmc.cost] / df[dctc.PNC] * 100).round(2)
df[self.delivery_col] = df[self.delivery_col].replace(
[np.inf, -np.inf], np.nan).fillna(0)
df[self.delivery_col] = df[self.delivery_col].astype(str) + '%'
df[self.pacing_goal_col] = ((pd.Timestamp.today(None) - df[dctc.SD]
) / (df[dctc.ED] - df[dctc.SD])
* 100).round(2)
df[self.pacing_goal_col] = np.where(
df[self.pacing_goal_col] > 100, 100, df[self.pacing_goal_col])
df[self.pacing_goal_col] = df[self.pacing_goal_col].replace(
[np.inf, -np.inf], np.nan).fillna(0)
df[self.pacing_goal_col] = df[self.pacing_goal_col].astype(str) + '%'
return df
def get_pacing_analysis(self, df):
"""
Calculate topline level pacing data for use in pacing table and alerts.
:param df: full output df
"""
if df.empty:
            logging.warning('Dataframe empty, could not get pacing analysis.')
return pd.DataFrame()
plan_names = self.matrix.vendor_set(vm.plan_key)[vmc.fullplacename]
average_df = self.get_rolling_mean_df(
df=df, value_col=vmc.cost, group_cols=plan_names)
if average_df.empty:
msg = ('Average df empty, maybe too large. '
'Could not project delivery completion.')
logging.warning(msg)
self.aly.add_to_analysis_dict(key_col=self.aly.delivery_comp_col,
message=msg)
return pd.DataFrame()
last_date = dt.datetime.strftime(
dt.datetime.today() - dt.timedelta(days=1), '%Y-%m-%d')
average_df = average_df[average_df[vmc.date] == last_date]
average_df = average_df.drop(columns=[vmc.cost])
start_dates, end_dates = self.aly.get_start_end_dates(
df, plan_names)
cols = [vmc.cost, dctc.PNC, vmc.AD_COST]
missing_cols = [x for x in cols if x not in df.columns]
if missing_cols:
logging.warning('Missing columns: {}'.format(missing_cols))
return pd.DataFrame()
df = df.groupby(plan_names)[cols].sum()
df = df.reset_index()
df = df[(df[vmc.cost] > 0) | (df[dctc.PNC] > 0)]
tdf = df[df[dctc.PNC] > df[vmc.cost]]
df = df[df[dctc.PNC] <= df[vmc.cost]]
final_cols = (plan_names + [dctc.PNC] + [vmc.cost] + [vmc.AD_COST] +
[self.proj_completion_col])
if not tdf.empty:
tdf = self.project_delivery_completion(
tdf, average_df, plan_names, final_cols)
if not df.empty:
df[self.proj_completion_col] = [
'No Planned' if x == 0 else 'Delivered' for x in df[dctc.PNC]]
df = df[final_cols]
if not tdf.empty:
tdf = tdf.merge(
df, how='outer', on=final_cols)
else:
tdf = df
over_delv = self.aly.find_in_analysis_dict(
self.delivery_col, param=self.aly.over_delivery_col)
if over_delv:
df = pd.DataFrame(over_delv[0]['data'])
tdf = tdf.merge(
df[plan_names], on=plan_names, how='left', indicator=True)
tdf[self.proj_completion_col] = [
'Over Delivered' if tdf['_merge'][x] == 'both'
else tdf[self.proj_completion_col][x] for x in tdf.index]
tdf = tdf.drop(columns=['_merge'])
tdf = tdf.merge(start_dates, how='left', on=plan_names)
tdf = tdf.merge(end_dates, how='left', on=plan_names)
tdf = self.get_actual_delivery(tdf)
final_cols = (plan_names + [dctc.SD] + [dctc.ED] + [vmc.cost] +
[dctc.PNC] + [self.delivery_col] +
[self.proj_completion_col] + [self.pacing_goal_col] +
[vmc.AD_COST])
final_cols = [x for x in final_cols if x in tdf.columns]
tdf = tdf[final_cols]
tdf = tdf.replace([np.inf, -np.inf], np.nan).fillna(0)
tdf = utl.data_to_type(
tdf, float_col=[vmc.cost, dctc.PNC, vmc.AD_COST])
for col in [dctc.PNC, vmc.cost, vmc.AD_COST]:
tdf[col] = '$' + tdf[col].round(2).astype(str)
for col in [dctc.SD, dctc.ED]:
tdf[col] = [str(0) if x == 0
else str(pd.to_datetime(x).date()) for x in tdf[col]]
return tdf
def do_analysis(self):
df = self.aly.df
df = self.get_pacing_analysis(df)
if df.empty:
msg = 'Could not calculate pacing data.'
logging.info('{}'.format(msg))
else:
msg = ('Projected delivery completion and current pacing '
'is as follows:')
logging.info('{}\n{}'.format(msg, df.to_string()))
self.aly.add_to_analysis_dict(key_col=self.aly.delivery_comp_col,
message=msg, data=df.to_dict())
class GetDailyDelivery(AnalyzeBase):
name = Analyze.placement_col
fix = False
pre_run = False
num_days = 'Num Days'
daily_spend_goal = 'Daily Spend Goal'
day_pacing = 'Day Pacing'
def get_daily_delivery(self, df):
"""
Get daily delivery data for each unique planned net level breakout
:param df: full output df
"""
daily_dfs = []
if df.empty:
            logging.warning('Dataframe empty, cannot get daily delivery.')
return daily_dfs
plan_names = self.matrix.vendor_set(vm.plan_key)[vmc.fullplacename]
start_dates, end_dates = self.aly.get_start_end_dates(df, plan_names)
pdf_cols = plan_names + [dctc.PNC, dctc.UNC]
pdf = self.matrix.vendor_get(vm.plan_key)
pdf = pdf[pdf_cols]
groups = plan_names + [vmc.date]
metrics = [cal.NCF]
df = df.groupby(groups)[metrics].sum().reset_index()
df = utl.data_to_type(df, date_col=[vmc.date])
unique_breakouts = df.groupby(plan_names).first().reset_index()
unique_breakouts = unique_breakouts[plan_names]
sort_ascending = [True for _ in plan_names]
sort_ascending.append(False)
for index, row in unique_breakouts.iterrows():
tdf = df
for x in plan_names:
tdf = tdf[tdf[x] == row[x]]
tdf = tdf.merge(start_dates, how='left', on=plan_names)
tdf = tdf.merge(end_dates, how='left', on=plan_names)
tdf = tdf.merge(pdf, how='left', on=plan_names)
tdf = utl.data_to_type(tdf, float_col=[dctc.PNC])
tdf[self.num_days] = (tdf[dctc.ED] - tdf[dctc.SD]).dt.days
tdf = tdf.replace([np.inf, -np.inf], np.nan).fillna(0)
if tdf[self.num_days][0] == 0 or tdf[dctc.PNC][0] == 0:
tdf[self.daily_spend_goal] = 0
tdf[self.day_pacing] = '0%'
else:
daily_spend_goal = (tdf[dctc.PNC][0] / tdf[self.num_days][0])
stop_date = (tdf[dctc.SD][0] +
dt.timedelta(days=int(tdf[self.num_days][0])))
tdf[self.daily_spend_goal] = [daily_spend_goal if
(tdf[dctc.SD][0] <= x <= stop_date
) else 0 for x in tdf[vmc.date]]
tdf[self.day_pacing] = (
((tdf[cal.NCF] / tdf[self.daily_spend_goal]) - 1) * 100)
tdf[self.day_pacing] = tdf[self.day_pacing].replace(
[np.inf, -np.inf], np.nan).fillna(0.0)
tdf[self.day_pacing] = (
tdf[self.day_pacing].round(2).astype(str) + '%')
tdf = tdf.sort_values(
plan_names + [vmc.date], ascending=sort_ascending)
tdf[[dctc.SD, dctc.ED, vmc.date]] = tdf[
[dctc.SD, dctc.ED, vmc.date]].astype(str)
tdf = tdf.reset_index(drop=True)
daily_dfs.append(tdf.to_dict())
return daily_dfs
def do_analysis(self):
df = self.aly.df
dfs = self.get_daily_delivery(df)
msg = 'Daily delivery is as follows:'
self.aly.add_to_analysis_dict(key_col=self.aly.daily_delivery_col,
message=msg, data=dfs)
class GetServingAlerts(AnalyzeBase):
name = Analyze.placement_col
fix = False
pre_run = False
adserving_ratio = 'Adserving %'
prog_vendors = ['DV360', 'dv360', 'DV 360', 'Verizon', 'VERIZON']
def get_serving_alerts(self):
"""
Check for adserving overages -- over 6% of net cost (> 2 stddevs)
"""
pacing_analysis = self.aly.find_in_analysis_dict(
self.aly.delivery_comp_col)[0]
df = pd.DataFrame(pacing_analysis['data'])
plan_names = self.aly.get_plan_names()
if not plan_names:
return pd.DataFrame()
final_cols = plan_names + [vmc.cost, vmc.AD_COST, self.adserving_ratio]
if not df.empty and dctc.VEN in df:
df = utl.data_to_type(df, float_col=[vmc.cost, vmc.AD_COST])
df[self.adserving_ratio] = df.apply(
lambda row: 0 if row[vmc.cost] == 0
else (row[vmc.AD_COST] / row[vmc.cost]) * 100, axis=1)
df = df[(df[self.adserving_ratio] > 9) |
((df[self.adserving_ratio] > 6) &
~(df[dctc.VEN].isin(self.prog_vendors)))]
if not df.empty:
df[[vmc.cost, vmc.AD_COST]] = (
'$' + df[[vmc.cost, vmc.AD_COST]].round(2).astype(str))
df[self.adserving_ratio] = (
df[self.adserving_ratio].round(2).astype(str) + "%")
df = df[final_cols]
return df
def do_analysis(self):
df = self.get_serving_alerts()
if df.empty:
msg = 'No significant adserving overages.'
logging.info('{}\n{}'.format(msg, df))
else:
msg = 'Adserving cost significantly OVER for the following: '
logging.info('{}\n{}'.format(msg, df))
self.aly.add_to_analysis_dict(key_col=self.aly.adserving_alert,
message=msg, data=df.to_dict())
class CheckRawFileUpdateTime(AnalyzeBase):
name = Analyze.raw_file_update_col
update_tier_today = 'Today'
update_tier_week = 'Within A Week'
update_tier_greater_week = 'Greater Than One Week'
update_tier_never = 'Never'
update_time_col = 'update_time'
update_tier_col = 'update_tier'
last_update_does_not_exist = 'Does Not Exist'
def do_analysis(self):
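        """
        Records each data source's raw file modification time and buckets
        it into an update tier (today, within a week, greater than a week,
        or never when the file does not exist).
        """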
data_sources = self.matrix.get_all_data_sources()
df = []
for source in data_sources:
if vmc.filename not in source.p:
continue
file_name = source.p[vmc.filename]
if os.path.exists(file_name):
t = os.path.getmtime(file_name)
last_update = dt.datetime.fromtimestamp(t)
if last_update.date() == dt.datetime.today().date():
update_tier = self.update_tier_today
elif last_update.date() > (
dt.datetime.today() - dt.timedelta(days=7)).date():
update_tier = self.update_tier_week
else:
update_tier = self.update_tier_greater_week
else:
last_update = self.last_update_does_not_exist
update_tier = self.update_tier_never
data_dict = {vmc.vendorkey: source.key,
self.update_time_col: last_update,
self.update_tier_col: update_tier}
df.append(data_dict)
df = pd.DataFrame(df)
if df.empty:
return False
df[self.update_time_col] = df[self.update_time_col].astype('U')
update_msg = 'Raw File update times and tiers are as follows:'
logging.info('{}\n{}'.format(update_msg, df.to_string()))
self.aly.add_to_analysis_dict(key_col=self.name,
message=update_msg, data=df.to_dict())
class GetDailyPacingAlerts(AnalyzeBase):
name = Analyze.placement_col
fix = False
pre_run = False
day_pacing = 'Day Pacing'
def get_daily_pacing_alerts(self):
"""
Check daily pacing issues -- +/- 20% of daily pacing goal
"""
dfs_dict = self.aly.find_in_analysis_dict(
self.aly.daily_delivery_col)[0]['data']
if not dfs_dict:
            logging.warning('Dataframes empty, could not get alerts.')
return pd.DataFrame(), pd.DataFrame()
yesterday = dt.datetime.strftime(
dt.datetime.today() - dt.timedelta(days=1), '%Y-%m-%d')
over_df = pd.DataFrame(columns=pd.DataFrame(dfs_dict[0]).columns)
under_df = pd.DataFrame(columns=pd.DataFrame(dfs_dict[0]).columns)
for data in dfs_dict:
df = pd.DataFrame(data)
if not df.empty:
df = df[df[vmc.date] == yesterday]
if not df.empty:
val = df[self.day_pacing].iloc[0]
val = float(val.replace("%", ""))
if val >= 20:
over_df = pd.concat([over_df, df], ignore_index=True)
if val <= -20:
df[self.day_pacing].iloc[0] = (
df[self.day_pacing].iloc[0].replace("-", ""))
under_df = pd.concat([under_df, df], ignore_index=True)
return over_df, under_df
def do_analysis(self):
over_df, under_df = self.get_daily_pacing_alerts()
if over_df.empty:
msg = 'No significant daily pacing overages.'
logging.info('{}\n{}'.format(msg, over_df))
else:
msg = ('Yesterday\'s spend for the following exceeded '
'daily pacing goal by:')
logging.info('{}\n{}'.format(msg, over_df))
self.aly.add_to_analysis_dict(
key_col=self.aly.daily_pacing_alert, message=msg,
param=self.aly.over_daily_pace, data=over_df.to_dict())
if under_df.empty:
msg = 'No significant daily under pacing.'
logging.info('{}\n{}'.format(msg, under_df))
else:
msg = ('Yesterday\'s spend for the following under paced the '
'daily goal by:')
logging.info('{}\n{}'.format(msg, under_df))
self.aly.add_to_analysis_dict(
key_col=self.aly.daily_pacing_alert, message=msg,
param=self.aly.under_daily_pace, data=under_df.to_dict())
class ValueCalc(object):
file_name = os.path.join(utl.config_path, 'aly_grouped_metrics.csv')
metric_name = 'Metric Name'
formula = 'Formula'
operations = {'+': operator.add, '-': operator.sub, '/': operator.truediv,
'*': operator.mul, '%': operator.mod, '^': operator.xor}
def __init__(self):
self.calculations = self.get_grouped_metrics()
self.metric_names = [self.calculations[x][self.metric_name]
for x in self.calculations]
self.parse_formulas()
@staticmethod
def get_default_metrics():
metric_names = ['CTR', 'CPC', 'CPA', 'CPLP', 'CPBC', 'View to 100',
'CPCV', 'CPLPV', 'CPP', 'CPM', 'VCR', 'CPV']
formula = ['Clicks/Impressions', 'Net Cost Final/Clicks',
'Net Cost Final/Conv1_CPA', 'Net Cost Final/Landing Page',
'Net Cost Final/Button Click', 'Video Views 100/Video Views',
'Net Cost Final/Video Views 100',
'Net Cost Final/Landing Page', 'Net Cost Final/Purchase',
'Net Cost Final/Impressions', 'Video Views 100/Video Views',
'Net Cost Final/Video Views']
df = pd.DataFrame({'Metric Name': metric_names, 'Formula': formula})
return df
def get_grouped_metrics(self):
if os.path.isfile(self.file_name):
df = pd.read_csv(self.file_name)
else:
df = self.get_default_metrics()
calculations = df.to_dict(orient='index')
return calculations
def parse_formulas(self):
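        """Splits each formula string on the operator characters, keeping
        the operators, and stores the token list back on the calculation."""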
for gm in self.calculations:
formula = self.calculations[gm][self.formula]
reg_operators = '([' + ''.join(self.operations.keys()) + '])'
formula = re.split(reg_operators, formula)
self.calculations[gm][self.formula] = formula
def get_metric_formula(self, metric_name):
f = [self.calculations[x][self.formula] for x in self.calculations if
self.calculations[x][self.metric_name] == metric_name][0]
return f
def calculate_all_metrics(self, metric_names, df=None, db_translate=False):
for metric_name in metric_names:
if metric_name in self.metric_names:
df = self.calculate_metric(metric_name, df,
db_translate=db_translate)
return df
def calculate_metric(self, metric_name, df=None, db_translate=False):
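        """
        Evaluates a single grouped metric formula left to right on df.
        Impressions are divided by 1000 first when clicks are not part of
        the formula, for cost-per-mille style metrics.
        """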
col = metric_name
formula = self.get_metric_formula(metric_name)
current_op = None
if db_translate:
formula = list(utl.db_df_translation(formula).values())
for item in formula:
if item.lower() == 'impressions' and 'Clicks' not in formula:
df[item] = df[item] / 1000
if current_op:
if col in df and item in df:
df[col] = self.operations[current_op](df[col], df[item])
current_op = None
else:
logging.warning('{} missing could not calc.'.format(item))
return df
elif item in self.operations:
current_op = item
else:
if item not in df.columns:
df[item] = 0
df[col] = df[item]
return df
class AliChat(object):
openai_found = 'Here is the openai gpt response: '
openai_msg = 'I had trouble understanding but the openai gpt response is:'
found_model_msg = 'Here are some links:'
create_success_msg = 'The object has been successfully created. '
def __init__(self, config_name='openai.json', config_path='reporting'):
self.config_name = config_name
self.config_path = config_path
self.db = None
self.current_user = None
self.config = self.load_config(self.config_name, self.config_path)
@staticmethod
def load_config(config_name='openai.json', config_path='reporting'):
        file_name = os.path.join(config_path, config_name)
        # Default to an empty config so the return below cannot fail when
        # the file is missing.
        config = {}
        try:
            with open(file_name, 'r') as f:
                config = json.load(f)
        except IOError:
            logging.error('{} not found.'.format(file_name))
        return config
def get_openai_response(self, message):
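        """Sends the message as a prompt to the OpenAI Completion endpoint
        and returns the stripped response text."""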
openai.api_key = self.config['SECRET_KEY']
prompt = f"User: {message}\nAI:"
response = openai.Completion.create(
engine="text-davinci-002",
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
return response.choices[0].text.strip()
@staticmethod
def index_db_model_by_word(db_model):
word_idx = {}
db_all = db_model.query.all()
for obj in db_all:
words = utl.lower_words_from_str(obj.name)
for word in words:
if word in word_idx:
word_idx[word].append(obj.id)
else:
word_idx[word] = [obj.id]
return word_idx
@staticmethod
def convert_model_ids_to_message(db_model, model_ids, message='',
html_table=False, table_name=''):
message = message + '<br>'
html_response = ''
for idx, model_id in enumerate(model_ids):
obj = db_model.query.get(model_id)
if obj:
html_response += """
{}. <a href="{}" target="_blank">{}</a><br>
""".format(idx + 1, obj.get_url(), obj.name)
if html_table:
table_elem = obj.get_table_elem(table_name)
html_response += '<br>{}'.format(table_elem)
return message, html_response
@staticmethod
def check_db_model_table(db_model, words, model_ids):
table_response = ''
tables = [x for x in db_model.get_table_name_to_task_dict().keys()]
cur_model = db_model.query.get(next(iter(model_ids)))
cur_model_name = re.split(r'[_\s]|(?<=[a-z])(?=[A-Z])', cur_model.name)
cur_model_name = [x.lower() for x in cur_model_name]
for table in tables:
t_list = re.split(r'[_\s]|(?<=[a-z])(?=[A-Z])', table)
t_list = [x.lower() for x in t_list]
model_name = db_model.get_model_name_list()
table_match = [
x for x in t_list if
x.lower() in words and
x.lower() not in model_name and
x.lower() not in cur_model_name]
if table_match:
table_response = table
break
return table_response
def find_db_model(self, db_model, message):
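        """
        Scores db_model objects by how many non-stopword message words
        appear in their names and returns the top scoring ids along with
        the filtered word list.
        """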
word_idx = self.index_db_model_by_word(db_model)
nltk.download('stopwords')
stop_words = list(nltk.corpus.stopwords.words('english'))
words = utl.lower_words_from_str(message)
words = [x for x in words if
x not in db_model.get_model_name_list() + stop_words]
model_ids = {}
for word in words:
if word in word_idx:
new_model_ids = word_idx[word]
for new_model_id in new_model_ids:
if new_model_id in model_ids:
model_ids[new_model_id] += 1
else:
model_ids[new_model_id] = 1
if model_ids:
max_value = max(model_ids.values())
model_ids = {k: v for k, v in model_ids.items() if v == max_value}
return model_ids, words
def search_db_models(self, db_model, message, response, html_response):
model_ids, words = self.find_db_model(db_model, message)
if model_ids:
table_name = self.check_db_model_table(db_model, words, model_ids)
edit_made = self.edit_db_model(db_model, words, model_ids)
table_bool = True if table_name else False
response = self.found_model_msg
if edit_made:
response = '{}<br>{}'.format(edit_made, self.found_model_msg)
response, html_response = self.convert_model_ids_to_message(
db_model, model_ids, response, table_bool, table_name)
return response, html_response
@staticmethod
def db_model_name_in_message(message, db_model):
words = utl.lower_words_from_str(message)
db_model_name = db_model.get_model_name_list()
in_message = utl.is_list_in_list(db_model_name, words, True)
return in_message
@staticmethod
def get_parent_for_db_model(db_model, words):
parent = db_model.get_parent()
g_parent = parent.get_parent()
gg_parent = g_parent.get_parent()
prev_model = None
for parent_model in [gg_parent, g_parent, parent]:
model_name_list = parent_model.get_model_name_list()
name = utl.get_next_value_from_list(words, model_name_list)
if not name:
name_list = parent_model.get_name_list()
name = utl.get_dict_values_from_list(words, name_list, True)
if name:
name = [name[0][next(iter(name[0]))]]
else:
name = parent_model.get_default_name()
new_model = parent_model()
new_model.set_from_form({'name': name[0]}, prev_model)
new_model = new_model.check_and_add()
prev_model = new_model
return prev_model
@staticmethod
def check_children(words, new_g_child):
new_model = new_g_child.get_children()
if new_model:
new_model.check_col_in_words(new_model, words, new_g_child.id)
new_model.create_from_rules(new_model, new_g_child.id)
def create_db_model_children(self, cur_model, words):
response = ''
cur_children = cur_model.get_current_children()
db_model_child = cur_model.get_children()
child_list = db_model_child.get_name_list()
child_name = [x for x in words if x in child_list]
if not child_name and not cur_children:
child_name = child_list
if child_name:
new_child = db_model_child()
new_child.set_from_form({'name': child_name[0]}, cur_model)
self.db.session.add(new_child)
self.db.session.commit()
msg = 'The {} is named {}. '.format(
db_model_child.__name__, child_name[0])
response += msg
else:
new_child = [x for x in cur_children if x.name in words]
if not new_child:
new_child = cur_children
new_child = new_child[0]
db_model_g_child = new_child.get_children()
partner_list, partner_type_list = db_model_g_child.get_name_list()
p_list = utl.get_dict_values_from_list(words, partner_list, True)
if p_list:
response += '{}(s) added '.format(db_model_g_child.__name__)
for g_child in p_list:
g_child_name = g_child[next(iter(g_child))]
lower_name = g_child_name.lower()
post_words = words[words.index(lower_name):]
cost = [x for x in post_words if
any(y.isdigit() for y in x) and x != cur_model.name]
if cost:
cost = cost[0].replace('k', '000')
else:
cost = 0
g_child['total_budget'] = cost
new_g_child = db_model_g_child()
new_g_child.set_from_form(g_child, new_child)
self.db.session.add(new_g_child)
self.db.session.commit()
response += '{} ({}) '.format(g_child_name, cost)
self.check_children(words, new_g_child)
return response
def create_db_model(self, db_model, message, response, html_response):
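        """
        Creates a new db_model instance when the message contains a create
        keyword, naming it from the message (or the current username) and
        attaching parents and children parsed from the remaining words.
        """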
create_words = ['create', 'make', 'new']
words = utl.lower_words_from_str(message)
is_create = utl.is_list_in_list(create_words, words)
if is_create:
name_words = ['named', 'called', 'name', 'title']
name = utl.get_next_value_from_list(words, name_words)
if not name:
name = [self.current_user.username]
parent_model = self.get_parent_for_db_model(db_model, words)
new_model = db_model()
name = new_model.get_first_unique_name(name[0])
new_model.set_from_form({'name': name}, parent_model,
self.current_user.id)
self.db.session.add(new_model)
self.db.session.commit()
response = self.create_db_model_children(new_model, words)
response = '{}{}'.format(self.create_success_msg, response)
response, html_response = self.convert_model_ids_to_message(
db_model, [new_model.id], response, True)
return response, html_response
def check_db_model_col(self, db_model, words, cur_model, omit_list=None):
response = ''
if not omit_list:
omit_list = []
for k, v in db_model.__dict__.items():
check_words = re.split(r'[_\s]|(?<=[a-z])(?=[A-Z])', k)
check_words = [x for x in check_words if x]
in_list = utl.is_list_in_list(check_words, words, True, True)
if in_list:
pw = words[words.index(in_list[0]) + 1:]
skip_words = [cur_model.name.lower()] + omit_list
pw = [x for x in pw if x not in skip_words]
new_val = re.split('[?.,]', ' '.join(pw))[0].rstrip()
setattr(cur_model, k, new_val)
self.db.session.commit()
response = 'The {} for {} was changed to {}'.format(
k, cur_model.name, new_val)
break
return response
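    # Illustrative sketch of the column matching above (the second column name is
    # hypothetical): the regex splits snake_case and camelCase attribute names into
    # words before searching the message, e.g.
    #   re.split(r'[_\s]|(?<=[a-z])(?=[A-Z])', 'total_budget') -> ['total', 'budget']
    #   re.split(r'[_\s]|(?<=[a-z])(?=[A-Z])', 'totalBudget')  -> ['total', 'Budget']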
def check_children_for_edit(self, cur_model, words):
response = ''
children = cur_model.get_current_children()
omit_list = [cur_model.name]
for child in children:
lower_name = child.name.lower()
in_words = utl.is_list_in_list([lower_name], words, True)
if in_words:
response = self.check_db_model_col(
child, words, child, omit_list)
break
else:
g_children = child.get_current_children()
omit_list.append(lower_name)
for g_child in g_children:
lower_name = g_child.name.lower()
in_words = utl.is_list_in_list([lower_name], words, True)
if in_words:
response = self.check_db_model_col(
g_child, words, g_child, omit_list)
break
if not response:
response = self.check_db_model_col(cur_model, words, cur_model)
if not response:
response = self.create_db_model_children(cur_model, words)
return response
def edit_db_model(self, db_model, words, model_ids):
response = ''
edit_words = ['change', 'edit', 'adjust', 'alter', 'add']
is_edit = utl.is_list_in_list(edit_words, words)
if is_edit:
cur_model = self.db.session.get(db_model, next(iter(model_ids)))
response = self.check_children_for_edit(cur_model, words)
return response
def db_model_response_functions(self, db_model, message):
response = False
html_response = False
model_functions = [self.create_db_model, self.search_db_models]
args = [db_model, message, response, html_response]
for model_func in model_functions:
response, html_response = model_func(*args)
if response:
break
return response, html_response
def format_openai_response(self, message, pre_resp):
response = self.get_openai_response(message)
response = '{}<br>{}'.format(pre_resp, response)
html_response = ''
return response, html_response
def check_if_openai_message(self, message):
response = ''
html_response = ''
open_words = ['openai', 'chatgpt', 'gpt4', 'gpt3']
words = utl.lower_words_from_str(message)
in_message = utl.is_list_in_list(open_words, words, True)
if in_message:
response, html_response = self.format_openai_response(
message, self.openai_found)
return response, html_response
def get_response(self, message, models_to_search=None, db=None,
current_user=None):
self.db = db
self.current_user = current_user
response, html_response = self.check_if_openai_message(message)
if not response and models_to_search:
for db_model in models_to_search:
in_message = self.db_model_name_in_message(message, db_model)
if in_message:
response, html_response = self.db_model_response_functions(
db_model, message)
break
if not response:
for db_model in models_to_search:
r, hr = self.search_db_models(
db_model, message, response, html_response)
if r:
response = r
hr = '{}<br>{}'.format(
db_model.get_model_name_list()[0].upper(), hr)
if not html_response:
html_response = ''
html_response += hr
if not response:
response, html_response = self.format_openai_response(
message, self.openai_msg)
return response, html_response
| [
"User: PLACEHOLDER\nAI:"
] |
2024-01-10 | soukron/SimulationService | simulation_microservice.py | import requests
from time import time
from uuid import uuid4
import numpy as np
import re
import os
import openai
from time import time,sleep
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
def save_file(filepath, content):
with open(filepath, 'w', encoding='utf-8') as outfile:
outfile.write(content)
openai.api_key = open_file('openaiapikey.txt')
scene_dir = 'scenes/'
service_name = 'sensor_simulation'
content_prefix = 'Sensory input scene: '
tempo = 30
def gpt3_completion(prompt, engine='text-davinci-002', temp=0.7, top_p=1.0, tokens=1000, freq_pen=0.0, pres_pen=0.0, stop=['asdfasdf', 'asdasdf']):
max_retry = 5
retry = 0
prompt = prompt.encode(encoding='ASCII',errors='ignore').decode()
while True:
try:
response = openai.Completion.create(
engine=engine,
prompt=prompt,
temperature=temp,
max_tokens=tokens,
top_p=top_p,
frequency_penalty=freq_pen,
presence_penalty=pres_pen,
stop=stop)
text = response['choices'][0]['text'].strip()
            text = re.sub(r'\s+', ' ', text)
filename = '%s_gpt3.txt' % time()
save_file('gpt3_logs/%s' % filename, prompt + '\n\n==========\n\n' + text)
return text
except Exception as oops:
retry += 1
if retry >= max_retry:
return "GPT3 error: %s" % oops
print('Error communicating with OpenAI:', oops)
sleep(1)
def get_embedding(payload): # payload is a list of strings
# payload example: ['bacon bacon bacon', 'ham ham ham']
# response example: [{'string': 'bacon bacon bacon', 'vector': '[1, 1 ... ]'}, {'string': 'ham ham ham', 'vector': '[1, 1 ... ]'}]
# embedding is already rendered as a JSON-friendly string
url = 'http://127.0.0.1:999' # currently the USEv5 service, about 0.02 seconds per transaction!
response = requests.request(method='POST', url=url, json=payload)
return response.json()
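def _embedding_similarity_example(text_a, text_b):
    # Minimal sketch (not part of the original service): the vectors returned by
    # get_embedding() are JSON-formatted strings (see the comments above), so they
    # must be parsed before any vector math. Assumes the embedding service on port
    # 999 is running; this helper is never called by the service itself.
    import json
    pair = get_embedding([text_a, text_b])
    a = np.array(json.loads(pair[0]['vector']), dtype=float)
    b = np.array(json.loads(pair[1]['vector']), dtype=float)
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))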
def nexus_send(payload): # REQUIRED: content
url = 'http://127.0.0.1:8888/add'
payload['time'] = time()
payload['uuid'] = str(uuid4())
payload['content'] = content_prefix + payload['content']
embeddings = get_embedding([payload['content']])
payload['vector'] = embeddings[0]['vector']
payload['service'] = service_name
response = requests.request(method='POST', url=url, json=payload)
print(response.text)
def nexus_search(payload):
url = 'http://127.0.0.1:8888/search'
response = requests.request(method='POST', url=url, json=payload)
return response.json()
def nexus_bound(payload):
url = 'http://127.0.0.1:8888/bound'
response = requests.request(method='POST', url=url, json=payload)
#print(response)
return response.json()
def nexus_save():
url = 'http://127.0.0.1:8888/save'
response = requests.request(method='POST', url=url)
print(response.text)
def find_actions(memories):
for m in memories:
if m['service'] == 'executive_action':
return m['content']
return None # no actions detected in memories
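# Illustrative sketch of the memory records find_actions() expects (field names match
# those written by nexus_send(); the values are made up):
#   [{'service': 'sensor_simulation', 'content': 'Sensory input scene: ...', 'time': 0.0, 'uuid': '...'},
#    {'service': 'executive_action', 'content': 'Stand up and stretch.', 'time': 0.0, 'uuid': '...'}]
# find_actions() would return 'Stand up and stretch.' for this list.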
if __name__ == '__main__':
new_scene = 'Two men are sitting at a stone chess table in Central Park. They are playing chess. The sun is shining and birds are singing. It is a summer day. Children are running and playing in the distance. Horns honking and the bustle of New York can be heard in the background.'
backstory = new_scene
while True:
last_scene = new_scene
# generate event
prompt = open_file('prompt_event.txt').replace('<<SCENE>>', last_scene).replace('<<STORY>>', backstory).replace('<<RARITY>>', 'likely')
event = gpt3_completion(prompt)
filename = '%s_event.txt' % time()
save_file(scene_dir + filename, event)
nexus_send({'content': event})
# incorporate actions from the nexus
payload = {'lower_bound': time() - tempo, 'upper_bound': time()}
memories = nexus_bound(payload)
action = find_actions(memories)
if action:
event = event + '\nAction I will take: %s' % action
print('\n\nEVENT:', event)
# new scene
prompt = open_file('prompt_scene.txt').replace('<<SCENE>>', last_scene).replace('<<EVENT>>', event).replace('<<STORY>>', backstory)
new_scene = gpt3_completion(prompt)
print('\n\nSCENE:', new_scene)
# save scene
filename = '%s_scene.txt' % time()
save_file(scene_dir + filename, new_scene)
nexus_send({'content': new_scene})
# summarize backstory up to this point
backstory = (backstory + ' ' + event + ' ' + new_scene).strip()
prompt = open_file('prompt_concise_summary.txt').replace('<<STORY>>', backstory)
backstory = gpt3_completion(prompt)
print('\n\nBACKSTORY:', backstory)
# wait
sleep(tempo) | [
"<<RARITY>>",
"prompt_event.txt",
"prompt_scene.txt",
"ignore",
"prompt_concise_summary.txt"
] |
2024-01-10 | lirabenjamin/eaai_sparklearn_sim | simulate%20conversations.py | import openai
import pandas as pd
import numpy as np
import concurrent.futures
import os
import datetime
import dotenv
dotenv.load_dotenv()
output_dir = "data/simulated_conversations5"
TEMP = 1
openai.api_key = os.getenv("OPENAI_KEY")
data = pd.read_csv("data/data50_w_correct.csv")
ids = data['UserId'].tolist()
prompts = essays = data['prompt'].tolist()
def simulate_conversation(id, prompt):
response = openai.ChatCompletion.create(
model="gpt-4",
temperature=TEMP,
messages=[
{"role": "system", "content": prompt},
{"role": "user", "content": "Generate one full conversation between the student and the tutor"},
]
)
result = response.choices[0].message.content
now = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
with open(f"{output_dir}/{id}_{now}_temp{TEMP}.txt", "w") as f:
f.write(result)
simulate_conversation(ids[0], essays[0])
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(simulate_conversation, ids, essays)
def read_all_files_to_dataframe(directory):
all_files = [os.path.join(directory, file) for file in os.listdir(directory) if file.endswith('.txt')]
df_list = []
for filename in all_files:
with open(filename, 'r') as f:
content = f.read()
df_list.append({"filename": filename, "content": content})
return pd.DataFrame(df_list)
combined_df = read_all_files_to_dataframe(output_dir)
combined_df.columns = ["id", "content"]
combined_df["id"] = combined_df["id"].str.replace("data/simulated_conversations5/", "")
combined_df["id"] = combined_df["id"].str.replace(".txt", "")
combined_df[["id", "timestamp", "temp"]] = combined_df["id"].str.split("_", expand=True)
combined_df.to_parquet("data/simulated_conversations5.parquet")
# Now rate them for motivation
prompt = """
I will show you an exchange between a student learning math and an intelligent tutoring system.
Your goal is to pay attention to what the student is saying, and estimate how this student is feeling with regards to five motivational states. Score them on a scale from 0 to 10.
Confidence: How confident is the student in their ability to solve the problem?
Frustration: How frustrated is the student with their learning experience?
Boredom: How bored is the student with their learning experience?
Curiosity/Interest: How interested/curious is the student about the topic?
Engagement: How engaged is the student with the learning experience?
Your response should be formatted as a python dictionary, with the five motivational states as keys, and the scores as values.
"""
output_dir = "data/conversations5_ratings"
def rate_conversation(id, prompt, conversation):
response = openai.ChatCompletion.create(
model="gpt-4",
temperature=TEMP,
messages=[
{"role": "system", "content": prompt},
{"role": "user", "content": f"Here is the conversation:\n{conversation}"},
]
)
result = response.choices[0].message.content
now = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
with open(f"{output_dir}/{id}_{now}_temp{TEMP}.txt", "w") as f:
f.write(result)
conversations = combined_df["content"].tolist()
rate_conversation(ids[0], prompt, conversations[0])
with concurrent.futures.ThreadPoolExecutor() as executor:
executor.map(rate_conversation, ids, [prompt] * len(ids), conversations)
combined_df = read_all_files_to_dataframe(output_dir)
combined_df.columns = ["id", "content"]
combined_df["id"] = combined_df["id"].str.replace("data/conversations5_ratings/", "")
combined_df["id"] = combined_df["id"].str.replace(".txt", "")
combined_df[["id", "timestamp", "temp"]] = combined_df["id"].str.split("_", expand=True)
# unroll the dictionary
import ast
combined_df["content"] = combined_df["content"].apply(ast.literal_eval)
df = pd.DataFrame(combined_df.content.tolist())
# combine df and combined_df
df = pd.concat([combined_df, df], axis=1)
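# Illustrative sketch of the unrolling above (values are made up): a row whose
# 'content' is {'Confidence': 7, 'Frustration': 2, ...} contributes five rating
# columns that sit alongside the original id/timestamp/temp columns after the concat.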
df.to_parquet("data/conversations5_ratings.parquet")
# do ratings match ground truth?
df = pd.read_parquet("data/conversations5_ratings.parquet")
ground_truth = pd.read_csv("data/data50_w_correct.csv")[["UserId", "confidence", "frustration", "boredom", "curiosity", "engagement"]]
# join by id and UserId
df['id'] = df['id'].astype(int)
df = df.merge(ground_truth, left_on="id", right_on="UserId")
# get correlations between ground truth and ratings
df[["confidence", "frustration", "boredom", "curiosity", "engagement"]].corrwith(df[["Confidence", "Frustration", "Boredom", "Curiosity/Interest", "Engagement"]])
print(df[["confidence", "frustration", "boredom", "curiosity", "engagement"]].isnull().sum())
print(df[["Confidence", "Frustration", "Boredom", "Curiosity/Interest", "Engagement"]].isnull().sum())
print(df[["confidence", "frustration", "boredom", "curiosity", "engagement"]].nunique())
print(df[["Confidence", "Frustration", "Boredom", "Curiosity/Interest", "Engagement"]].nunique())
print((df[["confidence", "frustration", "boredom", "curiosity", "engagement"]].index == df[["Confidence", "Frustration", "Boredom", "Curiosity/Interest", "Engagement"]].index).all())
print(df["confidence"].corr(df["Confidence"]))
print(df["frustration"].corr(df["Frustration"]))
print(df["boredom"].corr(df["Boredom"]))
print(df["curiosity"].corr(df["Curiosity/Interest"]))
print(df["engagement"].corr(df["Engagement"]))
df[['confidence', 'frustration', 'boredom', 'curiosity', 'engagement','Confidence', 'Frustration', 'Boredom', 'Curiosity/Interest', 'Engagement']].corr()
df.to_csv("data/conversations5_gptratings_and_truth.csv", index=False) | [
"Generate one full conversation between the student and the tutor",
"\nI will show you an exchange between a student learning math and an intelligent tutoring system. \nYour goal is to pay attention to what the student is saying, and estimate how this student is feeling with regards to five motivational states. Score them on a scale from 0 to 10.\nConfidence: How confident is the student in their ability to solve the problem?\nFrustration: How frustrated is the student with their learning experience?\nBoredom: How bored is the student with their learning experience?\nCuriosity/Interest: How interested/curious is the student about the topic?\nEngagement: How engaged is the student with the learning experience?\n\nYour response should be formatted as a python dictionary, with the five motivational states as keys, and the scores as values.\n",
"Here is the conversation:\nPLACEHOLDER"
] |
2024-01-10 | TakehikoEsaka/odekakekun | project~backend~users~routes~suggest.py | from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session
import pandas as pd
from pathlib import Path
from users import models
from users.database import get_db
from users import oauth2
import os
import json
from dotenv import load_dotenv
import openai
from io import StringIO
import uuid
load_dotenv(Path(__file__).resolve().parent.parent.parent / Path(".env"), verbose=True)
try:
openai.api_key = json.loads(os.environ.get("OPENAI_API_KEY"))["OPENAI_API_KEY"]
except Exception:
openai.api_key = os.environ.get("OPENAI_API_KEY")
router = APIRouter()
# def get_suggest(db: Session, email: str):
# suggests = db.query(models.Suggest).filter(models.Suggest.email == email).first()
# if not suggests:
# raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f'Suggests with {email} not found')
# return suggests
def ask_chatgpt(question):
try:
response = openai.ChatCompletion.create(model="gpt-3.5-turbo",
messages=[{"role": "user", "content": question}],
timeout=1).choices[0]["message"]["content"].strip()
df = pd.read_csv(StringIO(response), sep='|', header=0, skiprows=[1], skipinitialspace=True)
        # Column names sometimes contain whitespace, so strip it
df.columns = df.columns.str.strip()
        # Extra columns are sometimes generated, so keep only the needed ones and rename them to English
df = df[["場所名", "説明", "距離"]].rename(columns={"場所名": "suggest_place", "説明": "suggest_description", "距離": "suggest_distance"})
        # Values sometimes contain whitespace, so strip it
for col in df.columns:
df[col] = df[col].str.strip()
return df.to_dict(orient='dict')
except Exception as e:
print(e)
return None
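def _demo_parse_markdown_table():
    # Minimal sketch (the sample text is made up, not a real ChatGPT response) showing
    # how ask_chatgpt() parses a pipe-separated table: the first row is the header, the
    # '---' separator row is skipped via skiprows=[1], and whitespace is trimmed afterwards.
    sample = "場所名|距離|説明\n---|---|---\n鎌倉|約60km|歴史ある寺社が多い街\n"
    return pd.read_csv(StringIO(sample), sep='|', header=0, skiprows=[1],
                       skipinitialspace=True)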
@router.post("/api/suggest", tags=["suggest"])
def suggest(place: str, time: str, way: str, current_user: models.UserInfo = Depends(oauth2.get_current_active_user), db: Session = Depends(get_db)):
question = "{}から{}以内で{}を使っていけるおすすめの場所を3つ表形式で教えて下さい。場所名・距離・説明を列にして下さい".format(place, time, way)
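    # (The Japanese prompt above asks: "Please suggest, in table form, three recommended
    # places reachable from {place} within {time} using {way}; use place name, distance,
    # and description as the columns.")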
    # Get the answer from ChatGPT here
print("guess start")
answer = ask_chatgpt(question)
print("guess end")
print("answer is ", answer)
if answer is None:
print("answer is None")
return None
    # TODO: store a Google Maps link in the model as well
    # Save the suggestions to the DB only when the user is logged in; skip otherwise
if current_user:
question_uuid = str(uuid.uuid4())
new_suggests = []
for i in range(len(answer["suggest_place"])):
suggest_place = answer["suggest_place"][i]
suggest_description = answer["suggest_description"][i]
suggest_distance = answer["suggest_distance"][i]
new_suggests.append({
"user_id": current_user.user_id,
"question_uuid": question_uuid,
"place": place,
"time": time,
"way": way,
"suggest_place": suggest_place,
"suggest_description": suggest_description,
"suggest_distance": suggest_distance})
db.bulk_insert_mappings(models.Suggest, new_suggests)
# db.bulk_update_mappings(models.Suggest, new_suggests)
db.commit()
else:
pass
return answer
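# Illustrative shape of the value returned by suggest() above (place names are made up):
# a column-oriented dict from DataFrame.to_dict(orient='dict'), keyed by row index, e.g.
#   {'suggest_place': {0: '鎌倉', 1: '箱根', 2: '江ノ島'},
#    'suggest_description': {0: '...', 1: '...', 2: '...'},
#    'suggest_distance': {0: '約60km', 1: '約85km', 2: '約55km'}}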
@router.get("/api/get_all_suggest", tags=["suggest"])
def get_suggest(current_user: models.UserInfo = Depends(oauth2.get_current_active_user), db: Session = Depends(get_db)):
if current_user:
user = db.query(models.UserInfo).filter(models.UserInfo.user_id == current_user.user_id).first()
df = pd.DataFrame(columns=["question_uuid", "place", "time", "way", "suggest_place"])
for s in user.suggestions[-1:-16:-1]:
df = pd.concat([df, pd.DataFrame([{"question_uuid": s.question_uuid,
"place": s.place,
"time": s.time,
"way": s.way,
"suggest_place": s.suggest_place}])], ignore_index=True)
# print("last 9 histories is following" , df)
return df.to_dict(orient="records")
else:
        return None
| [] |
2024-01-10 | CodeHero0/Nondeterminism-of-ChatGPT-in-Code-Generation | Modify_HumanEval.py | import json
import os
import openai
import re
import subprocess
log_file = './log/demo.log'
problem_list = []
model = 'gpt-3.5-turbo'
topn = 5
temperature = float(1)
openai.api_key = ''
# for_list = [32, 38, 44, 50, 53, 65, 66, 68, 71, 74, 76, 79, 83, 84, 88, 89, 91, 92, 93, 94, 100, 101, 105, 107, 108, 109, 111, 113, 114, 115, 116, 122, 123, 126, 128, 132, 133, 140, 143, 145, 151, 152, 154, 157, 159, 160, 163]
def run_test_case(i):
test_cases = test_case_dic[problem_list[i]['task_id']]
demo_file = 'demo.py'
with open(demo_file, 'w') as f:
f.write(problem_list[i]['prompt'] + problem_list[i]['canonical_solution'])
call_demo_file = 'call_demo.py'
unpassed_test_case = []
for j in range(len(test_cases)):
if test_cases[j]['relation'] == '==':
with open('./call_demo.py', 'w') as f:
f.write('from %s import %s\nprint(%s(%s))' % (
demo_file.split('.')[0],
problem_list[i]['entry_point'],
problem_list[i]['entry_point'],
test_cases[j]['input']
))
try:
output = subprocess.run(["python", call_demo_file], capture_output=True, text=True, timeout=3)
except subprocess.TimeoutExpired as e:
print(e, flush=True)
unpassed_test_case.append([j, 'Timeout'])
continue
except Exception as e:
print(e, flush=True)
unpassed_test_case.append([j, 'Exception'])
continue
if test_cases[j]['output'].strip() != output.stdout.strip():
unpassed_test_case.append([j, 'false'])
else:
unpassed_test_case.append([j, 'True'])
else:
if '$input$' in test_cases[j]['relation'] or '$demo$' in test_cases[j]['relation']:
with open('./call_demo.py', 'w') as f:
f.write('from %s import %s\n%s' % (
demo_file.split('.')[0],
problem_list[i]['entry_point'],
test_cases[j]['relation'].replace('$input$', str(test_cases[j]['input'])).replace('$demo$', demo_file.split('.')[0])
))
else:
with open('./call_demo.py', 'w') as f:
f.write('from %s import %s\nprint(%s)' % (demo_file.split('.')[0],
problem_list[i]['entry_point'],
test_cases[j]['relation'].replace('candidate', problem_list[i]['entry_point'])))
try:
output = subprocess.run(["python", call_demo_file], capture_output=True, text=True, timeout=3)
except subprocess.TimeoutExpired as e:
print(e, flush=True)
unpassed_test_case.append([j, 'Timeout'])
continue
except Exception as e:
print(e, flush=True)
unpassed_test_case.append([j, 'Exception'])
continue
if output.stdout.strip() != 'True':
unpassed_test_case.append([j, 'false'])
else:
unpassed_test_case.append([j, 'True'])
if len(set([i[1] for i in unpassed_test_case])) == 1 and unpassed_test_case[0][1] == 'True':
print('ALL TRUE')
print(unpassed_test_case)
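# Illustrative sketch of the generated call_demo.py for a plain '==' test case
# (the entry point and argument below are examples, not taken from a specific problem):
#   from demo import some_entry_point
#   print(some_entry_point([1, 2, 3]))
# run_test_case() then compares the captured stdout against the expected output string.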
def description_2_code(description, model, topn, temperature):
prompt = 'Generate Python3 code (Markdown):\n'
completion = openai.ChatCompletion.create(
model=model,
n=topn,
temperature=temperature,
messages=[{"role": "user",
"content": prompt + description},
]
)
response_list = []
for i in completion['choices']:
response_list.append(i['message']['content'])
return response_list
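# Minimal usage sketch (assumes openai.api_key has been set above): request `topn`
# Markdown-formatted completions for one HumanEval prompt, e.g.
#   codes = description_2_code(problem_list[0]['prompt'], model, topn, temperature)
#   assert len(codes) == topn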
with open('./HumanEval/HumanEval.jsonl', 'r') as f:
for line in f.readlines():
problem_list.append(json.loads(line))
def demo():
i = 0
while not input():
print(problem_list[i]['task_id'])
print(problem_list[i]['test'])
# print(problem_list[i]['prompt'] + problem_list[i]['canonical_solution']+problem_list[i]['test'])
i += 1
def test(i):
print(problem_list[i]['task_id'])
print(problem_list[i]['test'])
test_case_dic = {}
p = r'candidate\((.*?)\) == (.*)'
# p = r'candidate\((.*?)\)\s+==\s+(.*)'
for problem in problem_list:
name = problem['task_id']
testcase = []
for line in problem['test'].split('assert'):
if 'candidate(' in line:
# input
p1 = re.search(p, line)
if p1:
# print('input:'+p1.group(1))
# print('output:'+p1.group(2))
output = p1.group(2).strip()
if ('\'' in output[0] and '\'' in output[-1]) or \
('\"' in output[0] and '\"' in output[-1]):
res = {
'input': p1.group(1),
'output': output[1:-1],
'relation': '=='
}
else:
res = {
'input': p1.group(1),
'output': p1.group(2),
'relation': '=='
}
testcase.append(res)
test_case_dic[name] = testcase
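# Illustrative sketch of the extraction above: an assert line such as
#   assert candidate([1, 2, 3]) == [1, 2, 3]
# yields {'input': '[1, 2, 3]', 'output': '[1, 2, 3]', 'relation': '=='}; outputs that
# are quoted strings additionally have their surrounding quotes stripped.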
test_case_dic['HumanEval/1'] = [
{'input': "'(()()) ((())) () ((())()())'", 'output': "['(()())', '((()))', '()', '((())()())']", 'relation': '=='},
{'input': "'() (()) ((())) (((())))'", 'output': "['()', '(())', '((()))', '(((())))']", 'relation': '=='},
{'input': "'(()(())((())))'", 'output': "['(()(())((())))']", 'relation': '=='},
{'input': "'( ) (( )) (( )( ))'", 'output': "['()', '(())', '(()())']", 'relation': '=='}
]
test_case_dic['HumanEval/2'] = [
{'input': '3.5', 'output': '0.5', 'relation': '=='},
{'input': '1.33', 'output': '1e-6', 'relation': 'abs(candidate(1.33) - 0.33) < 1e-6'},
{'input': '123.456', 'output': '1e-6', 'relation': 'abs(candidate(123.456) - 0.456) < 1e-6'}
]
test_case_dic['HumanEval/4'] = [
{'input': '[1.0, 2.0, 3.0]', 'output': '1e-6', 'relation': 'abs(candidate([1.0, 2.0, 3.0]) - 2.0/3.0) < 1e-6'},
{'input': '[1.0, 2.0, 3.0, 4.0]', 'output': '1e-6', 'relation': 'abs(candidate([1.0, 2.0, 3.0, 4.0]) - 1.0) < 1e-6'},
{'input': '[1.0, 2.0, 3.0, 4.0, 5.0]', 'output': '1e-6', 'relation': 'abs(candidate([1.0, 2.0, 3.0, 4.0, 5.0]) - 6.0/5.0) < 1e-6'}
]
test_case_dic['HumanEval/8'][3] = {'input': '[3, 5, 7]', 'output': '(15, 105)', 'relation': '=='}
test_case_dic['HumanEval/33'] = [
{'input': '[5, 6, 3, 4, 8, 9, 2]', 'output': '[2, 6, 3, 4, 8, 9, 5]', 'relation': '=='},
{'input': '[5, 8, 3, 4, 6, 9, 2]', 'output': '[2, 8, 3, 4, 6, 9, 5]', 'relation': '=='},
{'input': '[5, 6, 9, 4, 8, 3, 2]', 'output': '[2, 6, 9, 4, 8, 3, 5]', 'relation': '=='},
{'input': '[5, 6, 3, 4, 8, 9, 2, 1]', 'output': '[2, 6, 3, 4, 8, 9, 5, 1]', 'relation': '=='}
]
test_case_dic['HumanEval/37'] = [
{'input': '[1, 2, 3]', 'output': '[1, 2, 3]', 'relation': '=='},
{'input': '[5, 3, -5, 2, -3, 3, 9, 0, 123, 1, -10]', 'output': '[-10, 3, -5, 2, -3, 3, 5, 0, 9, 1, 123]', 'relation': '=='},
{'input': '[5, 8, -12, 4, 23, 2, 3, 11, 12, -10]', 'output': '[-12, 8, 3, 4, 5, 2, 12, 11, 23, -10]', 'relation': '=='}
]
test_case_dic['HumanEval/32'] = [{'input': [-10, -2], 'output': 1.1641532182693481e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-3, -6, -7, 7], 'output': 9.76619674020185e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [8, 3], 'output': 5.820766091346741e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-10, -8], 'output': 4.656612873077393e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-3, 6, 9, -10], 'output': 1.337379096355562e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [10, 7, 3, -3], 'output': 1.3840022461408807e-09, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [8, -2, -10, -5, 3, 1, -2, -6], 'output': 6.92455426332117e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [1, -7, -8, 2], 'output': 2.1342083655895294e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [1, 1], 'output': 0.0, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-9, 4, 7, -7, 2, -8], 'output': 1.1405965061328516e-09, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [10, 9, 1, 8, -4, -8], 'output': 4.0877967677488414e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-3, -1], 'output': 5.820766091346741e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-3, -7], 'output': 5.820766091346741e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-2, 4, 10, 1, -5, 1, 1, -4], 'output': 4.5996983999430086e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [10, -8, 9, 10, -5, 7], 'output': 4.412106235918145e-09, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-5, 4, 2, -2], 'output': 7.292131343206165e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [1, -9, -3, -9], 'output': 1.7145054993783493e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [2, -2, -8, -4, 8, 1], 'output': 3.6866111552402714e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [10, 5, 2, 10], 'output': 
1.015466821741029e-09, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-6, -2, -6, -3, 7, 7, -2, 8], 'output': 2.469873194854699e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [8, 2, 1, -3, -6, 6, 5, -8], 'output': 4.654125973502232e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-7, -6], 'output': 1.1641532182693481e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [3, 9, -8, 2], 'output': 4.748736473492166e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [9, 4, 6, -2, 7, -10, -7, 7], 'output': 1.0656506788109255e-09, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [10, 1, -7, -1, 3, -5], 'output': 6.19443163429878e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-10, -2, 6, -5, 6, -7, 10, -1], 'output': 1.039987151951749e-09, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-6, 1, -5, 7], 'output': 8.558842523598287e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [9, 1], 'output': 5.820766091346741e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-10, -7, 1, -1, -3, -9, -3, 8], 'output': 9.059419880941277e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-8, 5], 'output': 1.1641532182693481e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [7, -6], 'output': 2.3283064365386963e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [5, 7, -5, -2], 'output': 3.864730757641155e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-4, 7, -4, -1, 2, 10, 1, 4], 'output': 1.152398176884617e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-7, -3, -3, -8, 1, -10, 8, 7], 'output': 1.1465629556894896e-09, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [8, -3, -10, -8], 'output': 8.052962741089686e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-3, -8], 'output': 4.656612873077393e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [1, -8], 'output': 
4.656612873077393e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-2, 5, -4, 7], 'output': 2.8748137204104296e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [8, 8, 5, -3], 'output': 7.751452812954085e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [3, -4, -7, -7, 3, 1, 3, 3], 'output': 3.0882091502093534e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-9, 10, 10, -7, -9, 2, 1, -7], 'output': 2.323840675444444e-09, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-4, -4, 7, 4], 'output': 0.0, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [3, -5, -2, 4], 'output': 2.471778337564956e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-8, 4, 7, -7], 'output': 5.787530454881562e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [10, 7], 'output': 5.820766091346741e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-8, -3], 'output': 5.820766091346741e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [3, 5, 5, -4], 'output': 4.028066769024008e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-9, -5, 2, -10, 2, -2, 4, -1], 'output': 1.2186199688235533e-09, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [7, 5, -6, -4, -1, -4, -9, 8], 'output': 7.55201901014857e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [1, -9], 'output': 4.0745362639427185e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [8, 5], 'output': 1.7462298274040222e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-9, 6, -8, -5], 'output': 7.17989223630866e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [9, -8], 'output': 4.656612873077393e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [2, -7, 8, -3], 'output': 1.2934986415302774e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [9, -8], 'output': 4.656612873077393e-10, 'relation': 'from $demo$ import poly\nimport 
math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [8, 8, 6, 1, -2, -4, 1, -3], 'output': 8.968825682131865e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [2, -6, 10, -1, 4, 1], 'output': 1.2246800906723365e-08, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-10, 4], 'output': 2.3283064365386963e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-8, 7], 'output': 1.1641532182693481e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [6, -2, -6, 1], 'output': 4.1145209461745935e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-3, 1], 'output': 5.820766091346741e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-5, 4, 7, -1, 9, 10], 'output': 2.8451518918615193e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [7, -1], 'output': 5.820766091346741e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-6, -2], 'output': 1.1641532182693481e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-7, 7], 'output': 4.0745362639427185e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-2, -1, 9, -4], 'output': 5.314582107729393e-12, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-4, 10, -2, 6, 5, -2], 'output': 5.341000801351026e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-8, 10], 'output': 1.1641532182693481e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-2, -9, -10, 1, -6, 10, -2, -5], 'output': 1.4370016288012266e-08, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [7, 3, 7, -10, -7, -8, -6, 7], 'output': 1.0816925133383393e-09, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [1, 8], 'output': 4.656612873077393e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [3, -6, -9, -1], 'output': 4.090063773776187e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-9, 1, -4, -3, -7, 1], 'output': 6.964910426177084e-08, 'relation': 'from $demo$ import poly\nimport math\nsolution = 
find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [9, -6, -3, -5, -5, 3, -10, -5], 'output': 1.3005894139439533e-09, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [3, -3, -2, -5, -7, 2], 'output': 0.0, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [5, -3], 'output': 1.1641532182693481e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [4, 1, -1, -3], 'output': 1.2522427539352066e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-10, -4, 2, 1], 'output': 7.0775918459276e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-8, -2, 1, 10, 6, 2], 'output': 1.0347153134304676e-09, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-10, -7, -2, -5, 8, -2], 'output': 4.458877711499554e-12, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-7, 9], 'output': 2.3283064365386963e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [1, 1, 3, 9, 6, -7, 2, 8], 'output': 6.708447131131834e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-2, -9, 3, -10], 'output': 1.3271347909515896e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [1, 3, -8, 1], 'output': 9.151792171313566e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-7, -1, 6, -1, 3, 1], 'output': 9.165997960636219e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-1, 7, -6, -4, 3, 2, -5, 9], 'output': 1.2270528522298832e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [2, 7, -10, -1, -1, -4], 'output': 8.104050763790838e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [8, 9, 10, 1, 4, 4, 4, -4], 'output': 2.9445686777762603e-08, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-5, -8, -1, 6, 10, 9, 1, -8], 'output': 2.796114451086851e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-1, -3, -4, -6], 'output': 8.562428543967826e-11, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-9, -3], 'output': 1.7462298274040222e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution 
= find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [9, -8, 4, 3, 10, 8, -4, 2], 'output': 4.614358672938579e-09, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [2, -3, -6, 10, -10, -7, 3, -3], 'output': 2.5733340805467186e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [6, 4, -9, 7], 'output': 4.689382215872229e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-7, 4, -6, 4], 'output': 9.2210683533267e-12, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [4, 9, 6, 3, 7, 4], 'output': 2.5149304860860866e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [5, 4, -2, -3], 'output': 1.9339907453286287e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [6, 5, 10, -3, -2, 4], 'output': 1.9849579757647007e-09, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [-1, -3], 'output': 1.1641532182693481e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}, {'input': [1, 1, 7, -8, -6, -6], 'output': 4.970059919173764e-10, 'relation': 'from $demo$ import poly\nimport math\nsolution = find_zero($input$)\nprint(math.fabs(poly($input$, solution)) < 1e-4)'}]
test_case_dic['HumanEval/38'] = [{'input': "'axdhhixdexrvsncacbgh'", 'output': 'daxihhexdvxrcsnbacgh', 'relation': '=='}, {'input': "'artwugrnwoshzaizfy'", 'output': 'targwuwrnhosizayzf', 'relation': '=='}, {'input': "'iekykgcmdlldiztb'", 'output': 'kiegykdcmdlltizb', 'relation': '=='}, {'input': "'dmrrjctlugwsbvchy'", 'output': 'rdmcrjutlsgwcbvhy', 'relation': '=='}, {'input': "'hdciomlfulglvi'", 'output': 'chdmioulfllgvi', 'relation': '=='}, {'input': "'ctufruhfxmiowruvkhyy'", 'output': 'uctufrxhfomiuwrhvkyy', 'relation': '=='}, {'input': "'bzhmikgscw'", 'output': 'hbzkmicgsw', 'relation': '=='}, {'input': "'upguomieexrhixr'", 'output': 'gupmuoeiehxrrix', 'relation': '=='}, {'input': "'smnhelpcqbdyufevnzt'", 'output': 'nsmlheqpcybdeufzvnt', 'relation': '=='}, {'input': "'mtmqioavrxd'", 'output': 'mmtoqiravxd', 'relation': '=='}, {'input': "'yirukyjndoafxixyfqqd'", 'output': 'ryiyukdjnfoaxxiqyfqd', 'relation': '=='}, {'input': "'uqjgetyflyqrtkaadplz'", 'output': 'juqtgelyfryqatkpadlz', 'relation': '=='}, {'input': "'bhhccspcxryyee'", 'output': 'hbhsccxpcyryee', 'relation': '=='}, {'input': "'rfpqtigrnxwywjgvumlo'", 'output': 'prfiqtngryxwgwjmvulo', 'relation': '=='}, {'input': "'dhockhsrashhcwabhu'", 'output': 'odhhckasrhshacwubh', 'relation': '=='}, {'input': "'kcbhiqpgvre'", 'output': 'bkcqhivpgre', 'relation': '=='}, {'input': "'phspzzgdnvndnnlxbov'", 'output': 'sphzpzngddvnlnnoxbv', 'relation': '=='}, {'input': "'dbuxkmdhzgrgenoiofhc'", 'output': 'udbmxkzdhggroenfiohc', 'relation': '=='}, {'input': "'rdzurbcyafnhpgpmb'", 'output': 'zrdburacyhfnppgmb', 'relation': '=='}, {'input': "'ammzzijnoxzw'", 'output': 'mamizzojnwxz', 'relation': '=='}, {'input': "'wpvgjebsgrbxkbxspb'", 'output': 'vwpegjgbsxrbxkbbsp', 'relation': '=='}, {'input': "'fbqcfqtcchmvshdtbs'", 'output': 'qfbqcfctcvhmdshstb', 'relation': '=='}, {'input': "'nvcsqsigkwkvimhvuej'", 'output': 'cnvssqkigvwkhimevuj', 'relation': '=='}, {'input': "'yckotadcsgqrelich'", 'output': 'kycaotsdcrgqielch', 'relation': '=='}, {'input': "'fojwjrzutavqjvr'", 'output': 'jforwjtzuqavrjv', 'relation': '=='}, {'input': "'idexrdijetg'", 'output': 'eiddxreijtg', 'relation': '=='}, {'input': "'vugqpibciniuakb'", 'output': 'gvuiqpibcunibak', 'relation': '=='}, {'input': "'ifuorxnrwdca'", 'output': 'uifxorwnradc', 'relation': '=='}, {'input': "'blrresebnlzj'", 'output': 'rblsrenebjlz', 'relation': '=='}, {'input': "'gvlvdhyrln'", 'output': 'lgvhvdlyrn', 'relation': '=='}, {'input': "'ehxzzfnafxkfnzzxzvh'", 'output': 'xehfzzfnafxkznzvxzh', 'relation': '=='}, {'input': "'zwfmbdhgpljozh'", 'output': 'fzwdmbphgoljzh', 'relation': '=='}, {'input': "'vgakimyicuqlm'", 'output': 'avgmkicyiluqm', 'relation': '=='}, {'input': "'karifdibstndxzlntkqd'", 'output': 'rkadifsibdtnlxzkntqd', 'relation': '=='}, {'input': "'giswnbqzavxrxvxg'", 'output': 'sgibwnaqzrvxxxvg', 'relation': '=='}, {'input': "'cvntkkdxvqjjnkv'", 'output': 'ncvktkvdxjqjvnk', 'relation': '=='}, {'input': "'jrwgnemvvftxjmsr'", 'output': 'wjregnvmvxftsjmr', 'relation': '=='}, {'input': "'jgjzsnukto'", 'output': 'jjgnzstuko', 'relation': '=='}, {'input': "'vgopzqxfzcjvvuqtk'", 'output': 'ovgqpzzxfvcjqvutk', 'relation': '=='}, {'input': "'hvyhzjeagbh'", 'output': 'yhvjhzgeabh', 'relation': '=='}, {'input': "'yctnuogwsmpwhemuw'", 'output': 'tyconusgwwmpmheuw', 'relation': '=='}, {'input': "'ydynhyzwfq'", 'output': 'yydynhfzwq', 'relation': '=='}, {'input': "'rhboedovzrtqyoktx'", 'output': 'brhdoezovqrtkyotx', 'relation': '=='}, {'input': "'ronxpfiyouihyqyuhp'", 'output': 'nrofxpoiyhuiyyqpuh', 
'relation': '=='}, {'input': "'cwohijkrkeechm'", 'output': 'ocwjhikkrceehm', 'relation': '=='}, {'input': "'gcwnknonrgnb'", 'output': 'wgcnnkronbgn', 'relation': '=='}, {'input': "'swyysapamjylnrmx'", 'output': 'yswaysmpaljymnrx', 'relation': '=='}, {'input': "'thzhippankvmzmvfox'", 'output': 'zthphinpamkvvzmxfo', 'relation': '=='}, {'input': "'ratssmacvneu'", 'output': 'tramssvacune', 'relation': '=='}, {'input': "'bifkgmkkomiyniycp'", 'output': 'fbimkgokkymiynicp', 'relation': '=='}, {'input': "'rbxhulyucb'", 'output': 'xrblhucyub', 'relation': '=='}, {'input': "'gahehtpved'", 'output': 'hgatehepvd', 'relation': '=='}, {'input': "'owgylittfwdxfjysadj'", 'output': 'gowiylfttxwdyfjdsaj', 'relation': '=='}, {'input': "'mmvgcwwusdwhjvyzdtz'", 'output': 'vmmwgcswuhdwyjvtzdz', 'relation': '=='}, {'input': "'blznvrcqlkaupdnluno'", 'output': 'zblrnvlcqukanpdnluo', 'relation': '=='}, {'input': "'fxnuiqzrtpoy'", 'output': 'nfxquitzrypo', 'relation': '=='}, {'input': "'sixhckohiosyvmtk'", 'output': 'xsikhciohyostvmk', 'relation': '=='}, {'input': "'kfpglpikzi'", 'output': 'pkfpglziki', 'relation': '=='}, {'input': "'irwqgahxcprnhwyuwpp'", 'output': 'wiraqgchxnpryhwpuwp', 'relation': '=='}, {'input': "'aczhmjhjwslvrqpln'", 'output': 'zacjhmwhjvslprqln', 'relation': '=='}, {'input': "'lwkijohdigkxxrdwfy'", 'output': 'klwoijihdxgkdxrywf', 'relation': '=='}, {'input': "'xpgxsiqtydgjj'", 'output': 'gxpixsyqtjdgj', 'relation': '=='}, {'input': "'fjlwraiberjbw'", 'output': 'lfjawreibbrjw', 'relation': '=='}, {'input': "'ypuasdppjkfo'", 'output': 'uypdasjppokf', 'relation': '=='}, {'input': "'pdimpcsucv'", 'output': 'ipdcmpcsuv', 'relation': '=='}, {'input': "'ezejcsdrhy'", 'output': 'eezsjchdry', 'relation': '=='}, {'input': "'tzthytmoqjsojsnt'", 'output': 'ttzthyqmoojsnjst', 'relation': '=='}, {'input': "'xdtguyivgc'", 'output': 'txdygugivc', 'relation': '=='}, {'input': "'frhfacownpjt'", 'output': 'hfrcfanowtpj', 'relation': '=='}, {'input': "'jwhwojvhci'", 'output': 'hjwjwocvhi', 'relation': '=='}, {'input': "'vzsndghurieebfcjtzxs'", 'output': 'svzgndrhueiecbfzjtxs', 'relation': '=='}, {'input': "'doojwwiqmporct'", 'output': 'odowjwmiqrpoct', 'relation': '=='}, {'input': "'xkniathvcs'", 'output': 'nxktiachvs', 'relation': '=='}, {'input': "'yvasbiyfyqupifonusp'", 'output': 'ayvisbyyfpquoifsnup', 'relation': '=='}, {'input': "'lnpkvkfkdnw'", 'output': 'plnkkvdfknw', 'relation': '=='}, {'input': "'vmjrbyckokdimqyav'", 'output': 'jvmyrbockikdymqav', 'relation': '=='}, {'input': "'nboqlgyptoyugibejr'", 'output': 'onbgqltypuoybgirej', 'relation': '=='}, {'input': "'pdwutahwzjrfrnach'", 'output': 'wpdautzhwfjrarnch', 'relation': '=='}, {'input': "'duopweqwjin'", 'output': 'oduepwjqwin', 'relation': '=='}, {'input': "'hopemrtqgecxyzink'", 'output': 'phoremgtqxeciyznk', 'relation': '=='}, {'input': "'ajijsxvpsorelkpyrr'", 'output': 'iajxjssvpeorplkryr', 'relation': '=='}, {'input': "'kgohswhymbknpwxz'", 'output': 'okgwhsmhynbkxpwz', 'relation': '=='}, {'input': "'vzmepueqbkdsdqoo'", 'output': 'mvzuepbeqskdodqo', 'relation': '=='}, {'input': "'enxecuzipk'", 'output': 'xenuecpzik', 'relation': '=='}, {'input': "'muwkvcmkrwyurbpchtu'", 'output': 'wmuckvrmkuwyprbtchu', 'relation': '=='}, {'input': "'hxjndcuwyofdjawkzbbj'", 'output': 'jhxcndyuwdofwjabkzbj', 'relation': '=='}, {'input': "'nelqnhvzsffftmc'", 'output': 'lnehqnsvzfffctm', 'relation': '=='}, {'input': "'hpvehsuioivozoavrjf'", 'output': 'vhpsehouioivazojvrf', 'relation': '=='}, {'input': "'lsounjiowjg'", 'output': 'olsjunwiojg', 'relation': '=='}, 
{'input': "'dhpslmjwsavjiams'", 'output': 'pdhmslsjwjavmias', 'relation': '=='}, {'input': "'xbyxptyzjtzhhultigvy'", 'output': 'yxbtxpjyzhtzlhugtivy', 'relation': '=='}, {'input': "'euvuudjzbbsoxeljkcxn'", 'output': 'veuduubjzobslxecjkxn', 'relation': '=='}, {'input': "'ezglqrifqpzi'", 'output': 'gezrlqqifipz', 'relation': '=='}, {'input': "'kzxocdyhexvvmz'", 'output': 'xkzdoceyhvxvmz', 'relation': '=='}, {'input': "'czlaimdorvxlisvulm'", 'output': 'lczmairdolvxvismul', 'relation': '=='}, {'input': "'hpvtrathkuc'", 'output': 'vhpatrkthuc', 'relation': '=='}, {'input': "'wjondubbepdjhrdmoelv'", 'output': 'owjundebbjpddhremolv', 'relation': '=='}, {'input': "'sxnenxdpunitwlboog'", 'output': 'nsxxenudptnibwlgoo', 'relation': '=='}, {'input': "'dvlrulbmlgdio'", 'output': 'ldvlrulbmigdo', 'relation': '=='}, {'input': "'guvtauzkbhe'", 'output': 'vguutabzkhe', 'relation': '=='}]
test_case_dic['HumanEval/44'] = test_case_dic['HumanEval/44'][:-1]
for x in range(2, 8):
test_case_dic['HumanEval/44'].append({
'input': '%s, %s' % (x, x+1),
'output': '%s' % (x),
'relation': '=='
})
test_case_dic['HumanEval/52'] = [
{'input': '[1, 2, 4, 10], 100', 'output': 'True', 'relation': '=='},
{'input': '[1, 20, 4, 10], 5', 'output': 'False', 'relation': '=='},
{'input': '[1, 20, 4, 10], 21', 'output': 'True', 'relation': '=='},
{'input': '[1, 20, 4, 10], 22', 'output': 'True', 'relation': '=='},
{'input': '[1, 8, 4, 10], 11', 'output': 'True', 'relation': '=='},
{'input': '[1, 8, 4, 10], 10', 'output': 'False', 'relation': '=='}
]
test_case_dic['HumanEval/50'] = [{'input': "'ifcnmmjciacwhxsgfhlm'", 'output': 'daxihhexdvxrcsnbacgh', 'relation': '=='}, {'input': "'yfwlbzbwsmtxnefdek'", 'output': 'targwuwrnhosizayzf', 'relation': '=='}, {'input': "'pnjldpihriqqyneg'", 'output': 'kiegykdcmdlltizb', 'relation': '=='}, {'input': "'wirhwozyqxlbhgamd'", 'output': 'rdmcrjutlsgwcbvhy', 'relation': '=='}, {'input': "'hmirntzqkqqlan'", 'output': 'chdmioulfllgvi', 'relation': '=='}, {'input': "'zhyzkwcmktrnzbwmapdd'", 'output': 'uctufrxhfomiuwrhvkyy', 'relation': '=='}, {'input': "'mgeprnhlxb'", 'output': 'hbzkmicgsw', 'relation': '=='}, {'input': "'lzurztjnjmcwwnc'", 'output': 'gupmuoeiehxrrix', 'relation': '=='}, {'input': "'sxrqmjvuhdgijzkeasy'", 'output': 'nsmlheqpcybdeufzvnt', 'relation': '=='}, {'input': "'rrytvnwfaci'", 'output': 'mmtoqiravxd', 'relation': '=='}, {'input': "'wdndzpiosktfccnvdkvi'", 'output': 'ryiyukdjnfoaxxiqyfqd', 'relation': '=='}, {'input': "'ozvyljqdkwdvfypufiqe'", 'output': 'juqtgelyfryqatkpadlz', 'relation': '=='}, {'input': "'mgmxhhcuhdwdjj'", 'output': 'hbhsccxpcyryee', 'relation': '=='}, {'input': "'uwknvyslwdcblborazqt'", 'output': 'prfiqtngryxwgwjmvulo', 'relation': '=='}, {'input': "'timmhpfxwmxmfhbzgm'", 'output': 'odhhckasrhshacwubh', 'relation': '=='}, {'input': "'gphvmnaulwj'", 'output': 'bkcqhivpgre', 'relation': '=='}, {'input': "'xumeuesliiasqsstcga'", 'output': 'sphzpzngddvnlnnoxbv', 'relation': '=='}, {'input': "'zigrcpeimllwtjskntmh'", 'output': 'udbmxkzdhggroenfiohc', 'relation': '=='}, {'input': "'ewigzwfhdmksuulrg'", 'output': 'zrdburacyhfnppgmb', 'relation': '=='}, {'input': "'rfrneetosbce'", 'output': 'mamizzojnwxz', 'relation': '=='}, {'input': "'abujlolgxcwgcpggxu'", 'output': 'vwpegjgbsxrbxkbbsp', 'relation': '=='}, {'input': "'vkgvhkhyhamrixmxyg'", 'output': 'qfbqcfctcvhmdshstb', 'relation': '=='}, {'input': "'hsaxxvpnlabpmnrjazo'", 'output': 'cnvssqkigvwkhimevuj', 'relation': '=='}, {'input': "'pdhftyxihwlvnjqhm'", 'output': 'kycaotsdcrgqielch', 'relation': '=='}, {'input': "'oktwboyezvfawoa'", 'output': 'jforwjtzuqavrjv', 'relation': '=='}, {'input': "'jniicwjnoyl'", 'output': 'eiddxreijtg', 'relation': '=='}, {'input': "'laznvunghzsngfp'", 'output': 'gvuiqpibcunibak', 'relation': '=='}, {'input': "'znkctwbswfih'", 'output': 'uifxorwnradc', 'relation': '=='}, {'input': "'wgqxwjsjgoqe'", 'output': 'rblsrenebjlz', 'relation': '=='}, {'input': "'qlamaiqdws'", 'output': 'lgvhvdlyrn', 'relation': '=='}, {'input': "'cjmkeeksfkcpeseacem'", 'output': 'xehfzzfnafxkznzvxzh', 'relation': '=='}, {'input': "'kebirgumltqoem'", 'output': 'fzwdmbphgoljzh', 'relation': '=='}, {'input': "'falrpnhdnqzvr'", 'output': 'avgmkicyiluqm', 'relation': '=='}, {'input': "'wpfinkxngiysqcepsyvi'", 'output': 'rkadifsibdtnlxzkntqd', 'relation': '=='}, {'input': "'xlngbsfvewacccal'", 'output': 'sgibwnaqzrvxxxvg', 'relation': '=='}, {'input': "'shapypaicovoasp'", 'output': 'ncvktkvdxjqjvnk', 'relation': '=='}, {'input': "'bowjlsarackyxorw'", 'output': 'wjregnvmvxftsjmr', 'relation': '=='}, {'input': "'oolsexyzpt'", 'output': 'jjgnzstuko', 'relation': '=='}, {'input': "'talvueeckahovazyp'", 'output': 'ovgqpzzxfvcjqvutk', 'relation': '=='}, {'input': "'dmaomeljfgm'", 'output': 'yhvjhzgeabh', 'relation': '=='}, {'input': "'ydhtszxlbbrurmjzb'", 'output': 'tyconusgwwmpmheuw', 'relation': '=='}, {'input': "'ddidsmkebv'", 'output': 'yydynhfzwq', 'relation': '=='}, {'input': "'gwmitjetavwypdtyc'", 'output': 'brhdoezovqrtkyotx', 'relation': '=='}, {'input': "'swtkcutndmznddvuzm'", 'output': 'nrofxpoiyhuiyyqpuh', 
'relation': '=='}, {'input': "'thbomnppwhjjmr'", 'output': 'ocwjhikkrceehm', 'relation': '=='}, {'input': "'blhsspwtsgls'", 'output': 'wgcnnkronbgn', 'relation': '=='}, {'input': "'dxbfdxrufqodrswc'", 'output': 'yswaysmpaljymnrx', 'relation': '=='}, {'input': "'eymumnsufrpaaerckt'", 'output': 'zthphinpamkvvzmxfo', 'relation': '=='}, {'input': "'ywfrxxafhzsj'", 'output': 'tramssvacune', 'relation': '=='}, {'input': "'kgnrpltppdrndsnhu'", 'output': 'fbimkgokkymiynicp', 'relation': '=='}, {'input': "'cwgqmzhdzg'", 'output': 'xrblhucyub', 'relation': '=='}, {'input': "'mlfyjmjuai'", 'output': 'hgatehepvd', 'relation': '=='}, {'input': "'ltbndqkyycbidkoixfo'", 'output': 'gowiylfttxwdyfjdsaj', 'relation': '=='}, {'input': "'arrblhxbzmibdoayeie'", 'output': 'vmmwgcswuhdwyjvtzdz', 'relation': '=='}, {'input': "'egqwsaqhvzpfsuisqzt'", 'output': 'zblrnvlcqukanpdnluo', 'relation': '=='}, {'input': "'skcvznyewdut'", 'output': 'nfxquitzrypo', 'relation': '=='}, {'input': "'cxnpmhntmdtxyarp'", 'output': 'xsikhciohyostvmk', 'relation': '=='}, {'input': "'upkulqenpn'", 'output': 'pkfpglziki', 'relation': '=='}, {'input': "'bnwfvlhmcsuwdmbuzbu'", 'output': 'wiraqgchxnpryhwpuwp', 'relation': '=='}, {'input': "'efhomrbmoaxquwvqs'", 'output': 'zacjhmwhjvslprqln', 'relation': '=='}, {'input': "'pqbtnonmiclpicwdbk'", 'output': 'klwoijihdxgkdxrywf', 'relation': '=='}, {'input': "'lcuncxdvyoilo'", 'output': 'gxpixsyqtjdgj', 'relation': '=='}, {'input': "'qkofbwjnggwob'", 'output': 'lfjawreibbrjw', 'relation': '=='}, {'input': "'zduifxouutpk'", 'output': 'uypdasjppokf', 'relation': '=='}, {'input': "'nuihruhxza'", 'output': 'ipdcmpcsuv', 'relation': '=='}, {'input': "'jjexohmiwd'", 'output': 'eezsjchdry', 'relation': '=='}, {'input': "'yyeymdvrttoxsoxy'", 'output': 'ttzthyqmoojsnjst', 'relation': '=='}, {'input': "'ycidlzlnah'", 'output': 'txdygugivc', 'relation': '=='}, {'input': "'mkwhkfstbyuo'", 'output': 'hfrcfanowtpj', 'relation': '=='}, {'input': "'mobobthamn'", 'output': 'hjwjwocvhi', 'relation': '=='}, {'input': "'xaelsiwmzjnjhgkeoycx'", 'output': 'svzgndrhueiecbfzjtxs', 'relation': '=='}, {'input': "'titbobrnvwuthy'", 'output': 'odowjwmiqrpoct', 'relation': '=='}, {'input': "'scpynfhmax'", 'output': 'nxktiachvs', 'relation': '=='}, {'input': "'fdanxgddkuvztnkxszu'", 'output': 'ayvisbyyfpquoifsnup', 'relation': '=='}, {'input': "'uqsppaikpsb'", 'output': 'plnkkvdfknw', 'relation': '=='}, {'input': "'oardwgthpnpidrvfa'", 'output': 'jvmyrbockikdymqav', 'relation': '=='}, {'input': "'tsglvqyduztdglnwjo'", 'output': 'onbgqltypuoybgirej', 'relation': '=='}, {'input': "'buifzyembkowfwshm'", 'output': 'wpdautzhwfjrarnch', 'relation': '=='}, {'input': "'tizjubovbns'", 'output': 'oduepwjqwin', 'relation': '=='}, {'input': "'umtwjrlyvcjhndesp'", 'output': 'phoremgtqxeciyznk', 'relation': '=='}, {'input': "'nfocoxxaujtwuqpwdw'", 'output': 'iajxjssvpeorplkryr', 'relation': '=='}, {'input': "'tplbmxrmdsgpcube'", 'output': 'okgwhsmhynbkxpwz', 'relation': '=='}, {'input': "'raezjugjvxpitivt'", 'output': 'mvzuepbeqskdodqo', 'relation': '=='}, {'input': "'cjszjhuenp'", 'output': 'xenuecpzik', 'relation': '=='}, {'input': "'brzhpawrpzbduwgyhmz'", 'output': 'wmuckvrmkuwyprbtchu', 'relation': '=='}, {'input': "'omchsidzbitkbofgpego'", 'output': 'jhxcndyuwdofwjabkzbj', 'relation': '=='}, {'input': "'qsjmvsxaekkkhyr'", 'output': 'lnehqnsvzfffctm', 'relation': '=='}, {'input': "'amuxjmtzntnafetoawk'", 'output': 'vhpsehouioivazojvrf', 'relation': '=='}, {'input': "'tqxozsbntol'", 'output': 'olsjunwiojg', 'relation': '=='}, 
{'input': "'uimrxqxobofarnfx'", 'output': 'pdhmslsjwjavmias', 'relation': '=='}, {'input': "'dcgycuodemyeqmzlynad'", 'output': 'yxbtxpjyzhtzlhugtivy', 'relation': '=='}, {'input': "'ajzizzgoetgxqcjhopcs'", 'output': 'veuduubjzobslxecjkxn', 'relation': '=='}, {'input': "'ljewqvvnknue'", 'output': 'gezrlqqifipz', 'relation': '=='}, {'input': "'cpeithjdmacare'", 'output': 'xkzdoceyhvxvmz', 'relation': '=='}, {'input': "'qherfnwitqacanxrzq'", 'output': 'lczmairdolvxvismul', 'relation': '=='}, {'input': "'amufywpymzh'", 'output': 'vhpatrkthuc', 'relation': '=='}, {'input': "'tbozsijggouiimwjrtqa'", 'output': 'owjundebbjpddhremolv', 'relation': '=='}, {'input': "'sxccjsziuysngbqltt'", 'output': 'nsxxenudptnibwlgoo', 'relation': '=='}, {'input': "'qiaqwzqgrnlit'", 'output': 'ldvlrulbmigdo', 'relation': '=='}, {'input': "'alzzyfgepmj'", 'output': 'vguutabzkhe', 'relation': '=='}]
test_case_dic['HumanEval/51'][1] = {'input': '"abcdef\\nghijklm"', 'output': 'bcdf\nghjklm', 'relation': '=='}
test_case_dic['HumanEval/53'] = test_case_dic['HumanEval/53'][:-1] + [{'input': '654, 114', 'output': '768', 'relation': '=='}, {'input': '25, 759', 'output': '784', 'relation': '=='}, {'input': '281, 250', 'output': '531', 'relation': '=='}, {'input': '228, 142', 'output': '370', 'relation': '=='}, {'input': '754, 104', 'output': '858', 'relation': '=='}, {'input': '692, 758', 'output': '1450', 'relation': '=='}, {'input': '913, 558', 'output': '1471', 'relation': '=='}, {'input': '89, 604', 'output': '693', 'relation': '=='}, {'input': '432, 32', 'output': '464', 'relation': '=='}, {'input': '30, 95', 'output': '125', 'relation': '=='}, {'input': '223, 238', 'output': '461', 'relation': '=='}, {'input': '517, 616', 'output': '1133', 'relation': '=='}, {'input': '27, 574', 'output': '601', 'relation': '=='}, {'input': '203, 733', 'output': '936', 'relation': '=='}, {'input': '665, 718', 'output': '1383', 'relation': '=='}, {'input': '558, 429', 'output': '987', 'relation': '=='}, {'input': '225, 459', 'output': '684', 'relation': '=='}, {'input': '603, 284', 'output': '887', 'relation': '=='}, {'input': '828, 890', 'output': '1718', 'relation': '=='}, {'input': '6, 777', 'output': '783', 'relation': '=='}, {'input': '825, 163', 'output': '988', 'relation': '=='}, {'input': '714, 432', 'output': '1146', 'relation': '=='}, {'input': '348, 284', 'output': '632', 'relation': '=='}, {'input': '159, 220', 'output': '379', 'relation': '=='}, {'input': '980, 781', 'output': '1761', 'relation': '=='}, {'input': '344, 104', 'output': '448', 'relation': '=='}, {'input': '94, 389', 'output': '483', 'relation': '=='}, {'input': '99, 367', 'output': '466', 'relation': '=='}, {'input': '867, 352', 'output': '1219', 'relation': '=='}, {'input': '618, 270', 'output': '888', 'relation': '=='}, {'input': '826, 44', 'output': '870', 'relation': '=='}, {'input': '747, 470', 'output': '1217', 'relation': '=='}, {'input': '549, 127', 'output': '676', 'relation': '=='}, {'input': '996, 944', 'output': '1940', 'relation': '=='}, {'input': '387, 80', 'output': '467', 'relation': '=='}, {'input': '565, 300', 'output': '865', 'relation': '=='}, {'input': '849, 643', 'output': '1492', 'relation': '=='}, {'input': '633, 906', 'output': '1539', 'relation': '=='}, {'input': '882, 370', 'output': '1252', 'relation': '=='}, {'input': '591, 196', 'output': '787', 'relation': '=='}, {'input': '721, 71', 'output': '792', 'relation': '=='}, {'input': '46, 677', 'output': '723', 'relation': '=='}, {'input': '233, 791', 'output': '1024', 'relation': '=='}, {'input': '296, 81', 'output': '377', 'relation': '=='}, {'input': '875, 238', 'output': '1113', 'relation': '=='}, {'input': '887, 103', 'output': '990', 'relation': '=='}, {'input': '389, 284', 'output': '673', 'relation': '=='}, {'input': '464, 650', 'output': '1114', 'relation': '=='}, {'input': '854, 373', 'output': '1227', 'relation': '=='}, {'input': '166, 379', 'output': '545', 'relation': '=='}, {'input': '363, 214', 'output': '577', 'relation': '=='}, {'input': '686, 273', 'output': '959', 'relation': '=='}, {'input': '718, 959', 'output': '1677', 'relation': '=='}, {'input': '699, 663', 'output': '1362', 'relation': '=='}, {'input': '73, 623', 'output': '696', 'relation': '=='}, {'input': '650, 175', 'output': '825', 'relation': '=='}, {'input': '546, 746', 'output': '1292', 'relation': '=='}, {'input': '250, 167', 'output': '417', 'relation': '=='}, {'input': '473, 388', 'output': '861', 'relation': '=='}, {'input': '276, 947', 'output': '1223', 'relation': '=='}, 
{'input': '655, 704', 'output': '1359', 'relation': '=='}, {'input': '570, 224', 'output': '794', 'relation': '=='}, {'input': '701, 332', 'output': '1033', 'relation': '=='}, {'input': '863, 786', 'output': '1649', 'relation': '=='}, {'input': '794, 57', 'output': '851', 'relation': '=='}, {'input': '234, 841', 'output': '1075', 'relation': '=='}, {'input': '32, 824', 'output': '856', 'relation': '=='}, {'input': '323, 410', 'output': '733', 'relation': '=='}, {'input': '274, 67', 'output': '341', 'relation': '=='}, {'input': '216, 935', 'output': '1151', 'relation': '=='}, {'input': '965, 580', 'output': '1545', 'relation': '=='}, {'input': '897, 735', 'output': '1632', 'relation': '=='}, {'input': '322, 217', 'output': '539', 'relation': '=='}, {'input': '671, 511', 'output': '1182', 'relation': '=='}, {'input': '405, 905', 'output': '1310', 'relation': '=='}, {'input': '936, 658', 'output': '1594', 'relation': '=='}, {'input': '469, 146', 'output': '615', 'relation': '=='}, {'input': '271, 142', 'output': '413', 'relation': '=='}, {'input': '252, 762', 'output': '1014', 'relation': '=='}, {'input': '574, 551', 'output': '1125', 'relation': '=='}, {'input': '269, 764', 'output': '1033', 'relation': '=='}, {'input': '598, 438', 'output': '1036', 'relation': '=='}, {'input': '919, 597', 'output': '1516', 'relation': '=='}, {'input': '408, 370', 'output': '778', 'relation': '=='}, {'input': '224, 141', 'output': '365', 'relation': '=='}, {'input': '521, 505', 'output': '1026', 'relation': '=='}, {'input': '93, 773', 'output': '866', 'relation': '=='}, {'input': '48, 881', 'output': '929', 'relation': '=='}, {'input': '112, 156', 'output': '268', 'relation': '=='}, {'input': '642, 163', 'output': '805', 'relation': '=='}, {'input': '811, 696', 'output': '1507', 'relation': '=='}, {'input': '432, 610', 'output': '1042', 'relation': '=='}, {'input': '65, 394', 'output': '459', 'relation': '=='}, {'input': '390, 610', 'output': '1000', 'relation': '=='}, {'input': '479, 541', 'output': '1020', 'relation': '=='}, {'input': '257, 994', 'output': '1251', 'relation': '=='}, {'input': '566, 881', 'output': '1447', 'relation': '=='}, {'input': '965, 11', 'output': '976', 'relation': '=='}, {'input': '696, 738', 'output': '1434', 'relation': '=='}, {'input': '117, 698', 'output': '815', 'relation': '=='}]
test_case_dic['HumanEval/56'] = [
{'input': "'<>'", 'output': 'True', 'relation': '=='},
{'input': "'<<><>>'", 'output': 'True', 'relation': '=='},
{'input': "'<><><<><>><>'", 'output': 'True', 'relation': '=='},
{'input': "'<><><<<><><>><>><<><><<>>>'", 'output': 'True', 'relation': '=='},
{'input': "'<<<><>>>>'", 'output': 'False', 'relation': '=='},
{'input': "'><<>'", 'output': 'False', 'relation': '=='},
{'input': "'<'", 'output': 'False', 'relation': '=='},
{'input': "'<<<<'", 'output': 'False', 'relation': '=='},
{'input': "'>'", 'output': 'False', 'relation': '=='},
{'input': "'<<>'", 'output': 'False', 'relation': '=='},
{'input': "'<><><<><>><>><<>'", 'output': 'False', 'relation': '=='},
{'input': "'<><><<><>><>>><>'", 'output': 'False', 'relation': '=='}
]
test_case_dic['HumanEval/61'] = [
{'input': "'()'", 'output': 'True', 'relation': '=='},
{'input': "'(()())'", 'output': 'True', 'relation': '=='},
{'input': "'()()(()())()'", 'output': 'True', 'relation': '=='},
{'input': "'()()((()()())())(()()(()))'", 'output': 'True', 'relation': '=='},
{'input': "'((()())))'", 'output': 'False', 'relation': '=='},
{'input': "')(()'", 'output': 'False', 'relation': '=='},
{'input': "'('", 'output': 'False', 'relation': '=='},
{'input': "'(((('", 'output': 'False', 'relation': '=='},
{'input': "')'", 'output': 'False', 'relation': '=='},
{'input': "'(()'", 'output': 'False', 'relation': '=='},
{'input': "'()()(()())())(()'", 'output': 'False', 'relation': '=='},
{'input': "'()()(()())()))()'", 'output': 'False', 'relation': '=='}
]
test_case_dic['HumanEval/72'] = [
{'input': '[3, 2, 3], 9', 'output': 'True', 'relation': '=='},
{'input': '[1, 2], 5', 'output': 'False', 'relation': '=='},
{'input': '[3], 5', 'output': 'True', 'relation': '=='},
{'input': '[3, 2, 3], 1', 'output': 'False', 'relation': '=='},
{'input': '[1, 2, 3], 6', 'output': 'False', 'relation': '=='},
{'input': '[5], 5', 'output': 'True', 'relation': '=='}
]
test_case_dic['HumanEval/76'] = [
{'input': '16, 2', 'output': 'True', 'relation': '=='},
{'input': '143214, 16', 'output': 'False', 'relation': '=='},
{'input': '4, 2', 'output': 'True', 'relation': '=='},
{'input': '9, 3', 'output': 'True', 'relation': '=='},
{'input': '16, 4', 'output': 'True', 'relation': '=='},
{'input': '24, 2', 'output': 'False', 'relation': '=='},
{'input': '128, 4', 'output': 'False', 'relation': '=='},
{'input': '12, 6', 'output': 'False', 'relation': '=='}
]
test_case_dic['HumanEval/92'] = [
{'input': '2, 3, 1', 'output': 'True', 'relation': '=='},
{'input': '2.5, 2, 3', 'output': 'False', 'relation': '=='},
{'input': '1.5, 5, 3.5', 'output': 'False', 'relation': '=='},
{'input': '2, 6, 2', 'output': 'False', 'relation': '=='},
{'input': '4, 2, 2', 'output': 'True', 'relation': '=='},
{'input': '2.2, 2.2, 2.2', 'output': 'False', 'relation': '=='},
{'input': '-4, 6, 2', 'output': 'True', 'relation': '=='},
{'input': '2, 1, 1', 'output': 'True', 'relation': '=='},
{'input': '3, 4, 7', 'output': 'True', 'relation': '=='},
{'input': '3.0, 4, 7', 'output': 'False', 'relation': '=='}
]
test_case_dic['HumanEval/133'] = [
{'input': '[1, 2, 3]', 'output': '14', 'relation': '=='},
{'input': '[1.0, 2, 3]', 'output': '14', 'relation': '=='},
{'input': '[1,3,5,7]', 'output': '84', 'relation': '=='},
{'input': '[1.4,4.2,0]', 'output': '29', 'relation': '=='},
{'input': '[-2.4,1,1]', 'output': '6', 'relation': '=='},
{'input': '[100,1,15,2]', 'output': '10230', 'relation': '=='},
{'input': '[10000,10000]', 'output': '200000000', 'relation': '=='},
{'input': '[-1.4,4.6,6.3]', 'output': '75', 'relation': '=='},
{'input': '[-1.4,17.9,18.9,19.9]', 'output': '1086', 'relation': '=='},
{'input': '[0]', 'output': '0', 'relation': '=='},
{'input': '[-1]', 'output': '1', 'relation': '=='},
{'input': '[-1,1,0]', 'output': '2', 'relation': '=='}
]
test_case_dic['HumanEval/135'] = [
{'input': '[1,2,4,3,5]', 'output': '3', 'relation': '=='},
{'input': '[1,2,4,5]', 'output': '-1', 'relation': '=='},
{'input': '[1,4,2,5,6,7,8,9,10]', 'output': '2', 'relation': '=='},
{'input': '[4,8,5,7,3]', 'output': '4', 'relation': '=='},
{'input': '[]', 'output': '-1', 'relation': '=='}
]
for case in test_case_dic['HumanEval/68']:
case['output'] = case['output'].replace(', "Error"', '')
for case in test_case_dic['HumanEval/88']:
case['output'] = case['output'].replace(', "Error"', '')
for case in test_case_dic['HumanEval/105']:
case['output'] = case['output'].replace(', "Error"', '')
for case in test_case_dic['HumanEval/159']:
case['output'] = case['output'].replace(', "Error"', '')
test_case_dic['HumanEval/109'] = [
{'input': '[3, 4, 5, 1, 2]', 'output': 'True', 'relation': '=='},
{'input': '[3, 5, 10, 1, 2]', 'output': 'True', 'relation': '=='},
{'input': '[4, 3, 1, 2]', 'output': 'False', 'relation': '=='},
{'input': '[3, 5, 4, 1, 2]', 'output': 'False', 'relation': '=='},
{'input': '[]', 'output': 'True', 'relation': '=='}
]
test_case_dic['HumanEval/111'] = [
{'input': "'a b b a'", 'output': "{'a':2,'b': 2}", 'relation': '=='},
{'input': "'a b c a b'", 'output': "{'a': 2, 'b': 2}", 'relation': '=='},
{'input': "'a b c d g'", 'output': "{'a': 1, 'b': 1, 'c': 1, 'd': 1, 'g': 1}", 'relation': '=='},
{'input': "'r t g'", 'output': "{'r': 1,'t': 1,'g': 1}", 'relation': '=='},
{'input': "'b b b b a'", 'output': "{'b': 4}", 'relation': '=='},
{'input': "''", 'output': "{}", 'relation': '=='},
{'input': "'a'", 'output': "{'a': 1}", 'relation': '=='}
]
test_case_dic['HumanEval/113'] = [
{'input': "['1234567']", 'output': "['the number of odd elements 4n the str4ng 4 of the 4nput.']", 'relation': '=='},
{'input': "['3','11111111']", 'output': "['the number of odd elements 1n the str1ng 1 of the 1nput.', 'the number of odd elements 8n the str8ng 8 of the 8nput.']", 'relation': '=='},
{'input': "['271', '137', '314']", 'output': "['the number of odd elements 2n the str2ng 2 of the 2nput.', 'the number of odd elements 3n the str3ng 3 of the 3nput.', 'the number of odd elements 2n the str2ng 2 of the 2nput.']", 'relation': '=='}
]
# statement after assertion false: keep only the part of each expected output before the first comma
assertation_comment_list = [64, 65, 66, 71, 76, 77, 79, 78, 80, 84, 89, 91, 92, 93, 94, 95, 97, 99, 107, 114, 115, 122, 126, 132, 133, 139, 140, 144, 151, 154, 157, 160]
for i in assertation_comment_list:
for case in test_case_dic['HumanEval/%s' % (i)]:
# if ',' in res['output']:
# res['output'] = res['output'].split(',')[0]
case['output'] = case['output'].split(',')[0]
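# Illustrative sketch (hypothetical value): a recorded output such as '3, "the list is not sorted"'
# is truncated by the split above to just '3', matching what the reference solution prints.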
test_case_dic['HumanEval/65'][3]['output'] = test_case_dic['HumanEval/65'][3]['output'][1:-1]
test_case_dic['HumanEval/65'][4]['output'] = test_case_dic['HumanEval/65'][4]['output'][1:-1]
test_case_dic['HumanEval/79'][3]['output'] = test_case_dic['HumanEval/79'][3]['output'][1:-1]
for case in test_case_dic['HumanEval/84']:
case['output'] = case['output'][:-1]
quota_list = [89, 93, 140]
for i in quota_list:
for case in test_case_dic['HumanEval/%s' % (i)]:
case['output'] = case['output'][1:-1]
test_case_dic['HumanEval/100'] = [
{'input': '3', 'output': '[3, 5, 7]', 'relation': '=='},
{'input': '4', 'output': '[4, 6, 8, 10]', 'relation': '=='},
{'input': '5', 'output': '[5, 7, 9, 11, 13]', 'relation': '=='},
{'input': '6', 'output': '[6, 8, 10, 12, 14, 16]', 'relation': '=='},
{'input': '8', 'output': '[8, 10, 12, 14, 16, 18, 20, 22]', 'relation': '=='}
]
test_case_dic['HumanEval/117'] = [
{'input': '"Mary had a little lamb", 4','output': '["little"]', 'relation': '=='},
{'input': '"Mary had a little lamb", 3','output': '["Mary", "lamb"]', 'relation': '=='},
{'input': '"simple white space", 2', 'output': '[]', 'relation': '=='},
{'input': '"Hello world", 4', 'output': '["world"]', 'relation': '=='},
{'input': '"Uncle sam", 3', 'output': '["Uncle"]', 'relation': '=='},
{'input': '"", 4', 'output': '[]', 'relation': '=='},
{'input': '"a b c d e f", 1', 'output': '["b", "c", "d", "f"]', 'relation': '=='}
]
test_case_dic['HumanEval/148'] = [
{'input': '"Jupiter", "Neptune"', 'output': '("Saturn", "Uranus")', 'relation': '=='},
{'input': '"Earth", "Mercury"', 'output': '("Venus",)', 'relation': '=='},
{'input': '"Mercury", "Uranus"', 'output': '("Venus", "Earth", "Mars", "Jupiter", "Saturn")', 'relation': '=='},
{'input': '"Neptune", "Venus"', 'output': '("Earth", "Mars", "Jupiter", "Saturn", "Uranus")', 'relation': '=='},
{'input': '"Earth", "Earth"', 'output': '()', 'relation': '=='},
{'input': '"Mars", "Earth"', 'output': '()', 'relation': '=='},
{'input': '"Jupiter", "Makemake"', 'output': '()', 'relation': '=='}
]
test_case_dic['HumanEval/158'] = [
{'input': '["name", "of", "string"]', 'output': 'string', 'relation': '=='},
{'input': '["name", "enam", "game"]', 'output': 'enam', 'relation': '=='},
{'input': '["aaaaaaa", "bb", "cc"]', 'output': 'aaaaaaa', 'relation': '=='},
{'input': '["abc", "cba"]', 'output': 'abc', 'relation': '=='},
{'input': '["play", "this", "game", "of","footbott"]', 'output': 'footbott', 'relation': '=='},
{'input': '["we", "are", "gonna", "rock"]', 'output': 'gonna', 'relation': '=='},
{'input': '["we", "are", "a", "mad", "nation"]', 'output': 'nation', 'relation': '=='},
{'input': '["this", "is", "a", "prrk"]', 'output': 'this', 'relation': '=='},
{'input': '["b"]', 'output': 'b', 'relation': '=='},
{'input': '["play", "play", "play"]', 'output': 'play', 'relation': '=='}
]
test_case_dic['HumanEval/151'][-1] = {
'input': '[-99, -97, -95, -93, -91, -89, -87, -85, -83, -81, -79, -77, -75, -73, -71, -69, -67, -65, -63, -61, -59, -57, -55, -53, -51, -49, -47, -45, -43, -41, -39, -37, -35, -33, -31, -29, -27, -25, -23, -21, -19, -17, -15, -13, -11, -9, -7, -5, -3, -1, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99]',
'output': '166650',
'relation': '=='
}
test_case_dic['HumanEval/152'] = [
{'input': '[1,2,3,4,5,1],[1,2,3,4,2,-2]', 'output': '[0,0,0,0,3,3]', 'relation': '=='},
{'input': '[0,0,0,0,0,0],[0,0,0,0,0,0]', 'output': '[0,0,0,0,0,0]', 'relation': '=='},
{'input': '[1,2,3],[-1,-2,-3]', 'output': '[2,4,6]', 'relation': '=='},
{'input': '[1,2,3,5],[-1,2,3,4]', 'output': '[2,0,0,1]', 'relation': '=='}
]
test_case_dic['HumanEval/123'] = [
{'input': '14', 'output': '[1, 5, 7, 11, 13, 17]', 'relation': '=='},
{'input': '5', 'output': '[1, 5]', 'relation': '=='},
{'input': '12', 'output': '[1, 3, 5]', 'relation': '=='},
{'input': '1', 'output': '[1]', 'relation': '=='}
]
test_case_dic['HumanEval/107'] = [
{'input': '123', 'output': '(8, 13)', 'relation': '=='},
{'input': '12', 'output': '(4, 6)', 'relation': '=='},
{'input': '3', 'output': '(1, 2)', 'relation': '=='},
{'input': '63', 'output': '(6, 8)', 'relation': '=='},
{'input': '25', 'output': '(5, 6)', 'relation': '=='},
{'input': '19', 'output': '(4, 6)', 'relation': '=='},
{'input': '9', 'output': '(4, 5)', 'relation': '=='},
{'input': '1', 'output': '(0, 1)', 'relation': '=='}
]
test_case_dic['HumanEval/163'] = [
{'input': '2, 10', 'output': '[2, 4, 6, 8]', 'relation': '=='},
{'input': '10, 2', 'output': '[2, 4, 6, 8]', 'relation': '=='},
{'input': '132, 2', 'output': '[2, 4, 6, 8]', 'relation': '=='},
{'input': '17, 89', 'output': '[]', 'relation': '=='}
]
# format problem
format_problem_list = [71, 96, 101, 105, 108, 111, 112, 117, 125, 148, 152, 149]
for i in format_problem_list:
for case in test_case_dic['HumanEval/%s' % (i)]:
case['output'] = str(eval(case['output']))
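# Illustrative sketch: the eval/str round-trip above canonicalizes quoting and spacing so the
# expected output matches Python's printed form, e.g. '["Mary", "lamb"]' becomes "['Mary', 'lamb']".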
# def test_solution():
case_status = []
for i in range(len(problem_list)):
test_cases = test_case_dic[problem_list[i]['task_id']]
demo_file = 'demo.py'
with open(demo_file, 'w', encoding='utf-8') as f:
f.write(problem_list[i]['prompt'] + problem_list[i]['canonical_solution'])
call_demo_file = 'call_demo.py'
unpassed_test_case = []
for j in range(len(test_cases)):
if test_cases[j]['relation'] == '==':
with open('./call_demo.py', 'w') as f:
f.write('from %s import %s\nprint(%s(%s))' % (
demo_file.split('.')[0],
problem_list[i]['entry_point'],
problem_list[i]['entry_point'],
test_cases[j]['input']
))
try:
output = subprocess.run(["python", call_demo_file], capture_output=True, text=True, timeout=3)
except subprocess.TimeoutExpired as e:
print(e, flush=True)
unpassed_test_case.append([j, 'Timeout'])
continue
except Exception as e:
print(e, flush=True)
unpassed_test_case.append([j, 'Exception'])
continue
if test_cases[j]['output'].strip() != output.stdout.strip():
unpassed_test_case.append([j, 'false'])
else:
unpassed_test_case.append([j, 'True'])
else:
if '$input$' in test_cases[j]['relation'] or '$demo$' in test_cases[j]['relation']:
with open('./call_demo.py', 'w') as f:
f.write('from %s import %s\n%s' % (
demo_file.split('.')[0],
problem_list[i]['entry_point'],
test_cases[j]['relation'].replace('$input$', str(test_cases[j]['input'])).replace('$demo$', demo_file.split('.')[0])
))
else:
with open('./call_demo.py', 'w') as f:
f.write('from %s import %s\nprint(%s)' % (demo_file.split('.')[0],
problem_list[i]['entry_point'],
test_cases[j]['relation'].replace('candidate', problem_list[i]['entry_point'])))
try:
output = subprocess.run(["python", call_demo_file], capture_output=True, text=True, timeout=3)
except subprocess.TimeoutExpired as e:
print(e, flush=True)
unpassed_test_case.append([j, 'Timeout'])
continue
except Exception as e:
print(e, flush=True)
unpassed_test_case.append([j, 'Exception'])
continue
if output.stdout.strip() != 'True':
unpassed_test_case.append([j, 'false'])
else:
unpassed_test_case.append([j, 'True'])
if len(set([i[1] for i in unpassed_test_case])) == 1 and unpassed_test_case[0][1] == 'True':
# print('ALL TRUE')
case_status.append(['ALL TRUE'])
else:
case_status.append(unpassed_test_case)
# print(unpassed_test_case)
# test_cases = test_case_dic[problem_list[i]['task_id']]
# with open('./demo.py', 'w', encoding='utf-8') as f:
# f.write(problem_list[i]['prompt'] + problem_list[i]['canonical_solution'])
# call_demo_file = 'call_demo.py'
# unpassed_test_case = []
# for j in range(len(test_cases)):
# if test_cases[j]['relation'] == '==':
# with open('./call_demo.py', 'w') as f:
# f.write('from demo import %s\nprint(%s(%s))' % (
# problem_list[i]['entry_point'], problem_list[i]['entry_point'], test_cases[j]['input']))
# try:
# output = subprocess.run(["python", call_demo_file], capture_output=True, text=True, timeout=3)
#
# except subprocess.TimeoutExpired as e:
# print(e, flush=True)
# unpassed_test_case.append([j,'Timeout'])
# continue
# except Exception as e:
# print(e, flush=True)
# unpassed_test_case.append([j,'Exception'])
# continue
# if test_cases[j]['output'].strip() != output.stdout.strip():
# unpassed_test_case.append([j, 'false'])
# else:
# with open('./call_demo.py', 'w') as f:
# f.write('from demo import %s\nprint(%s)' % (
# problem_list[i]['entry_point'], test_cases[j]['relation'].replace('candidate', problem_list[i]['entry_point'])))
# try:
# output = subprocess.run(["python", call_demo_file], capture_output=True, text=True, timeout=3)
# except subprocess.TimeoutExpired as e:
# print(e, flush=True)
# unpassed_test_case.append([j,'Timeout'])
# continue
# except Exception as e:
# print(e, flush=True)
# unpassed_test_case.append([j,'Exception'])
# continue
# if output.stdout.strip() != 'True':
# unpassed_test_case.append([j, 'false'])
# case_status.append(unpassed_test_case)
# test_case = []
# for i in range(len(input)):
# res = {'input': input[i], 'output': output[i], 'relation': '=='}
# test_case.append(res)
# reconstruct the dataset
if not os.path.exists('HumanEval/HumanEval_new.jsonl'):
    # create an empty output file so the loop below can append to it
    with open('HumanEval/HumanEval_new.jsonl', 'w') as f:
        f.write('')
for problem in problem_list:
res = {
'name': problem['task_id'],
'entry_point': problem['entry_point'],
'prompt': problem['prompt'],
'solution': problem['prompt'] + problem['canonical_solution'],
'test_case': test_case_dic[problem['task_id']]
}
json_str = json.dumps(res)
with open('HumanEval/HumanEval_new.jsonl', 'a') as f:
f.write(json_str + '\n') | [
"Generate Python3 code (Markdown):\n",
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | rororowyourboat/CadCAD_GPT_experiments | examples~predator_prey~cadcad_gpt.py | import openai
import json
import plotly.express as px
import pandas as pd
from radcad import Experiment
from radcad.engine import Engine
#importing radcad model from models folder
from predator_prey_radcad import model, simulation, experiment
from langchain.agents import create_pandas_dataframe_agent
from langchain.chat_models import ChatOpenAI
from langchain.agents.agent_types import AgentType
# from langchain.agents.agent_types import AgentType
from langchain.llms import OpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema.runnable import RunnablePassthrough, RunnableLambda
from langchain.schema.output_parser import StrOutputParser
with open('docs.txt', 'r') as file:
docs = file.read().replace('\n', '')
# tools in the tool kit
df = pd.DataFrame(experiment.run())
def change_param(param,value):
'''Changes the value of a parameter in the model'''
# simulation.model.initial_state.update({
# })
value = float(value)
simulation.model.params.update({
param: [value]
})
experiment = Experiment(simulation)
experiment.engine = Engine()
result = experiment.run()
# Convert the results to a pandas DataFrame
globals()['df'] = pd.DataFrame(result)
return f'new {param} value is {value} and the simulation dataframe is updated'
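# Example usage (illustrative; 'prey_birth_rate' is an assumed parameter name, valid names
# come from simulation.model.params.keys()):
#   change_param('prey_birth_rate', 0.25)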
def model_info(param):
'''Returns the information about the model'''
if param == 'all':
return simulation.model.params
elif param in simulation.model.params:
return f'{param} = {simulation.model.params[param]}'
else:
return f'{param} is not a parameter of the model'
# pandas agent as a tool
def analyze_dataframe(question):
'''Analyzes the dataframe and returns the answer to the question'''
# pandas_agent = agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df, verbose=True)
pandas_agent = create_pandas_dataframe_agent(ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613"),
df,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
)
answer = pandas_agent.run(question)
return answer
def model_documentation(question):
'''Returns the documentation of the model'''
vectorstore = FAISS.from_texts([docs], embedding=OpenAIEmbeddings())
retriever = vectorstore.as_retriever()
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI()
chain = (
{"context": retriever, "question": RunnablePassthrough()}
| prompt
| model
| StrOutputParser()
)
info = chain.invoke(question)
return info
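# Example usage (illustrative question): model_documentation('What are the assumptions in this model?')
# answers the question from docs.txt via the FAISS retrieval chain built above.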
def A_B_test(param,param2,metric):
'''Runs an A/B test on the model'''
return 'A/B test is running'
########################
def plotter(column_name):
'''Plots the column from the dataframe'''
fig = px.line(df, x="timestep", y=[column_name], title='Predator Prey Model')
fig.show()
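# Example usage (illustrative; the column name is an assumption, any column of the global df works):
#   plotter('prey_population')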
##################
def planner_agent(prompt):
"""Give LLM a given prompt and get an answer."""
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{
"role": "system",
"content": '''
You will be provided with a question by the user that is trying to run a cadcad python model. Your job is to provide the set of actions to take to get to the answer using only the functions available.
These are the functions available to you: {function_descriptions_multiple}. always remember to start and end plan with ###. Dont give the user any information other than the plan and only use the functions to get to the solution.
User: whats the current value of xyz?
Planner: ### 1) we use the function model_info to fetch the xyz parameter ###
User: What is the current value of all params?
Planner: ### 1) we use the function model_info to fetch all the parameters ###
User: What are the assumptions in this model?
Planner: ### 1) use the function model_documentation to fetch the assumptions in this model. ###
User: What are the metrics and params in the model?
Planner: ### 1) use the function model_documentation to fetch the metrics and params in the model. ###
User: What are the columns in the dataframe?
Planner: ### 1) use the function analyze_dataframe to fetch the columns in the dataframe. ###
User: What would happen to the A column at the end of the simulation if my xyz param was 20?
Planner: ### 1) we use function change_param to change the xyz parameter to 20 .\n 2) we use function analyze_dataframe to get the A at the end of the simulation. ###
USer: What is the current value of my xyz param? can you change it to 50 and tell me what the A column at the end of the simulation would be?
Planner: ### 1) we use function model_info to fetch the crash_chance parameter. \n 2) we use function change_param to change the xyz parameter to 50 .\n 3) we use function analyze_dataframe to get the A at the end of the simulation. ###
User: what would be the max value of A column if we increase the xyz param to 2?
Planner: ### 1) we use function change_param to change the xyz parameter to 2 .\n 2) we use function analyze_dataframe to get the max value of A column. ###
'''
},
{
"role": "user",
"content": prompt
}
],
)
output = completion.choices[0].message
return output
# tool descriptions
function_descriptions_multiple = [
{
"name": "change_param",
"description": "Changes the parameter of the cadcad simulation and returns dataframe as a global object. The parameter must be in this list:" + str(model.params.keys()),
"parameters": {
"type": "object",
"properties": {
"param": {
"type": "string",
"description": "parameter to change. choose from the list" + str(model.params.keys()),
},
"value": {
"type": "string",
"description": "value to change the parameter to, eg. 0.1",
},
},
"required": ["param", "value"],
},
},
{
"name": "model_info",
"description": "quantitative values of current state of the simulation parameters. If no param is specified the argument should be 'all'",
"parameters": {
"type": "object",
"properties": {
"param": {
"type": "string",
"description": "type of information to print. choose from the list: " + str(model.params.keys()),
},
},
"required": ["param"],
},
},
{
"name": "analyze_dataframe",
"description": "Use this whenever a quantitative question is asked about the dataframe. The question should be taken exactly as asked by the user",
"parameters": {
"type": "object",
"properties": {
"question": {
"type": "string",
"description": "The question asked by user that can be answered by an LLM dataframe agent",
},
},
"required": ["question"],
},
},
{
"name": "model_documentation",
"description": "use when asked about documentation of the model has information about what the model is, assumptions made, mathematical specs, differential model specs etc.",
"parameters": {
"type": "object",
"properties": {
"question": {
"type": "string",
"description": "The question asked by user that can be answered by an LLM dataframe agent",
},
},
"required": ["question"],
},
},
{
"name": "plotter",
"description": "use when asked to plot a column from dataframe",
"parameters": {
"type": "object",
"properties": {
"column_name": {
"type": "string",
"description": "The name of the column to be printed",
},
},
"required": ["column_name"],
},
}
]
# plan parser: splits the planner's '###'-delimited plan string on newlines and returns the list of plan steps
def plan_parser(plan):
plan = plan.split('###')[1]
plans = plan.split('\n')
# plans = [x.strip() for x in plans]
#strip the blank space before and after the sentences
# plans = [x.strip() for x in plans if x.strip() != '']
return plans
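# Example (hypothetical planner output): plan_parser splits the '###'-delimited plan into steps, e.g.
#   plan_parser('### 1) we use function change_param ... \n 2) we use function analyze_dataframe ... ###')
#   returns [' 1) we use function change_param ... ', ' 2) we use function analyze_dataframe ... ']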
# print with colors (ANSI escape codes, e.g. "31" = red, "32" = green, "34" = blue)
def print_color(string, color):
    print("\033["+color+"m"+string+"\033[0m")
# def orchestrator_pipeline(user_input):
# plan = planner_agent(user_input).content
# plan_list = plan_parser(plan)
# print_color("Planner Agent:", "32")
# print('I have made a plan to follow: \n')
# for plan in plan_list:
# print(plan)
# print('\n')
# for plan in plan_list:
# print_color("Executor Agent:", "31")
# print('Thought: My task is to', plan)
# answer = executor_agent(plan)
# print('Action: I should call', answer.function_call.name,'function with these' , json.loads(answer.function_call.arguments),'arguments')
# if answer.function_call.name == 'analyze_dataframe':
# print_color("Analyzer Agent:", "34")
# print('Observation: ', eval(answer.function_call.name)(**json.loads(answer.function_call.arguments)))
def executor_agent(prompt):
"""Give LLM a given prompt and get an answer."""
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[{"role": "user", "content": prompt}],
# add function calling
functions=function_descriptions_multiple,
function_call="auto", # specify the function call
)
output = completion.choices[0].message
return output
def cadcad_gpt(user_input):
plan = planner_agent(user_input).content
plan_list = plan_parser(plan)
print_color("Planner Agent:", "32")
print('I have made a plan to follow: \n')
for plan in plan_list:
print(plan)
print('\n')
for plan in plan_list:
print_color("Executor Agent:", "31")
print('Thought: My task is to', plan)
answer = executor_agent(plan)
print('Action: I should call', answer.function_call.name,'function with these' , json.loads(answer.function_call.arguments),'arguments')
if answer.function_call.name == 'analyze_dataframe':
print_color("Analyzer Agent:", "34")
print('Observation: ', eval(answer.function_call.name)(**json.loads(answer.function_call.arguments)))
# user_prompt = "whats the current value of crash chance?"
# print(executor_agent(user_prompt)) | [
"\n You will be provided with a question by the user that is trying to run a cadcad python model. Your job is to provide the set of actions to take to get to the answer using only the functions available.\n These are the functions available to you: {function_descriptions_multiple}. always remember to start and end plan with ###. Dont give the user any information other than the plan and only use the functions to get to the solution.\n\n User: whats the current value of xyz?\n Planner: ### 1) we use the function model_info to fetch the xyz parameter ###\n User: What is the current value of all params?\n Planner: ### 1) we use the function model_info to fetch all the parameters ###\n User: What are the assumptions in this model?\n Planner: ### 1) use the function model_documentation to fetch the assumptions in this model. ###\n User: What are the metrics and params in the model?\n Planner: ### 1) use the function model_documentation to fetch the metrics and params in the model. ###\n User: What are the columns in the dataframe?\n Planner: ### 1) use the function analyze_dataframe to fetch the columns in the dataframe. ###\n User: What would happen to the A column at the end of the simulation if my xyz param was 20?\n Planner: ### 1) we use function change_param to change the xyz parameter to 20 .\n 2) we use function analyze_dataframe to get the A at the end of the simulation. ###\n USer: What is the current value of my xyz param? can you change it to 50 and tell me what the A column at the end of the simulation would be?\n Planner: ### 1) we use function model_info to fetch the crash_chance parameter. \n 2) we use function change_param to change the xyz parameter to 50 .\n 3) we use function analyze_dataframe to get the A at the end of the simulation. ###\n User: what would be the max value of A column if we increase the xyz param to 2?\n Planner: ### 1) we use function change_param to change the xyz parameter to 2 .\n 2) we use function analyze_dataframe to get the max value of A column. ###\n ",
"Answer the question based only on the following context:\n {context}\n\n Question: {question}\n "
] |
2024-01-10 | rororowyourboat/CadCAD_GPT_experiments | examples~infinite_runner~cadcad_gpt.py | import openai
import json
import os
import pandas as pd
from radcad import Experiment
from radcad.engine import Engine
#importing radcad model from models folder
from infinite_runner_radcad import model, simulation, experiment
from langchain.agents import create_pandas_dataframe_agent
from langchain.chat_models import ChatOpenAI
from langchain.agents.agent_types import AgentType
# from langchain.agents.agent_types import AgentType
from langchain.llms import OpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.schema.runnable import RunnablePassthrough, RunnableLambda
from langchain.schema.output_parser import StrOutputParser
with open('docs.txt', 'r') as file:
docs = file.read().replace('\n', '')
##########################
# Tool kit
# tools in the tool kit
df = pd.DataFrame(experiment.run())
def change_param(param,value):
'''Changes the value of a parameter in the model'''
# simulation.model.initial_state.update({
# })
value = float(value)
simulation.model.params.update({
param: [value]
})
experiment = Experiment(simulation)
experiment.engine = Engine()
result = experiment.run()
# Convert the results to a pandas DataFrame
globals()['df'] = pd.DataFrame(result)
return f'new {param} value is {value} and the simulation dataframe is updated'
def model_info(param):
'''Returns the information about the model'''
if param == 'all':
return simulation.model.params
elif param in simulation.model.params:
return f'{param} = {simulation.model.params[param]}'
else:
return f'{param} is not a parameter of the model'
# pandas agent as a tool
def analyze_dataframe(question):
'''Analyzes the dataframe and returns the answer to the question'''
# pandas_agent = agent = create_pandas_dataframe_agent(OpenAI(temperature=0), df, verbose=True)
pandas_agent = create_pandas_dataframe_agent(ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613"),
df,
verbose=True,
agent_type=AgentType.OPENAI_FUNCTIONS,
)
answer = pandas_agent.run(question)
return answer
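# Example usage (illustrative; the question wording and column name are assumptions):
#   analyze_dataframe('What is the maximum value of the coins column at the end of the simulation?')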
def model_documentation(question):
'''Returns the documentation of the model'''
vectorstore = FAISS.from_texts([docs], embedding=OpenAIEmbeddings())
retriever = vectorstore.as_retriever()
template = """Answer the question based only on the following context:
{context}
Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)
model = ChatOpenAI()
chain = (
{"context": retriever, "question": RunnablePassthrough()}
| prompt
| model
| StrOutputParser()
)
info = chain.invoke(question)
return info
def A_B_test(param,param2,metric):
'''Runs an A/B test on the model'''
return 'A/B test is running'
# tool descriptions
function_descriptions_multiple = [
{
"name": "change_param",
"description": "Changes the parameter of the cadcad simulation and returns dataframe as a global object. The parameter must be in this list:" + str(model.params.keys()),
"parameters": {
"type": "object",
"properties": {
"param": {
"type": "string",
"description": "parameter to change. choose from the list" + str(model.params.keys()),
},
"value": {
"type": "string",
"description": "value to change the parameter to, eg. 0.1",
},
},
"required": ["param", "value"],
},
},
{
"name": "model_info",
"description": "quantitative values of current state of the simulation parameters. If no param is specified the argument should be 'all'",
"parameters": {
"type": "object",
"properties": {
"param": {
"type": "string",
"description": "type of information to print. choose from the list: " + str(model.params.keys()),
},
},
"required": ["param"],
},
},
{
"name": "analyze_dataframe",
"description": "Use this whenever a quantitative question is asked about the dataframe. The question should be taken exactly as asked by the user",
"parameters": {
"type": "object",
"properties": {
"question": {
"type": "string",
"description": "The question asked by user that can be answered by an LLM dataframe agent",
},
},
"required": ["question"],
},
},
{
"name": "model_documentation",
"description": "use when asked about documentation of the model has information about what the model is, assumptions made, mathematical specs, differential model specs etc.",
"parameters": {
"type": "object",
"properties": {
"question": {
"type": "string",
"description": "The question asked by user that can be answered by an LLM dataframe agent",
},
},
"required": ["question"],
},
}
]
##################
# Agents
def planner_agent(prompt):
"""Give LLM a given prompt and get an answer."""
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{
"role": "system",
"content": '''
You will be provided with a question by the user that is trying to run a cadcad python model. Your job is to provide the set of actions to take to get to the answer using only the functions available.
These are the functions available to you: {function_descriptions_multiple}. always remember to start and end plan with ###. Dont give the user any information other than the plan and only use the functions to get to the solution.
User: whats the current value of xyz?
Planner: ### 1) we use the function model_info to fetch the xyz parameter ###
User: What is the current value of all params?
Planner: ### 1) we use the function model_info to fetch all the parameters ###
User: What are the assumptions in this model?
Planner: ### 1) use the function model_documentation to fetch the assumptions in this model. ###
User: What are the metrics and params in the model?
Planner: ### 1) use the function model_documentation to fetch the metrics and params in the model. ###
User: What are the columns in the dataframe?
Planner: ### 1) use the function analyze_dataframe to fetch the columns in the dataframe. ###
User: What would happen to the A column at the end of the simulation if my xyz param was 20?
Planner: ### 1) we use function change_param to change the xyz parameter to 20 .\n 2) we use function analyze_dataframe to get the A at the end of the simulation. ###
USer: What is the current value of my xyz param? can you change it to 50 and tell me what the A column at the end of the simulation would be?
Planner: ### 1) we use function model_info to fetch the crash_chance parameter. \n 2) we use function change_param to change the xyz parameter to 50 .\n 3) we use function analyze_dataframe to get the A at the end of the simulation. ###
'''
},
{
"role": "user",
"content": prompt
}
],
)
output = completion.choices[0].message
return output
def executor_agent(prompt):
"""Give LLM a given prompt and get an answer."""
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[{"role": "user", "content": prompt}],
# add function calling
functions=function_descriptions_multiple,
function_call="auto", # specify the function call
)
output = completion.choices[0].message
return output
######################
# utils
# plan parser: splits the planner's '###'-delimited plan string on newlines and returns the list of plan steps
def plan_parser(plan):
plan = plan.split('###')[1]
plans = plan.split('\n')
# plans = [x.strip() for x in plans]
#strip the blank space before and after the sentences
# plans = [x.strip() for x in plans if x.strip() != '']
return plans
# print with colors (ANSI escape codes, e.g. "31" = red, "32" = green, "34" = blue)
def print_color(string, color):
    print("\033["+color+"m"+string+"\033[0m")
#######################
# orchestration pipeline
# def orchestrator_pipeline(user_input):
# plan = planner_agent(user_input).content
# plan_list = plan_parser(plan)
# print_color("Planner Agent:", "32")
# print('I have made a plan to follow: \n')
# for plan in plan_list:
# print(plan)
# print('\n')
# for plan in plan_list:
# print_color("Executor Agent:", "31")
# print('Thought: My task is to', plan)
# answer = executor_agent(plan)
# print('Action: I should call', answer.function_call.name,'function with these' , json.loads(answer.function_call.arguments),'arguments')
# if answer.function_call.name == 'analyze_dataframe':
# print_color("Analyzer Agent:", "34")
# print('Observation: ', eval(answer.function_call.name)(**json.loads(answer.function_call.arguments)))
def cadcad_gpt(user_input):
plan = planner_agent(user_input).content
plan_list = plan_parser(plan)
print_color("Planner Agent:", "32")
print('I have made a plan to follow: \n')
for plan in plan_list:
print(plan)
print('\n')
for plan in plan_list:
print_color("Executor Agent:", "31")
print('Thought: My task is to', plan)
answer = executor_agent(plan)
print('Action: I should call', answer.function_call.name,'function with these' , json.loads(answer.function_call.arguments),'arguments')
if answer.function_call.name == 'analyze_dataframe':
print_color("Analyzer Agent:", "34")
print('Observation: ', eval(answer.function_call.name)(**json.loads(answer.function_call.arguments)))
# user_prompt = "whats the current value of crash chance?"
# print(executor_agent(user_prompt)) | [
"\n You will be provided with a question by the user that is trying to run a cadcad python model. Your job is to provide the set of actions to take to get to the answer using only the functions available.\n These are the functions available to you: {function_descriptions_multiple}. always remember to start and end plan with ###. Dont give the user any information other than the plan and only use the functions to get to the solution.\n\n User: whats the current value of xyz?\n Planner: ### 1) we use the function model_info to fetch the xyz parameter ###\n User: What is the current value of all params?\n Planner: ### 1) we use the function model_info to fetch all the parameters ###\n User: What are the assumptions in this model?\n Planner: ### 1) use the function model_documentation to fetch the assumptions in this model. ###\n User: What are the metrics and params in the model?\n Planner: ### 1) use the function model_documentation to fetch the metrics and params in the model. ###\n User: What are the columns in the dataframe?\n Planner: ### 1) use the function analyze_dataframe to fetch the columns in the dataframe. ###\n User: What would happen to the A column at the end of the simulation if my xyz param was 20?\n Planner: ### 1) we use function change_param to change the xyz parameter to 20 .\n 2) we use function analyze_dataframe to get the A at the end of the simulation. ###\n USer: What is the current value of my xyz param? can you change it to 50 and tell me what the A column at the end of the simulation would be?\n Planner: ### 1) we use function model_info to fetch the crash_chance parameter. \n 2) we use function change_param to change the xyz parameter to 50 .\n 3) we use function analyze_dataframe to get the A at the end of the simulation. ###\n ",
"Answer the question based only on the following context:\n {context}\n\n Question: {question}\n "
] |
2024-01-10 | rororowyourboat/CadCAD_GPT_experiments | cadcad_gpt~orchestration.py | # create a CadCAD-GPT agent class which takes in the model, simulation, experiment, and docstring, and can run the experiment and return its results
import openai
import pandas as pd

# NOTE: the tool descriptions below reference `model`, which must be imported from the
# target radcad model module before this module is used (e.g. `from predator_prey_radcad
# import model`; the exact module name is an assumption and project-specific).
# tool descriptions
function_descriptions_multiple = [
{
"name": "change_param",
"description": "Changes the parameter of the cadcad simulation and returns dataframe as a global object. The parameter must be in this list:" + str(model.params.keys()),
"parameters": {
"type": "object",
"properties": {
"param": {
"type": "string",
"description": "parameter to change. choose from the list" + str(model.params.keys()),
},
"value": {
"type": "string",
"description": "value to change the parameter to, eg. 0.1",
},
},
"required": ["param", "value"],
},
},
{
"name": "model_info",
"description": "quantitative values of current state of the simulation parameters.",
"parameters": {
"type": "object",
"properties": {
"param": {
"type": "string",
"description": "type of information to print. choose from the list: " + str(model.params.keys()),
},
},
"required": ["param"],
},
},
{
"name": "analyze_dataframe",
"description": "Use this whenever a quantitative question is asked about the dataframe",
"parameters": {
"type": "object",
"properties": {
"question": {
"type": "string",
"description": "The question asked by user that can be answered by an LLM dataframe agent",
},
},
"required": ["question"],
},
},
{
"name": "model_documentation",
"description": "use when asked about documentation of the model has information about what the model is, assumptions made, mathematical specs, differential model specs etc.",
"parameters": {
"type": "object",
"properties": {
"question": {
"type": "string",
"description": "The question asked by user that can be answered by an LLM dataframe agent",
},
},
"required": ["question"],
},
}
]
class CadcadGPTAgent:
def __init__(self, model, simulation, experiment, docstring):
self.model = model
self.simulation = simulation
self.experiment = experiment
self.docstring = docstring
def run(self):
df1 = pd.DataFrame(self.experiment.run())
return df1
def executor_agent(self, prompt, function_descriptions = function_descriptions_multiple):
"""Give LLM a given prompt and get an answer."""
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[{"role": "user", "content": prompt}],
# add function calling
            functions=function_descriptions,  # use the descriptions passed in (defaults to function_descriptions_multiple)
function_call="auto", # specify the function call
)
output = completion.choices[0].message
return output
def planner_agent(self, prompt):
"""Give LLM a given prompt and get an answer."""
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{
"role": "system",
"content": '''
You will be provided with a question by the user that is trying to run a cadcad python model. Your job is to provide the set of actions to take to get to the answer using only the functions available.
For example, if the user asks "if my crash chance parameter was 0.2, what would the avg coins be at the end of the simulation?" you reply with "### 1) we use the function change_param to change the crash chance parameter to 0.2,\n 2) use the function analyze_dataframe to get the avg coins at the end of the simulation. ###"
if the user asks "what would happen to the coins at the end of the simulation if my crash chance param was 10 perc lower?" you reply with "### 1) find out the current value of crash chance param using the model_info function,\n 2) we use function change_param to change the crash chance parameter to 0.1*crash_chance .\n 3) we use function analyze_dataframe to get the avg coins at the end of the simulation. ###"
If the user asks "what is the documentation of the model?" you reply with "### use the function model_documentation to get the documentation of the model. ###
These are the functions available to you: {function_descriptions_multiple}. always remember to start and end plan with ###. Dont give the user any information other than the plan and only use the functions to get to the solution.
'''
},
{
"role": "user",
"content": prompt
}
],
)
output = completion.choices[0].message
return output
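
# Usage sketch (illustrative; the model module, docstring and question below are assumptions):
#   from infinite_runner_radcad import model, simulation, experiment
#   agent = CadcadGPTAgent(model, simulation, experiment, docstring)
#   df = agent.run()
#   plan = agent.planner_agent('What would the avg coins be if crash chance was 0.2?').content
#   step = agent.executor_agent(plan)  # returns a message with a function_call to dispatch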
| [
"\n You will be provided with a question by the user that is trying to run a cadcad python model. Your job is to provide the set of actions to take to get to the answer using only the functions available.\n For example, if the user asks \"if my crash chance parameter was 0.2, what would the avg coins be at the end of the simulation?\" you reply with \"### 1) we use the function change_param to change the crash chance parameter to 0.2,\n 2) use the function analyze_dataframe to get the avg coins at the end of the simulation. ###\" \n if the user asks \"what would happen to the coins at the end of the simulation if my crash chance param was 10 perc lower?\" you reply with \"### 1) find out the current value of crash chance param using the model_info function,\n 2) we use function change_param to change the crash chance parameter to 0.1*crash_chance .\n 3) we use function analyze_dataframe to get the avg coins at the end of the simulation. ###\"\n If the user asks \"what is the documentation of the model?\" you reply with \"### use the function model_documentation to get the documentation of the model. ###\n These are the functions available to you: {function_descriptions_multiple}. always remember to start and end plan with ###. Dont give the user any information other than the plan and only use the functions to get to the solution.\n "
] |
2024-01-10 | Rocinate/autoTranscript | server~controller~transcriptController.py | from openai import OpenAI
import os

from models import db, Transcript
from configs import UPLOAD_FOLDER
client = OpenAI()
def summary_extraction(transcript):
response = client.chat.completions.create(
model="gpt-3.5-turbo",
temperature=0,
n=1,
messages=[
{
"role": "system",
"content": "You are a highly skilled AI trained in language comprehension and summarization. I would like you to read the following text and summarize it into a concise abstract paragraph. Aim to retain the most important points, providing a coherent and readable summary that could help a person understand the main points of the discussion without needing to read the entire text. Please avoid unnecessary details or tangential points. Please use less than 30 words.",
},
{"role": "user", "content": transcript},
],
)
return response.choices[0].message.content
def key_points_extraction(transcript):
response = client.chat.completions.create(
model="gpt-3.5-turbo",
temperature=0,
n=1,
messages=[
{
"role": "system",
"content": "You are a proficient AI with a specialty in distilling information into key points. Based on the following text, identify and list the main points that were discussed or brought up. These should be the most important ideas, findings, or topics that are crucial to the essence of the discussion. Your goal is to provide a list that someone could read to quickly understand what was talked about. Please use less than 30 words.",
},
{"role": "user", "content": transcript},
],
)
return response.choices[0].message.content
def action_item_extraction(transcript):
response = client.chat.completions.create(
model="gpt-3.5-turbo",
temperature=0,
n=1,
messages=[
{
"role": "system",
"content": "You are an AI expert in analyzing conversations and extracting action items. Please review the text and identify any tasks, assignments, or actions that were agreed upon or mentioned as needing to be done. These could be tasks assigned to specific individuals, or general actions that the group has decided to take. Please list these action items clearly and concisely. Please use less than 30 words.",
},
{"role": "user", "content": transcript},
],
)
return response.choices[0].message.content
def sentiment_analysis(transcript):
response = client.chat.completions.create(
model="gpt-3.5-turbo",
temperature=0,
n=1,
messages=[
{
"role": "system",
"content": "As an AI with expertise in language and emotion analysis, your task is to analyze the sentiment of the following text. Please consider the overall tone of the discussion, the emotion conveyed by the language used, and the context in which words and phrases are used. Indicate whether the sentiment is generally positive, negative, or neutral, and provide brief explanations for your analysis where possible. Please use less than 30 words.",
},
{"role": "user", "content": transcript},
],
)
return response.choices[0].message.content
function_map = {
"keyIdentification": key_points_extraction,
"actionExtraction": action_item_extraction,
"summary": summary_extraction,
"sentiment": sentiment_analysis,
}
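
# Dispatch sketch (illustrative): a transcript's `task` field selects the analysis above, e.g.
#   function_map["summary"]("...meeting transcript text...") returns a short summary string.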
def create_task(id: int):
try:
print(f"create task {id}")
from app import app
with app.app_context():
transcript = Transcript.query.filter_by(id=id).first()
# if audio file is provided, convert it to text
if transcript.audio_name:
text = audio2text(transcript.audio_name)
transcript.content = text
# commit first incase the task is running for a long time
db.session.commit()
# run the task
result = run_task(transcript)
if not result:
transcript.status = "Failed"
else:
# update the transcript status
transcript.status = "Finished"
# save the transcript update
db.session.commit()
except Exception as e:
print(e)
return False
return True
def audio2text(audio_name):
    audio_path = os.path.join(UPLOAD_FOLDER, audio_name)
    # open() would raise instead of returning a falsy value, so check for the file explicitly
    if not os.path.exists(audio_path):
        return None

    with open(audio_path, "rb") as audio_file:
        response = client.audio.transcriptions.create(
            file=audio_file,
            model="whisper-1",
            response_format="text",
            language="en"
        )
    return response
# run task and update the transcript
def run_task(transcript: Transcript):
# check if the task is valid
if transcript.task not in function_map:
return False
transcript.analysis = function_map[transcript.task](transcript.content)
return True
| [
"You are a proficient AI with a specialty in distilling information into key points. Based on the following text, identify and list the main points that were discussed or brought up. These should be the most important ideas, findings, or topics that are crucial to the essence of the discussion. Your goal is to provide a list that someone could read to quickly understand what was talked about. Please use less than 30 words.",
"You are an AI expert in analyzing conversations and extracting action items. Please review the text and identify any tasks, assignments, or actions that were agreed upon or mentioned as needing to be done. These could be tasks assigned to specific individuals, or general actions that the group has decided to take. Please list these action items clearly and concisely. Please use less than 30 words.",
"As an AI with expertise in language and emotion analysis, your task is to analyze the sentiment of the following text. Please consider the overall tone of the discussion, the emotion conveyed by the language used, and the context in which words and phrases are used. Indicate whether the sentiment is generally positive, negative, or neutral, and provide brief explanations for your analysis where possible. Please use less than 30 words.",
"You are a highly skilled AI trained in language comprehension and summarization. I would like you to read the following text and summarize it into a concise abstract paragraph. Aim to retain the most important points, providing a coherent and readable summary that could help a person understand the main points of the discussion without needing to read the entire text. Please avoid unnecessary details or tangential points. Please use less than 30 words."
] |
2024-01-10 | ashishjsharda/OpenAIExamples | example2.py | import openai
# you can use any one of the [davinci, babbage, curie, ada] models
def query_gpt(prompt, model="text-curie-001", max_tokens=10):
openai.api_key = 'your open api key'
# Making a request to the model
response = openai.Completion.create(
engine=model,
prompt=prompt,
max_tokens=max_tokens
)
return response.choices[0].text.strip()
prompt = "Translate the following English text to French: 'Hello, how are you?'"
response = query_gpt(prompt)
print(response)
| [
"Translate the following English text to French: 'Hello, how are you?'"
] |
2024-01-10 | thaitran/PatentGen | app.py | import anthropic
from docx import Document
import gradio as gr
import os
import platform
import tempfile
# Run in debugging mode on Mac OS
if platform.system() == "Darwin":
DEBUG = True
else:
DEBUG = False
# Turn on auth if PATENTGEN_USERNAME and PATENTGEN_PASSWORD are set
USERNAME = os.environ.get("PATENTGEN_USERNAME")
PASSWORD = os.environ.get("PATENTGEN_PASSWORD")
MAX_TOKENS = 1000
SYSTEM_MESSAGE = "You are the world's best patent attorney. You are drafting a US patent application based on the attached transcript of an invention disclosure meeting."
CLAIMS_PROMPT = "Draft The Claims section with 10 claims. Only return the Claims and nothing else."
TITLE_PROMPT = "Draft the title for this patent application. Only return the Title and nothing else."
TECHFIELD_PROMPT = "Draft the Technical Field section. Only return the Technical Field and nothing else."
BACKGROUND_PROMPT = "Draft the Background section with 3 paragraphs. Only return the Background and nothing else."
EMBODIMENTS_PROMPT = "Draft the Summary of Example Embodiments section with 3 example embodiments. Only return the embodiments and nothing else."
DRAWINGS_PROMPT = "Draft the Brief Description of the Drawings section. Only return the Drawings and nothing else."
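# Build a single Claude prompt from the system message and prior exchanges, then stream the completion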
def generate(new_user_message, history=[], temperature=1):
if 'ANTHROPIC_API_KEY' not in os.environ:
raise Exception("This model will be run from www.anthropic.com - Please obtain an API key from https://console.anthropic.com/account/keys and then set the following environment variable before running this app:\n```\nexport ANTHROPIC_API_KEY=<your key>\n```")
client = anthropic.Anthropic()
prompt = SYSTEM_MESSAGE + "\n"
for user_message, assistant_response in history:
if user_message.strip() and assistant_response.strip():
prompt += anthropic.HUMAN_PROMPT + user_message + "\n" + anthropic.AI_PROMPT + assistant_response + "\n"
prompt += anthropic.HUMAN_PROMPT + new_user_message + anthropic.AI_PROMPT
if DEBUG:
print(prompt)
print("----------------------------------")
stream = client.completions.create(
model="claude-2",
prompt=prompt,
temperature=temperature,
max_tokens_to_sample=MAX_TOKENS,
stream=True
)
return stream
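# Factory returning a click handler bound to one prompt index; the uploaded transcript and the
# previously generated sections are replayed as chat history before drafting the new section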
def gen_section_fn(index):
def gen_section(transcript_file, *args):
prompt_list = list(args)
new_user_message = prompt_list[index]
prompt_list[index] = ""
messages = []
if not transcript_file:
raise gr.Error("Please upload a transcript of the invention disclosure meeting first!")
with open(transcript_file.name, 'r') as f:
transcript = f.read()
if transcript:
messages.append((transcript, "Thank you, I will use this as background info when drafting the patent application."))
for i in range(0, len(prompt_list), 2):
messages.append((prompt_list[i], prompt_list[i+1]))
response = ""
stream = generate(new_user_message, history=messages)
for chunk in stream:
response += chunk.completion
yield response
return response
return gen_section
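# Combine the generated sections into a Word document written to a temporary file for download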
def gen_word_doc(claims_gen, title_gen, techfield_gen, background_gen, embodiments_gen, drawings_gen):
doc = Document()
temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.docx')
temp_path = temp_file.name
doc.add_heading('TITLE', level=1)
doc.add_paragraph(title_gen)
doc.add_heading('TECHNICAL FIELD', level=1)
doc.add_paragraph(techfield_gen)
doc.add_heading('BACKGROUND', level=1)
doc.add_paragraph(background_gen)
doc.add_heading('SUMMARY OF EXAMPLE EMBODIMENTS', level=1)
doc.add_paragraph(embodiments_gen)
doc.add_heading('BRIEF DESCRIPTION OF THE DRAWINGS', level=1)
doc.add_paragraph(drawings_gen)
doc.add_heading('CLAIMS', level=1)
doc.add_paragraph(claims_gen)
doc.save(temp_path)
return temp_path
with gr.Blocks() as demo:
transcript = gr.File(label="Transcript of invention disclosure meeting")
claims_prompt = gr.Textbox(label="Prompt", value=CLAIMS_PROMPT, interactive=True)
claims_gen = gr.Textbox(lines=5, label="Claims", interactive=True, show_copy_button=True)
claims_button = gr.Button(value="Generate Claims")
title_prompt = gr.Textbox(label="Prompt", value=TITLE_PROMPT, interactive=True)
title_gen = gr.Textbox(lines=1, label="Title", interactive=True, show_copy_button=True)
title_button = gr.Button(value="Generate Title")
techfield_prompt = gr.Textbox(label="Prompt", value=TECHFIELD_PROMPT, interactive=True)
techfield_gen = gr.Textbox(lines=5, label="Technical Field", interactive=True, show_copy_button=True)
techfield_button = gr.Button(value="Generate Technical Field")
background_prompt = gr.Textbox(label="Prompt", value=BACKGROUND_PROMPT, interactive=True)
background_gen = gr.Textbox(lines=5, label="Background", interactive=True, show_copy_button=True)
background_button = gr.Button(value="Generate Background")
embodiments_prompt = gr.Textbox(label="Prompt", value=EMBODIMENTS_PROMPT, interactive=True)
embodiments_gen = gr.Textbox(lines=5, label="Embodiments", interactive=True, show_copy_button=True)
embodiments_button = gr.Button(value="Generate Embodiments")
drawings_prompt = gr.Textbox(label="Prompt", value=DRAWINGS_PROMPT, interactive=True)
drawings_gen = gr.Textbox(lines=5, label="Drawings", interactive=True, show_copy_button=True)
drawings_button = gr.Button(value="Generate Drawings")
word_doc = gr.File(label="Output Word Doc")
combine_button = gr.Button(value="Combine All Sections Into Word Doc", variant="primary")
inputs = [
transcript,
claims_prompt, claims_gen, # 0
title_prompt, title_gen, # 2
techfield_prompt, techfield_gen, # 4
background_prompt, background_gen, # 6
embodiments_prompt, embodiments_gen, # 8
drawings_prompt, drawings_gen, # 10
]
claims_button.click(gen_section_fn(0), inputs=inputs, outputs=claims_gen)
title_button.click(gen_section_fn(2), inputs=inputs, outputs=title_gen)
techfield_button.click(gen_section_fn(4), inputs=inputs, outputs=techfield_gen)
background_button.click(gen_section_fn(6), inputs=inputs, outputs=background_gen)
embodiments_button.click(gen_section_fn(8), inputs=inputs, outputs=embodiments_gen)
drawings_button.click(gen_section_fn(10), inputs=inputs, outputs=drawings_gen)
combine_button.click(
gen_word_doc,
inputs=[
claims_gen,
title_gen,
techfield_gen,
background_gen,
embodiments_gen,
drawings_gen
],
outputs=word_doc
)
if USERNAME and PASSWORD:
demo.queue().launch(auth=(USERNAME, PASSWORD), share=False, debug=DEBUG)
else:
demo.queue().launch(share=False, debug=DEBUG)
| [
"Draft the Summary of Example Embodiments section with 3 example embodiments. Only return the embodiments and nothing else.",
"\n",
"['PLACEHOLDER']",
"PLACEHOLDER\n",
"Draft the Technical Field section. Only return the Technical Field and nothing else.",
"Draft The Claims section with 10 claims. Only return the Claims and nothing else.",
"Draft the title for this patent application. Only return the Title and nothing else.",
"Draft the Background section with 3 paragraphs. Only return the Background and nothing else.",
"Draft the Brief Description of the Drawings section. Only return the Drawings and nothing else."
] |
2024-01-10 | sil-ai/ChatTRE | api.py | import os
from typing import Optional
import json
import uuid
from pathlib import Path
from pydantic import BaseModel
import openai
import torch
from transformers import BertTokenizerFast, BertModel
import chromadb
from chromadb.config import Settings
import cohere
from fastapi import FastAPI
# import modal
from translate import translate_text
# image = (
# modal.Image.debian_slim()
# .pip_install(
# "chromadb",
# "fastapi",
# "pydantic",
# "openai==0.27.2",
# "torch",
# "transformers",
# "google-cloud-translate",
# "cohere",
# ).copy(
# mount=modal.Mount.from_local_file(
# local_path=Path("iso639-1.json"), remote_path=Path('iso639-1.json')
# ),
# ).copy(
# mount=modal.Mount.from_local_dir(
# local_path=Path(".chromadb/"), remote_path=Path('.chromadb/')
# ),
# )
# )
# stub = modal.Stub("chatTRE-api-server", image=image)
app = FastAPI()
# @stub.function()
# @modal.asgi_app()
# def fastapi_app():
llm = 'chatgpt'
# llm = 'cohere'
embeddings = None # Use default chromadb embeddings
# embeddings = 'labse' # Use labse embeddings
# llm API key setup
if llm == 'cohere':
co = cohere.Client(os.environ["COHERE_KEY"])
elif llm == 'chatgpt':
openai.api_key = os.environ.get("OPENAI_KEY")
if embeddings and embeddings.lower() == 'labse':
cache_path = 'bert_cache/'
tokenizer = BertTokenizerFast.from_pretrained('setu4993/LaBSE', cache_dir=cache_path)
model = BertModel.from_pretrained('setu4993/LaBSE', cache_dir=cache_path).eval()
with open('iso639-1.json') as f:
iso_639_1 = json.load(f)
# Vector store (assuming the .chromadb directory already exists. If not, run db.py first)
client = chromadb.Client(Settings(
chroma_db_impl="duckdb+parquet",
persist_directory=".chromadb"
))
if embeddings and embeddings.lower() == 'labse':
collection = client.get_collection("tyndale-labse")
else:
collection = client.get_collection("tyndale")
state_dict = {}
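# in-memory chat histories keyed by chat_id (cleared whenever the process restarts)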
# @stub.function()
def get_embeddings(query, tokenizer, model): # Only needed if using labse embeddings
query_input = tokenizer(query, return_tensors="pt", padding=False, truncation=True)
with torch.no_grad():
query_output = model(**query_input)
embedding = query_output.pooler_output.tolist()[0]
return embedding
# @stub.function()
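# Core retrieval loop: translate the query to English, pull the closest passages from Chroma,
# assemble a prompt with recent chat history and ask the configured LLM for an answer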
def add_text(text, state):
    query_text = '\n'.join([x[0] + '\n' + x[1][:50] + '\n' for x in state]) + text # Add the previous queries and answers to the search query
print(f'{query_text=}')
translation_response = translate_text(query_text)
english_query_text = translation_response.translations[0].translated_text
query_language_code = translation_response.translations[0].detected_language_code
query_language = iso_639_1[query_language_code]
print(f'{query_language=}')
print(f'{english_query_text=}')
# Get the context from chroma
if embeddings:
query_embeddings = get_embeddings(query_text, tokenizer, model)
results = collection.query(
query_embeddings=query_embeddings,
n_results=10
)
else: # Use default chromadb embeddings
results = collection.query(
query_texts=[english_query_text],
n_results=10
)
# Prompt.
context = '['
for i in range(len(results['documents'][0])):
print(results['metadatas'][0][i])
context += "{source:" + results['metadatas'][0][i]['citation'] + ', text: ' + results['documents'][0][i] + '}' + ','
context += ']' + '\n'
print(f'{context=}')
# Construct prompt.
chat_prefix = "The following is a conversation with an AI assistant for Bible translators. The assistant is"
chat_prefix += f" helpful, creative, clever, and very friendly. The assistant only responds in the {query_language} language.\n"
prompt = (
chat_prefix +
f'Read the paragraph below and answer the question, using only the information in the context delimited by triple backticks. Answer only in the {query_language} language. '
f'At the end of your answer, include the source of each context text that you used. You may use more than one, and include the sources of all those you used. '
# f' Respond in the following format:' + '{' +
# '"answer":<answer>, "sources": [<keys>]' + '}' +
f'If the question cannot be answered based on the context alone, write "Sorry i had trouble answering this question, based on the information i found\n'
f"\n"
f"Context:\n"
f"```{ context }```\n"
f"\n"
)
if len(state) > 0:
if len(state) > 3:
trim_state = state[-3:]
else:
trim_state = state
for exchange in trim_state:
prompt += "\nHuman: " + exchange[0] + "\nAI: " + exchange[1]
prompt += "\nHuman: " + text + "\nAI: "
else:
prompt += "\nHuman: " + text + "\nAI: "
print(f'{prompt=}')
if llm == 'cohere':
# Get the completion from co:here.
response = co.generate(model='xlarge',
prompt=prompt,
max_tokens=200,
temperature=0)
answer = response.generations[0].text
elif llm == 'chatgpt':
        # ChatGPT response
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0,
messages=[{"role": "user", "content": prompt}]
)
answer = response['choices'][0]["message"]["content"]
else:
print("No LLM specified")
return '', state
print(f'{answer=}')
state.append((text, answer))
return answer, state
class TextIn(BaseModel):
text: str
chat_id: Optional[str] = None
class TextOut(BaseModel):
text: str
chat_id: str
# @stub.function()
@app.post("/ask", response_model=TextOut)
def ask(input: TextIn):
print(f'{input=}')
if input.chat_id is None or input.chat_id == '':
input.chat_id = str(uuid.uuid4())
state_dict[input.chat_id] = []
text, state_dict[input.chat_id] = add_text(input.text, state_dict.get(input.chat_id, []))
print(f'{text=}')
print(f'{state_dict[input.chat_id]=}')
return {'text': text, 'chat_id': input.chat_id}
| [
"\nHuman: PLACEHOLDER\nAI: ",
"PLACEHOLDERRead the paragraph below and answer the question, using only the information in the context delimited by triple backticks. Answer only in the PLACEHOLDER language. At the end of your answer, include the source of each context text that you used. You may use more than one, and include the sources of all those you used. If the question cannot be answered based on the context alone, write \"Sorry i had trouble answering this question, based on the information i found\n\nContext:\n```PLACEHOLDER```\n\n",
"\nHuman: PLACEHOLDER\nAI: PLACEHOLDER"
] |
2024-01-10 | 5l1v3r1/modelscope | modelscope~models~cv~image_probing_model~backbone.py | # The implementation is adopted from OpenAI-CLIP,
# made publicly available under the MIT License at https://github.com/openai/CLIP
import math
import sys
from collections import OrderedDict
from functools import reduce
from operator import mul
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torchvision import models
from .utils import convert_weights, load_pretrained
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed
# after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool,
# and the subsequent convolution has stride 1
self.downsample = nn.Sequential(
OrderedDict([('-1', nn.AvgPool2d(stride)),
('0',
nn.Conv2d(
inplanes,
planes * self.expansion,
1,
stride=1,
bias=False)),
('1', nn.BatchNorm2d(planes * self.expansion))]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self,
spacial_dim: int,
embed_dim: int,
num_heads: int,
output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(
torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1],
x.shape[2] * x.shape[3]).permute(2, 0, 1)
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)
x = x + self.positional_embedding[:, None, :].to(x.dtype)
x, _ = F.multi_head_attention_forward(
query=x,
key=x,
value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat(
[self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False)
return x[0]
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self,
d_model: int,
n_head: int,
attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(
OrderedDict([('c_fc', nn.Linear(d_model, d_model * 4)),
('gelu', QuickGELU()),
('c_proj', nn.Linear(d_model * 4, d_model))]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(
dtype=x.dtype,
device=x.device) if self.attn_mask is not None else None
return self.attn(
x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor, idx):
features = {}
x_norm = self.ln_1(x)
features['layer_{}_pre_attn'.format(idx)] = x_norm.permute(1, 0, 2)
attn = self.attention(x_norm)
features['layer_{}_attn'.format(idx)] = attn.permute(1, 0, 2)
x = x + attn
mlp = self.mlp(self.ln_2(x))
features['layer_{}_mlp'.format(idx)] = mlp.permute(1, 0, 2)
x = x + mlp
return x, features
class Transformer(nn.Module):
def __init__(self,
width: int,
layers: int,
heads: int,
attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.ModuleList()
for i in range(layers):
block = ResidualAttentionBlock(width, heads, attn_mask)
self.resblocks.append(block)
def forward(self, x: torch.Tensor):
features = {}
for idx, block in enumerate(self.resblocks):
x, block_feats = block(x, idx)
features.update(block_feats)
return x, features
class VisualTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int,
layers: int, heads: int, output_dim: int):
super().__init__()
print(input_resolution, patch_size, width, layers, heads, output_dim)
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(
in_channels=3,
out_channels=width,
kernel_size=patch_size,
stride=patch_size,
bias=False)
scale = width**-0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn(
(input_resolution // patch_size)**2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor, return_all=True):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1],
-1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
zeros = torch.zeros(
x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device)
# shape = [*, grid ** 2 + 1, width]
x = torch.cat([self.class_embedding.to(x.dtype) + zeros, x], dim=1)
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x, features = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if return_all:
features['pre_logits'] = x
return features
if self.proj is not None:
x = x @ self.proj
return x
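# Wrapper selecting the VisualTransformer variant (ViT-B/32, ViT-B/16 or ViT-L/14) requested by arch_name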
class CLIPNet(nn.Module):
def __init__(self, arch_name, pretrained, **kwargs):
super(CLIPNet, self).__init__()
if arch_name == 'CLIP_ViTB32':
self.clip = VisualTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512)
elif arch_name in ('CLIP_ViTB16', 'CLIP_ViTB16_FP16'):
self.clip = VisualTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512)
elif arch_name in ('CLIP_ViTL14', 'CLIP_ViTL14_FP16'):
self.clip = VisualTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768)
else:
raise KeyError(f'Unsupported arch_name for CLIP, {arch_name}')
def forward(self, input_data):
output = self.clip(input_data)
return output
def CLIP(arch_name='CLIP_RN50',
use_pretrain=False,
load_from='',
state_dict=None,
**kwargs):
model = CLIPNet(arch_name=arch_name, pretrained=None, **kwargs)
if use_pretrain:
if arch_name.endswith('FP16'):
convert_weights(model.clip)
load_pretrained(model.clip, state_dict, load_from)
return model
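# Simple linear head used to probe features extracted by the backbone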
class ProbingModel(torch.nn.Module):
def __init__(self, feat_size, num_classes):
super(ProbingModel, self).__init__()
self.linear = torch.nn.Linear(feat_size, num_classes)
def forward(self, x):
return self.linear(x)
| [] |
2024-01-10 | jonfernandes/2022-cohere-hackathon-team-turing | ChatApp~ai_manager.py | import re
import pandas as pd
from collections import namedtuple
import cohere
from annoy import AnnoyIndex
import numpy as np
import datetime
from pathlib import Path
import json
from conversant.prompt_chatbot import PromptChatbot
import time
with open(f'{str(Path.cwd())}/ChatApp/COHERE_API_KEY.json', 'rt') as file:
content = json.load(file)
API_KEY = content['API_KEY']
class AIManager:
def __init__(self, API_KEY):
pd.set_option('max_colwidth', None)
self.co = cohere.Client(API_KEY)
self.create_products()
self.generate_kb()
self.bot = PromptChatbot.from_persona("customer_support_bot", self.co, '.')
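    # Few-shot summarization: example dialogue/TLDR pairs are prepended and the model completes the TLDR for the new chat log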
def generate_summary(self, chat_log):
prompt='Summarize this dialogue:\nCustomer: Hi\nCustomer Support: hi. what can i help you with today?\nCustomer: What is the height at the back in cm for the Rowlinson Timber Cold Frame?\nCustomer Support: 38cm\nCustomer: What is the width in cm for the Rowlinson Timber Cold Frame?\nCustomer Support: 102cm\nCustomer: Thanks.\nTLDR: A customer wants the dimensions of a Rowlinson Timber Cold Frame\n--\nSummarize this dialogue:\nCustomer: Hi\nCustomer Support: hi. How can I assist you today?\nCustomer: The lid for the Halls Standard Cold Frame is very weak. What is it made of?\nCustomer Support: Polycarbonate.\nCustomer: It will only last a few months. How do I return it and get a refund, please?\nTLDR: The customer wants to return an item and get a refund as they think the material it is made of is very weak.\n--\nSummarize this dialogue:\nCustomer: Hi\nCustomer Support: hi. How can I help you today?\nCustomer: What wood is the Rowlinson cold frame made out of?\nCustomer Support: Softwood.\nCustomer: That\'s too flimsy and won\'t last. I want to return it and get a refund. How do I do that?\nTLDR: A customer wants to return an item as they are not happy with the material it is made of.\n--\nSummarize this dialogue:\nCustomer: Hi\nCustomer Support: hi. How can I help you today?\nCustomer: What is the height at the back of the Halls Standard Cold Frame in inches?\nCustomer Support: 28cm\nCustomer: That\'s too short. I want to return it and get a refund. How do I do that?\nTLDR: A customer wants to return an item as it is too short for their needs.\n--\nSummarize this dialogue:\n'
chat_log = chat_log + '\nTLDR:'
prompt += chat_log
response = self.co.generate(
model='xlarge',
prompt=prompt,
max_tokens=20,
temperature=0.6,
k=0,
p=1,
frequency_penalty=0,
presence_penalty=0,
stop_sequences=["--"],
return_likelihoods='NONE')
return response.generations[0].text
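    # Build the knowledge base by generating question/answer pairs for every product prompt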
def generate_kb(self):
self.kb = pd.DataFrame({'question': []})
for product in self.products:
response = self.co.generate(
model='xlarge',
prompt=product.prompt,
max_tokens=200,
temperature=0.3,
k=0,
p=0.75,
frequency_penalty=0,
presence_penalty=0,
stop_sequences=[],
return_likelihoods='NONE')
results = response.generations[0].text
df = self.generate_df(results)
self.kb = pd.concat([self.kb, df], axis=0).reset_index(drop=True)
def sentiment_analysis(self, chat):
from cohere.classify import Example
examples=[Example("The order came 5 days early. I really liked it.", "positive"),
Example("The item exceeded my expectations", "positive"),
Example("I ordered more for my friends", "positive"),
Example("I would buy this again", "positive"),
Example("I would recommend this to others", "positive"),
Example("The package was damaged", "negative"),
Example("The order is 5 days late", "negative"),
Example("The order was incorrect", "negative"),
Example("I want to return my item. It does not work.", "negative"),
Example("The item\'s material feels low quality", "negative"),
Example("The product was okay", "neutral"),
Example("I received five items in total", "neutral"),
Example("I bought it from the website", "neutral"),
Example("I used the product this morning", "neutral"),
Example("The product arrived yesterday", "neutral")]
inputs = [chat]
response = self.co.classify(
model='medium',
inputs=inputs,
examples=examples)
return response.classifications
'''
output_filename = f'{datetime.datetime.today():%Y-%m-%d-%H%M}.txt'
with open(Path().cwd()/'ChatApp'/'call_logs'/f'{output_filename}', 'wt') as file:
file.write('Support call worker\nCustomer ID\nCall sentiment: {response.classifications}\nConfidence')
'''
def answer_message(self, msg: str, n_top: int = 3) -> list[str]:
kb_df = self.query_using_semantic_search(msg)
#gen = self.generate_using_dialog(msg)
while True: # Need to do this as a workaround as it takes approx. 4s for gen to get a response.
gen = self.generate_using_conversant(msg)
#print(f'gen --> {gen}')
if gen:
break
time.sleep(2)
        # pd.concat replaces the deprecated DataFrame.append (removed in pandas 2.x)
        result_df = pd.concat([kb_df, pd.DataFrame.from_dict({'question': [gen], "distance": [1]})], ignore_index=True)
return result_df.sort_values("distance")
def create_products(self):
product = namedtuple('product', ['name', 'prompt'])
data = [{'name': 'Halls Standard Cold Frame',
'prompt': 'Generate questions from this text: \n\nProduct: Halls Standard Cold Frame\nSturdy Aluminium Framework - rot and rust proof, maintenance free. \n\nAvailable With Two Types Of Glazing - choose from either 3mm Toughened Glass (if broken this glass granulates removing any danger of injury) or Polycarbonate (which is virtually unbreakable). Glazing is for all sides and the top of the cold frame.\n \nDimensions With Toughened Glass :\nWidth – 4ft 3in (129cm)\nDepth – 2ft 1in (63cm)\nHeight at the back – 1ft 3in (38cm) sloping to 1ft 1in (33cm) at the front\n\nDimensions With Polycarbonate :\nWidth – 3ft 3in (99cm)\nDepth – 2ft (60cm)\nHeight at the back – 1ft 4in (40cm) \n \nTwo Sliding, Hinged Lids - allow access to all areas of the cold frame. They also enable you to alter ventilation to your plants.\n\nDelivery - delivered direct from the manufacturers please allow up to 4-6 weeks for delivery.\n--\nQuestion: What is the delivery period for the Halls Standard Cold Frame?\nAnswer: 4-6 weeks\n--\nQuestion: What is the width in cm for the Halls Standard toughened glass Cold Frame?\nAnswer: 129cm\n--\nQuestion: What is the depth for the toughened glass in feet and inches for the Halls Standard Cold Frame?\nAnswer: 2ft 1in\n--\nQuestion: What is the height at the back in cm for the Halls Standard Polycarbonate Cold Frame?\nAnswer: 40cm\n--\nQuestion: What is the height at the front in feet and inches for the Halls Standard toughened glass Cold Frame?\nAnswer: 1ft 1in\n--\nQuestion: What is the width for the polycarbonate in cm for the Halls Standard Cold Frame?\nAnswer: 99cm\n--\nQuestion: What is the depth for the polycarbonate in feet and inches for the Halls Standard Cold Frame?\nAnswer: 2ft\n--\nQuestion: What is the height at the back in cm for the Halls Standard Cold Frame?\nAnswer: 1ft 4in\n--\nQuestion: What is the height at the front in cm for the Halls Standard Cold Frame?\nAnswer: 1ft 1in\n--\nQuestion: What is the height at the back in cm for the Halls Standard Cold Frame?\nAnswer: 1\n--\n'},
{'name': 'Rowlinson Timber Coldframe',
'prompt': 'Generate questions from this text: \n\nProduct: Rowlinson Timber Coldframe\n\nFSC Pressure Treated 19mm Softwood Frame - manufactured from FSC certified timber from sustainable sources. It has been pressure treated against rot. You can stain or paint the frame to match your garden if required. \n \nTwo Independently Opening Lids - allowing easy access to the plants in your cold frame. Supplied complete with wooden stays, with two height setting, to allow excellent ventilation. The lid is glazed with clear styrene plastic, allowing excellent light transmission and is virtually unbreakable.\n\nDimensions :\nWidth - 3ft 4in / 102cm \nDepth - 2ft 8in / 81cm \nHeight at back - 1ft 3in / 38cm\nHeight at front - 11in / 29cm\n\nSelf Assembly\nThis cold frame is delivered as pre assembled panels which simply need screwing together. The lid is supplied fully glazed and should be screwed into place together with the stays provided. You will need a cross-head screwdriver during construction.\n\nDelivery : please allow up to 14 working days for delivery.\n--\nQuestion: What is the delivery period for the Rowlinson Timber Cold Frame?\nAnswer: Up to 14 working days\n--\nQuestion: What is the width in inches for the Rowlinson Timber Cold Frame?\nAnswer: 3ft 4in\n--\nQuestion: What is the height at the back in cm for the Rowlinson Timber Cold Frame?\nAnswer: 38cm\n--\nQuestion: What wood is the Rowlinson cold frame made out of?\nAnswer: Softwood\n--\n'},
{'name': 'Haxnicks Grower Frame Polythene Cover',
'prompt': 'Generate questions from this text: \n\nProduct: Haxnicks Grower Frame Polythene Cover\n\nShaped to easily fit over the Grower Frame to create a protected space that will retain warmth and humidity for quicker plant growth.\nFour zips on the sides of the cover lets you easily access all areas of the area under cover.\nRoll up insect proof ventilation panels at either end of the cover allow air to circulate whilst preventing insects from getting to your plants.\nSize: 9’8\" long x 3’3\" wide x 3’3\" high (3 metres x 1 metre x 1 metre)\n--\nQuestion: How long is the Haxnicks Grower Frame Polythene Cover in feet and inches?\nAnswer: 9’8\"\n--\nQuestion: What is the width of the Haxnicks Grower Frame Polythene Cover in metres?\nAnswer: 1 metre\n--\nQuestion: How high is the Haxnicks Grower Frame Polythene Cover in feet and inches?\nAnswer: 3’3\"\n--\n'},]
self.products = [product(**item) for item in data]
def generate_df(self, results):
question = []
answer = []
results = re.sub('\n',' ', results)
results = results.split('--')
results = [result.strip() for result in results]
for result in results:
if 'Question' in result:
out = re.findall(r'Question: (.*?)? Answer: (.*?)$',result)
for item in out:
if item:
q, a = item
question.append(q + ' ' + a)
return pd.DataFrame({'question': question})
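    # Embed the knowledge-base questions and the query, then use an Annoy index to return the nearest matches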
def query_using_semantic_search(self, query):
df = self.kb
embeds = self.co.embed(texts=list(df['question']),
model="large",
truncate="LEFT").embeddings
embeds = np.array(embeds)
num_entries, num_dimensions = embeds.shape
search_index = AnnoyIndex(num_dimensions, 'angular')
for i in range(len(embeds)):
search_index.add_item(i, embeds[i])
search_index.build(10)
search_index.save('test.ann')
query_embed = self.co.embed(texts=[query],
model='large',
truncate='LEFT').embeddings
similar_item_ids = search_index.get_nns_by_vector(query_embed[0],
2,
include_distances=True)
return pd.DataFrame({'question': df.loc[similar_item_ids[0], 'question'],
'distance': similar_item_ids[1]})
def generate_using_conversant(self, dialog):
#bot = PromptChatbot.from_persona("customer_support_bot", co, '.')
return self.bot.reply(f'{dialog}')
def generate_using_dialog(self, dialog):
promt_text = f"""You are a customer support agent responding to a customer.
--
Customer: Hello.
Agent: Hello, what can I help you with today?
--
Customer: {dialog}
Agent:"""
response = self.co.generate(
model='xlarge',
prompt=promt_text,
max_tokens=15,
temperature=0.3,
k=0,
p=0.75,
frequency_penalty=0,
presence_penalty=0,
stop_sequences=["--"],
return_likelihoods='NONE')
return response.generations[0].text.split("--")[0].strip()
if __name__ == "__main__":
aiManager = AIManager(API_KEY)
msg = 'What is the height at the back in cm for the Halls Standard Cold Frame'
response = aiManager.answer_message(msg)
print(response)
| [
"Summarize this dialogue:\nCustomer: Hi\nCustomer Support: hi. what can i help you with today?\nCustomer: What is the height at the back in cm for the Rowlinson Timber Cold Frame?\nCustomer Support: 38cm\nCustomer: What is the width in cm for the Rowlinson Timber Cold Frame?\nCustomer Support: 102cm\nCustomer: Thanks.\nTLDR: A customer wants the dimensions of a Rowlinson Timber Cold Frame\n--\nSummarize this dialogue:\nCustomer: Hi\nCustomer Support: hi. How can I assist you today?\nCustomer: The lid for the Halls Standard Cold Frame is very weak. What is it made of?\nCustomer Support: Polycarbonate.\nCustomer: It will only last a few months. How do I return it and get a refund, please?\nTLDR: The customer wants to return an item and get a refund as they think the material it is made of is very weak.\n--\nSummarize this dialogue:\nCustomer: Hi\nCustomer Support: hi. How can I help you today?\nCustomer: What wood is the Rowlinson cold frame made out of?\nCustomer Support: Softwood.\nCustomer: That's too flimsy and won't last. I want to return it and get a refund. How do I do that?\nTLDR: A customer wants to return an item as they are not happy with the material it is made of.\n--\nSummarize this dialogue:\nCustomer: Hi\nCustomer Support: hi. How can I help you today?\nCustomer: What is the height at the back of the Halls Standard Cold Frame in inches?\nCustomer Support: 28cm\nCustomer: That's too short. I want to return it and get a refund. How do I do that?\nTLDR: A customer wants to return an item as it is too short for their needs.\n--\nSummarize this dialogue:\n",
"chat_logc808616f-ba73-40cb-8d8d-68ed0feac20e\nTLDR:"
] |
2024-01-10 | bclark86/uplimit-openai-app | podcast_backend.py | import modal
def download_whisper():
    # Download the Whisper model
import os
import whisper
print ("Download the Whisper model")
# Perform download only once and save to Container storage
whisper._download(whisper._MODELS["medium"], '/content/podcast/', False)
stub = modal.Stub("corise-podcast-project")
corise_image = modal.Image.debian_slim().pip_install("feedparser",
"https://github.com/openai/whisper/archive/9f70a352f9f8630ab3aa0d06af5cb9532bd8c21d.tar.gz",
"requests",
"ffmpeg",
"openai",
"tiktoken",
"wikipedia",
"ffmpeg-python").apt_install("ffmpeg").run_function(download_whisper)
@stub.function(image=corise_image, gpu="any", timeout=600)
def get_transcribe_podcast(rss_url, local_path):
print ("Starting Podcast Transcription Function")
print ("Feed URL: ", rss_url)
print ("Local Path:", local_path)
# Read from the RSS Feed URL
import feedparser
intelligence_feed = feedparser.parse(rss_url)
podcast_title = intelligence_feed['feed']['title']
episode_title = intelligence_feed.entries[0]['title']
episode_image = intelligence_feed['feed']['image'].href
for item in intelligence_feed.entries[0].links:
if (item['type'] == 'audio/mpeg'):
episode_url = item.href
episode_name = "podcast_episode.mp3"
print ("RSS URL read and episode URL: ", episode_url)
# Download the podcast episode by parsing the RSS feed
from pathlib import Path
p = Path(local_path)
p.mkdir(exist_ok=True)
print ("Downloading the podcast episode")
import requests
with requests.get(episode_url, stream=True) as r:
r.raise_for_status()
episode_path = p.joinpath(episode_name)
with open(episode_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
print ("Podcast Episode downloaded")
# Load the Whisper model
import os
import whisper
# Load model from saved location
print ("Load the Whisper model")
model = whisper.load_model('medium', device='cuda', download_root='/content/podcast/')
# Perform the transcription
print ("Starting podcast transcription")
result = model.transcribe(local_path + episode_name)
# Return the transcribed text
print ("Podcast transcription completed, returning results...")
output = {}
output['podcast_title'] = podcast_title
output['episode_title'] = episode_title
output['episode_image'] = episode_image
output['episode_transcript'] = result['text']
return output
@stub.function(image=corise_image, secret=modal.Secret.from_name("my-openai-secret"))
def get_podcast_summary(podcast_transcript):
import openai
## ADD YOUR LOGIC HERE TO RETURN THE SUMMARY OF THE PODCAST USING OPENAI
instructPrompt = """
Condense this podcast transcript into a one-page summary that is suitable for business professionals to understand:
"""
request = instructPrompt + podcast_transcript
chatOutput = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": request}
]
)
podcastSummary = chatOutput.choices[0].message.content
return podcastSummary
@stub.function(image=corise_image, secret=modal.Secret.from_name("my-openai-secret"))
def get_podcast_guest(podcast_transcript):
import openai
import wikipedia
import json
## ADD YOUR LOGIC HERE TO RETURN THE PODCAST GUEST INFORMATION
request = podcast_transcript[:5000]
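    # Use OpenAI function calling so the model returns the guest name as structured JSON arguments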
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": request}],
functions=[
{
"name": "get_podcast_guest_information",
"description": "Identify the name of the guest appearing in the podcast",
"parameters": {
"type": "object",
"properties": {
"guest_name": {
"type": "string",
"description": "Name of the guest",
},
"unit": {"type": "string"},
},
"required": ["guest_name"],
},
}
],
function_call={"name": "get_podcast_guest_information"}
)
podcastGuest = ""
response_message = completion["choices"][0]["message"]
if response_message.get("function_call"):
function_name = response_message["function_call"]["name"]
function_args = json.loads(response_message["function_call"]["arguments"])
podcastGuest=function_args.get("guest_name")
return podcastGuest
@stub.function(image=corise_image, secret=modal.Secret.from_name("my-openai-secret"))
def get_podcast_highlights(podcast_transcript):
import openai
### ADD YOUR LOGIC HERE TO RETURN THE HIGHLIGHTS OF THE PODCAST
instructPrompt = """
We want to extract some key moments in the podcast.\n
These are typically interesting insights from the guest or critical questions that the host might have put forward.\n
It could also be a discussion on a hot topic or controversial opinion.
Provide 5 to 10 key moments in the form of verbatims.
Transcript:
"""
request = instructPrompt + podcast_transcript
chatOutput = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": request}
]
)
podcastHighlights = chatOutput.choices[0].message.content
return podcastHighlights
@stub.function(image=corise_image, secret=modal.Secret.from_name("my-openai-secret"), timeout=1200)
def process_podcast(url, path):
output = {}
podcast_details = get_transcribe_podcast.call(url, path)
podcast_summary = get_podcast_summary.call(podcast_details['episode_transcript'])
podcast_guest = get_podcast_guest.call(podcast_details['episode_transcript'])
podcast_highlights = get_podcast_highlights.call(podcast_details['episode_transcript'])
output['podcast_details'] = podcast_details
output['podcast_summary'] = podcast_summary
output['podcast_guest'] = podcast_guest
output['podcast_highlights'] = podcast_highlights
return output
@stub.local_entrypoint()
def test_method(url, path):
output = {}
podcast_details = get_transcribe_podcast.call(url, path)
print ("Podcast Summary: ", get_podcast_summary.call(podcast_details['episode_transcript']))
print ("Podcast Guest Information: ", get_podcast_guest.call(podcast_details['episode_transcript']))
print ("Podcast Highlights: ", get_podcast_highlights.call(podcast_details['episode_transcript']))
| [
"\n Condense this podcast transcript into a one-page summary that is suitable for business professionals to understand: \n ",
"\n We want to extract some key moments in the podcast.\n \n These are typically interesting insights from the guest or critical questions that the host might have put forward.\n\n It could also be a discussion on a hot topic or controversial opinion.\n\n Provide 5 to 10 key moments in the form of verbatims. \n\n Transcript: \n ",
"You are a helpful assistant."
] |
2024-01-10 | ArtificiallyInteresting/AnimalPicker | animalLlm.py | from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate
from langchain import LLMChain
from langchain.prompts.chat import (
SystemMessagePromptTemplate,
)
from langchain.memory import ChatMessageHistory, ConversationBufferMemory
from langchain.schema.messages import (
SystemMessage,
AIMessage,
HumanMessage
)
from dotenv import load_dotenv
load_dotenv()
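# Ask the chat model for exactly five quiz questions that discriminate between the given options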
def generateQuestions(thing, names, descriptions):
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
template='''You are a question generating bot. Users are going to be given a quiz to determine which {thing} they are. Come up with exactly 5 questions that we could ask the user to determine which {thing} they are. These are the choices:\n'''
for i in range(len(names)):
template += "{names[" + str(i) + "]}: {" + "descriptions[" + str(i) + "]}\n"
prompt=PromptTemplate(
input_variables=["thing", "names", "descriptions"],
template=template,
)
chain = LLMChain(llm=llm, prompt=prompt)
output = chain.run(thing=thing, names=names, descriptions=descriptions)
#Validation here?
questions = output.split("\n")
return questions
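# Replay the quiz questions and answers as chat history and have the model announce which option the user matches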
def analyzeAnswers(thing, names, descriptions, questions, answers):
history = ChatMessageHistory()
template = "You are a funny and interesting chatbot analyzing the answers to a quiz to determine which {thing} the user is. The user will end up being one of these things: \n "
for i in range(len(names)):
template += names[i] + ": " + descriptions[i] + " \n "
# systemMessage = SystemMessagePromptTemplate.from_template(template=template, thing=thing, names=names, descriptions=descriptions)
template += "The user has already answered 5 questions to determine which {thing} they are, and their answers are as follows: \n "
# systemContent = template.format(thing=thing)
# systemMessage = SystemMessage(content=systemContent)
print(template)
# history.add_message(systemMessage)
for i in range(len(questions)):
history.add_message(AIMessage(content="Question " + str(i) + ": " + questions[i]))
history.add_message(HumanMessage(content=answers[i]))
history.add_message(AIMessage(content="Alright! The results are in! And the {thing} you are is...".format(thing=thing)))
memory = ConversationBufferMemory(return_messages=True)
memory.load_memory_variables(inputs=history)
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
prompt = PromptTemplate(
input_variables=["thing"], template=template
)
chain = LLMChain(llm=llm, memory=memory, prompt=prompt)
output = chain.run(thing=thing)
return output
if __name__ == "__main__":
thing = "animal"
names = ["lion", "penguin"]
descriptions = ["brave and fiersome", "cold and silly"]
# questions = generateQuestions(thing, names, descriptions)
# print(questions)
questions = ['1. Are you more inclined towards being brave and fierce, or do you tend to be more cautious and silly in your actions?', '2. Do you prefer warmer climates or are you more comfortable in colder environments?', '3. Are you known for your bravery and leadership qualities, or do you often find yourself being silly and making others laugh?', '4. Are you more comfortable in social situations, enjoying the company of others, or do you prefer solitude and quiet moments?', '5. When faced with challenges, do you tend to face them head-on with courage, or do you prefer to take a more cautious and calculated approach?']
answers = ["brave and fierce", "colder environments", "bravery and leadership qualities", "enjoying the company of others", "head-on with courage"]
answers = analyzeAnswers(thing, names, descriptions, questions, answers)
print(answers) | [
"descriptions",
"thing",
"You are a funny and interesting chatbot analyzing the answers to a quiz to determine which {thing} the user is. The user will end up being one of these things: \n ",
"names",
": ",
"Question ",
" \n ",
"{names[PLACEHOLDER]}: {descriptions[PLACEHOLDER]}\n",
"Alright! The results are in! And the PLACEHOLDER you are is...",
"You are a question generating bot. Users are going to be given a quiz to determine which {thing} they are. Come up with exactly 5 questions that we could ask the user to determine which {thing} they are. These are the choices:\n",
"The user has already answered 5 questions to determine which {thing} they are, and their answers are as follows: \n "
] |
2024-01-10 | berksengul17/langchain-virtual-assistant | virtual-assistant.py | import keyboard
import os
import tempfile
from dotenv import load_dotenv
import openai
import sounddevice as sd
import soundfile as sf
from elevenlabs import generate, play, set_api_key
from langchain.agents import initialize_agent, load_tools
from langchain.agents.agent_toolkits import ZapierToolkit
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory
from langchain.utilities.zapier import ZapierNLAWrapper
load_dotenv()
set_api_key(os.environ['ELEVEN_LABS_API_KEY'])
openai.api_key = os.environ['OPENAI_API_KEY']
duration = 5
fs = 44100
channels = 1
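# Record a fixed-length clip from the default microphone and return it as a NumPy array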
def record_audio(duration, fs, channels):
print("Recording...")
recording = sd.rec(int(duration * fs), samplerate=fs, channels=channels)
sd.wait()
print("Finished recording.")
return recording
def transcribe_audio(recording, fs):
with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_audio:
sf.write(temp_audio.name, recording, fs)
temp_audio.close()
with open(temp_audio.name, "rb") as audio_file:
transcript = openai.Audio.transcribe("whisper-1", audio_file)
os.remove(temp_audio.name)
return transcript["text"].strip()
def play_generated_audio(text, voice="Bella", model="eleven_monolingual_v1"):
audio = generate(text=text, voice=voice, model=model)
play(audio)
if __name__ == '__main__':
llm = OpenAI(temperature=0.6)
memory = ConversationBufferMemory(memory_key="chat_history")
zapier = ZapierNLAWrapper(zapier_nla_api_key=os.environ['ZAPIER_API_KEY'])
toolkit = ZapierToolkit.from_zapier_nla_wrapper(zapier)
tools = toolkit.get_tools() + load_tools(["human"])
agent = initialize_agent(tools, llm, memory=memory,
agent="conversational-react-description", verbose=True)
while True:
# print("Press spacebar to start recording.")
# keyboard.wait("space")
# recorded_audio = record_audio(duration, fs, channels)
# message = transcribe_audio(recorded_audio, fs)
message = input("You: ")
assistant_message = agent.run(message)
play_generated_audio(assistant_message) | [] |
2024-01-10 | reedington/QUFIK_TUNGA | detect_and_fix_bugs.py | from openai import OpenAI
from dotenv import load_dotenv, dotenv_values
load_dotenv()
client = OpenAI(api_key=dotenv_values(".env")["OPENAI_API_KEY"])
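# Send the supplied code to GPT-4 for review and return its suggested fixes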
def detect_and_fix_bugs(code_base, language):
# Define an initial message from the user
messages = [
{
"role": "user",
"content": "Say this is a test",
}
] # Define messages as a list with a single initial message dictionary
code_base = code_base
language = language
if code_base:
# Append the user's new message to the messages list
messages.append({'role': 'user', 'content': f'Please review the following {language} code and identify any bugs or potential issues. If you find any errors, please suggest a fix or improvements to the code: {code_base}'})
# Create a chat completion using the AI model (assuming 'client' is initialized elsewhere)
chat_completion = client.chat.completions.create(
messages=messages, # Pass the list of messages
model="gpt-4" # Use the GPT-4 model for generating a response
)
# Retrieve the response content from the chat completion
# Note: Make sure 'chat_completion' contains the response object with 'choices' available
reply = chat_completion.choices[0].message.content
# Add the assistant's response to the messages list
messages.append({"role": "assistant", "content": reply})
return reply
| [
"Please review the following PLACEHOLDER code and identify any bugs or potential issues. If you find any errors, please suggest a fix or improvements to the code: PLACEHOLDER",
"Say this is a test"
] |
2024-01-10 | keatonminor/GitPractice | Jarvis.py | import openai
import speech_recognition as sr
import pyttsx3
engine = pyttsx3.init()
engine.setProperty('rate', 160)
engine.setProperty('pitch', 0.8)
recognizer = sr.Recognizer()
openai.api_key = ""
prompt= "hello there, in obi wan voice"
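# Send the recognised speech to the Completions API and return the generated reply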
def create_response(text):
response = openai.Completion.create(
model="text-davinci-003",
prompt=("Answer like the rapper drake." + str(text)),
#prompt= ("Answer in the style of nietzsche but be bitter." + str(text)),
temperature=0.9,
max_tokens=200,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response.choices[0].text
###trying to make a function that will continue to chat with the user
while True:
# Set up the microphone and listen for the user's voice
with sr.Microphone() as source:
print("Say something:")
audio = recognizer.listen(source)
# Convert the audio to text
try:
text = recognizer.recognize_google(audio)
print(f"You said: {text}")
if "Jarvis" in text:
# Respond to the user saying "Jarvis"
engine.say("Yes, what can I do for you sir?")
engine.runAndWait()
# Listen for the user's next instructions
with sr.Microphone() as source:
audio = recognizer.listen(source)
text = recognizer.recognize_google(audio)
print(f"You said: {text}")
response=(create_response(text))
engine.say(response)
print(response)
engine.runAndWait()
engine.stop()
break
# Do something with the instructions (e.g., perform a task, etc.)
except sr.UnknownValueError:
print("Sorry, I couldn't understand what you said.")
except sr.RequestError as e:
print("Sorry, there was an error processing your request: " + str(e))
# engine.say(create_response(text))
# engine.runAndWait()
# engine.stop()
| [
"Answer like the rapper drake.PLACEHOLDER",
"hello there, in obi wan voice"
] |
2024-01-10 | Terieyenike/prompt-llms | project_streamlit_custom_chatgpt~project_streamlit_custom_chatgpt.py | from langchain.chat_models import ChatOpenAI
from langchain.schema import(
SystemMessage,
HumanMessage,
AIMessage
)
import streamlit as st
from streamlit_chat import message
# loading the OpenAI api key from .env (OPENAI_API_KEY="sk-********")
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(), override=True)
st.set_page_config(
    page_title='Your Custom Assistant',
page_icon='🤖'
)
st.subheader('Your Custom ChatGPT 🤖')
chat = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0.5)
# creating the messages (chat history) in the Streamlit session state
if 'messages' not in st.session_state:
st.session_state.messages = []
# creating the sidebar
with st.sidebar:
# streamlit text input widget for the system message (role)
system_message = st.text_input(label='System role')
# streamlit text input widget for the user message
user_prompt = st.text_input(label='Send a message')
if system_message:
if not any(isinstance(x, SystemMessage) for x in st.session_state.messages):
st.session_state.messages.append(
SystemMessage(content=system_message)
)
# st.write(st.session_state.messages)
# if the user entered a question
if user_prompt:
st.session_state.messages.append(
HumanMessage(content=user_prompt)
)
with st.spinner('Working on your request ...'):
# creating the ChatGPT response
response = chat(st.session_state.messages)
# adding the response's content to the session state
st.session_state.messages.append(AIMessage(content=response.content))
# st.session_state.messages
# message('this is chatgpt', is_user=False)
# message('this is the user', is_user=True)
# adding a default SystemMessage if the user didn't entered one
if len(st.session_state.messages) >= 1:
if not isinstance(st.session_state.messages[0], SystemMessage):
st.session_state.messages.insert(0, SystemMessage(content='You are a helpful assistant.'))
# displaying the messages (chat history)
for i, msg in enumerate(st.session_state.messages[1:]):
if i % 2 == 0:
message(msg.content, is_user=True, key=f'{i} + 🤓') # user's question
else:
message(msg.content, is_user=False, key=f'{i} + 🤖') # ChatGPT response
# run the app: streamlit run ./project_streamlit_custom_chatgpt.py
| [
"You are a helpful assistant.",
"Send a message"
] |
2024-01-10 | Terieyenike/prompt-llms | llm_question_answering_app~chat_with_documents.py | import streamlit as st
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
# loading PDF, DOCX and TXT files as LangChain Documents
def load_document(file):
import os
name, extension = os.path.splitext(file)
if extension == '.pdf':
from langchain.document_loaders import PyPDFLoader
print(f'Loading {file}')
loader = PyPDFLoader(file)
elif extension == '.docx':
from langchain.document_loaders import Docx2txtLoader
print(f'Loading {file}')
loader = Docx2txtLoader(file)
elif extension == '.txt':
from langchain.document_loaders import TextLoader
loader = TextLoader(file)
else:
print('Document format is not supported!')
return None
data = loader.load()
return data
# splitting data in chunks
def chunk_data(data, chunk_size=256, chunk_overlap=20):
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
chunks = text_splitter.split_documents(data)
return chunks
# create embeddings using OpenAIEmbeddings() and save them in a Chroma vector store
def create_embeddings(chunks):
embeddings = OpenAIEmbeddings()
vector_store = Chroma.from_documents(chunks, embeddings)
return vector_store
def ask_and_get_answer(vector_store, q, k=3):
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
llm = ChatOpenAI(model='gpt-3.5-turbo', temperature=1)
retriever = vector_store.as_retriever(search_type='similarity', search_kwargs={'k': k})
chain = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
answer = chain.run(q)
return answer
# calculate embedding cost using tiktoken
def calculate_embedding_cost(texts):
import tiktoken
enc = tiktoken.encoding_for_model('text-embedding-ada-002')
total_tokens = sum([len(enc.encode(page.page_content)) for page in texts])
# print(f'Total Tokens: {total_tokens}')
# print(f'Embedding Cost in USD: {total_tokens / 1000 * 0.0004:.6f}')
return total_tokens, total_tokens / 1000 * 0.0004
# clear the chat history from streamlit session state
def clear_history():
if 'history' in st.session_state:
del st.session_state['history']
if __name__ == "__main__":
import os
# loading the OpenAI api key from .env
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(), override=True)
st.image('img.png')
st.subheader('LLM Question-Answering Application 🤖')
with st.sidebar:
# text_input for the OpenAI API key (alternative to python-dotenv and .env)
api_key = st.text_input('OpenAI API Key:', type='password')
if api_key:
os.environ['OPENAI_API_KEY'] = api_key
# file uploader widget
uploaded_file = st.file_uploader('Upload a file:', type=['pdf', 'docx', 'txt'])
# chunk size number widget
chunk_size = st.number_input('Chunk size:', min_value=100, max_value=2048, value=512, on_change=clear_history)
# k number input widget
k = st.number_input('k', min_value=1, max_value=20, value=3, on_change=clear_history)
# add data button widget
add_data = st.button('Add Data', on_click=clear_history)
if uploaded_file and add_data: # if the user browsed a file
with st.spinner('Reading, chunking and embedding file ...'):
# writing the file from RAM to the current directory on disk
bytes_data = uploaded_file.read()
file_name = os.path.join('./', uploaded_file.name)
with open(file_name, 'wb') as f:
f.write(bytes_data)
data = load_document(file_name)
chunks = chunk_data(data, chunk_size=chunk_size)
st.write(f'Chunk size: {chunk_size}, Chunks: {len(chunks)}')
tokens, embedding_cost = calculate_embedding_cost(chunks)
st.write(f'Embedding cost: ${embedding_cost:.4f}')
# creating the embeddings and returning the Chroma vector store
vector_store = create_embeddings(chunks)
# saving the vector store in the streamlit session state (to be persistent between reruns)
st.session_state.vs = vector_store
st.success('File uploaded, chunked and embedded successfully.')
# user's question text input widget
q = st.text_input('Ask a question about the content of your file:')
if q: # if the user entered a question and hit enter
standard_answer = "Answer only based on the text you received as input. Don't search external sources. " \
"If you can't answer then return `I DONT KNOW`."
q = f"{q} {standard_answer}"
if 'vs' in st.session_state: # if there's the vector store (user uploaded, split and embedded a file)
vector_store = st.session_state.vs
st.write(f'k: {k}')
answer = ask_and_get_answer(vector_store, q, k)
# text area widget for the LLM answer
st.text_area('LLM Answer: ', value=answer)
st.divider()
# if there's no chat history in the session state, create it
if 'history' not in st.session_state:
st.session_state.history = ''
# the current question and answer
value = f'Q: {q} \nA: {answer}'
st.session_state.history = f'{value} \n {"-" * 100} \n {st.session_state.history}'
h = st.session_state.history
# text area widget for the chat history
st.text_area(label='Chat History', value=h, key='history', height=400)
# run the app: streamlit run ./chat_with_documents.py
| [] |
2024-01-10 | Entegre77/CodeMaster | prototype.py | # Import the necessary modules
import openai
import autogpt
# Set up the GPT-3.5 powered Agents
agent_manager = autogpt.agent_manager.AgentManager()
# Define a function to perform a web search
@agent_manager.agent
def search(query):
response = openai.Completion.create(
engine="text-davinci-002",
prompt=f"Perform a web search for {query}.",
max_tokens=1024,
n=1,
stop=None,
temperature=0.5,
)
return response.choices[0].text
# Define a function to create a new file
@agent_manager.agent
def create_file(filename):
with open(filename, "w") as f:
f.write("")
return f"Successfully created file {filename}."
# Define a function to append text to a file
@agent_manager.agent
def append_to_file(filename, text):
with open(filename, "a") as f:
f.write(text)
return f"Successfully appended text to file {filename}."
# Define a function to read a file
@agent_manager.agent
def read_file(filename):
with open(filename, "r") as f:
text = f.read()
return text
# Define a function to list files in a directory
@agent_manager.agent
def list_files(directory):
import os
files = os.listdir(directory)
return files
# Define a function to delete a file
@agent_manager.agent
def delete_file(filename):
import os
os.remove(filename)
return f"Successfully deleted file {filename}."
| [
"Perform a web search for PLACEHOLDER."
] |
2024-01-10 | MLeidel/GptGUI | gptgui.py | '''
gptgui.py 1.3
by Michael Leidel
remarks:
modified API for openai >=1.3.3
'''
import os
import sys
import time
import signal
import configparser
import subprocess
import webbrowser
import markdown
from tkinter.font import Font
from tkinter import messagebox
from ttkbootstrap import *
from ttkbootstrap.constants import *
from ttkbootstrap.tooltip import ToolTip
import datetime
from openai import OpenAI
# for subprocess to exec gptopt.py
PY = "python3" # Linux
# PY = "pythonw" # Windows
class Application(Frame):
''' main class docstring '''
def __init__(self, parent):
Frame.__init__(self, parent)
self.pack(fill=BOTH, expand=True, padx=4, pady=4)
self.Saved = True
# get settings from ini file
config = configparser.ConfigParser()
config.read('gptgui.ini')
self.MyTheme = config['Main']['theme']
self.MyPath = config['Main']['path']
self.MyFntQryF = config['Main']['fontqryfam']
self.MyFntQryZ = config['Main']['fontqrysiz']
self.MyFntGptF = config['Main']['fontgptfam']
self.MyFntGptZ = config['Main']['fontgptsiz']
self.MyModel = config['Main']['engine']
self.MyTemp = config['Main']['temperature']
self.MyTokens = config['Main']['tokens']
self.MyKey = config['Main']['gptkey']
self.MyTime = config['Main']['showtime']
self.MySave = config['Main']['autosave']
self.MyEditor = config['Main']['editor']
self.MyFile = config['Main']['tempfile']
self.MySystem = config['Main']['system']
self.TOPFRAME = int(config['Main']['top_frame'])
if len(self.MyKey) < 16:
self.MyKey = os.environ.get(self.MyKey) # Using ENV var instead of actual key string.
self.create_widgets()
def create_widgets(self):
''' creates GUI for app '''
# expand widget to fill the grid
self.columnconfigure(1, weight=1, pad=5)
self.columnconfigure(2, weight=1, pad=5)
self.rowconfigure(2, weight=1, pad=5)
self.query = Text(self)
self.query.grid(row=1, column=1, columnspan=2, sticky='nsew')
efont = Font(family=self.MyFntQryF, size=self.MyFntQryZ)
self.query.configure(font=efont)
self.query.config(wrap="word", # wrap=NONE
undo=True, # Tk 8.4
width=50,
height=self.TOPFRAME,
padx=5, # inner margin
#insertbackground='#000', # cursor color
tabs=(efont.measure(' ' * 4),))
self.scrolly = Scrollbar(self, orient=VERTICAL,
command=self.query.yview)
self.scrolly.grid(row=1, column=3, sticky='ns') # use nse
self.query['yscrollcommand'] = self.scrolly.set
self.txt = Text(self)
self.txt.grid(row=2, column=1, columnspan=2, sticky='nsew')
efont = Font(family=self.MyFntGptF, size=self.MyFntGptZ)
self.txt.configure(font=efont)
self.txt.config(wrap="word", # wrap=NONE
undo=True, # Tk 8.4
width=50,
height=12,
padx=5, # inner margin
#insertbackground='#000', # cursor color
tabs=(efont.measure(' ' * 4),))
self.scrolly = Scrollbar(self, orient=VERTICAL, command=self.txt.yview)
self.scrolly.grid(row=2, column=3, sticky='ns') # use nse
self.txt['yscrollcommand'] = self.scrolly.set
# BUTTON FRAME
btn_frame = Frame(self)
btn_frame.grid(row=4, column=1, sticky='w')
self.clear = Button(btn_frame, text='Clear', command=self.on_clear_all)
self.clear.grid(row=1, column=2, sticky='w',
pady=(5, 0), padx=(5, 7))
self.save = Button(btn_frame, text='Save', command=self.on_save_file)
self.save.grid(row=1, column=3, sticky='w',
pady=(5, 0), padx=5)
self.view = Button(btn_frame, text='View', command=self.on_view_file)
self.view.grid(row=1, column=4, sticky='w',
pady=(5, 0))
self.purge = Button(btn_frame, text='Purge', command=self.on_purge)
self.purge.grid(row=1, column=5, sticky='w',
pady=(5, 0), padx=5)
self.open = Button(btn_frame, text='Text', command=self.on_md_open)
self.open.grid(row=1, column=6, sticky='w',
pady=(5, 0), padx=5)
self.md = Button(btn_frame, text='Html', command=self.on_md_render)
self.md.grid(row=1, column=7, sticky='w',
pady=(5, 0), padx=(0, 5))
self.opts = Button(btn_frame, text='Options', command=self.options)
self.opts.grid(row=1, column=8, sticky='w',
pady=(5, 0), padx=5)
self.sub = Button(btn_frame,
text='Submit Query (Ctrl-g)',
command=self.on_submit, width=35)
self.sub.grid(row=1, column=9, sticky='w',
pady=(5, 0), padx=(20, 0))
# END BUTTON FRAME
cls = Button(self, text='Close', command=self.exit_program)
cls.grid(row=4, column=2, columnspan=2, sticky='e',
pady=(5,0), padx=5)
# Popup menus - for self.query Text widgets
self.popup_query = Menu(tearoff=0, title="title")
self.popup_query.add_command(label="Copy",
command=lambda: self.popquery(1))
self.popup_query.add_command(label="Paste",
command=lambda: self.popquery(2))
self.popup_query.add_separator()
self.popup_query.add_command(label="Copy All",
command=lambda: self.popquery(3))
self.popup_query.add_separator()
self.popup_query.add_command(label="Larger",
command=lambda: self.popquery(4))
self.popup_query.add_command(label="Smaller",
command=lambda: self.popquery(5))
self.popup_query.add_separator()
self.popup_query.add_command(label="Browser",
command=lambda: self.popquery(6))
self.query.bind("<Button-3>", self.do_pop_query)
# Popup menus - for self.txt Text widgets
self.popup_txt = Menu(tearoff=0, title="title")
self.popup_txt.add_command(label="Copy",
command=lambda: self.poptxt(1))
self.popup_txt.add_command(label="Paste",
command=lambda: self.poptxt(2))
self.popup_txt.add_separator()
self.popup_txt.add_command(label="Copy All",
command=lambda: self.poptxt(3))
self.txt.bind("<Button-3>", self.do_pop_txt)
# Bindings
root.bind("<Control-t>", self.show_tokens) # Show result tokens in title
root.bind("<Control-m>", self.on_toggle_time) # time elapsed toggle
root.bind("<Control-h>", self.on_kb_help) # show hotkey help
root.bind("<Control-q>", self.exit_program) # Close button
root.bind("<Control-s>", self.on_save_file) # Save button
root.bind("<Control-g>", self.on_submit) # Submit Query button
root.bind("<Control-Return>", self.on_submit) # Submit Query button
root.bind("<Control-Shift-S>", self.speak_text) # speak query response
root.bind("<Escape>", self.speak_text_cancel) # stop speaking
# ToolTips
ToolTip(self.clear,
text="Erase window text",
bootstyle=(INFO, INVERSE),
wraplength=140)
ToolTip(self.view,
text="View saved text in window",
bootstyle=(INFO, INVERSE),
wraplength=140)
ToolTip(self.save,
text="Append current text",
bootstyle=(INFO, INVERSE),
wraplength=140)
ToolTip(self.purge,
text="Remove all saved text",
bootstyle=(INFO, INVERSE),
wraplength=140)
ToolTip(self.sub,
text="Ctrl-Enter to Append",
bootstyle=(INFO, INVERSE),
wraplength=140)
ToolTip(self.md,
text="markdown to browser",
bootstyle=(INFO, INVERSE),
wraplength=140)
ToolTip(self.open,
text="markdown to text editor",
bootstyle=(INFO, INVERSE),
wraplength=140)
if self.MySave == "1":
self.save.config(text="Auto Save", bootstyle="default-outline")
self.query.focus_set()
# if a query was entered on the command line,
# execute it immediately
if len(sys.argv) > 1:
query = " ".join(sys.argv[1:])
self.query.insert("1.0", query)
self.on_submit()
else:
self.txt.delete("1.0", END)
self.txt.insert("1.0", "Ctrl-h for help")
#----------------------------------------------------------------------
def on_submit(self, e=None):
''' Query OpenAI Gpt engine and display response in Text widget'''
if e is None:
renderStyle = "X"
else:
renderStyle = e.keysym # "Return" means append to Output Text
start = time.time() # time the Gpt retrival
querytext = self.query.get("1.0", END)
if len(querytext) < 4:
return
if self.MySave == "0":
self.save.configure(bootstyle=DEFAULT) # new - not been saved
self.Saved = False
# get the Gpt key from the ini value
try:
client = OpenAI(
api_key = self.MyKey # openai API
)
except Exception as e:
messagebox.showerror("Could Not Read Key file",
"Did you enter your Gpt Key?")
return
# openai API request code
try:
response = client.chat.completions.create(
model=self.MyModel,
max_tokens=int(self.MyTokens),
temperature=float(self.MyTemp),
messages=[{"role": "system", "content": self.MySystem},
{"role": "user", "content" : querytext.strip()}
]
)
# display Gpt response in Text widget
output = response.choices[0].message.content
# collect response token info
self.length = len(output)
self.completion = response.usage.completion_tokens
self.total = response.usage.total_tokens
self.prompt = response.usage.prompt_tokens
# display response text
if self.MyTime == "1" :
self.elapsed = (time.time() - start)
output = f"elapsed time: {round(self.elapsed, 5)}\n-----\n" + output
if renderStyle != "Return":
self.txt.delete("1.0", END)
self.txt.insert("1.0", output)
else:
# self.txt.mark_set(INSERT, END)
self.txt.insert(END, output)
# on Auto Save do the save
if self.MySave == "1":
self.on_save_file()
except Exception as e:
messagebox.showerror("Problems", e)
print("Key=", self.MyKey)
def on_purge(self):
''' User is purging the query-save file '''
if not os.path.isfile(self.MyPath):
messagebox.showwarning(self.MyPath, "Empty - No File to purge")
return
ret = messagebox.askokcancel("Purge", "Delete All Saved Queries?")
if ret is True:
os.remove(self.MyPath)
messagebox.showinfo("Purge", "Saved Queries Deleted.")
def on_clear_all(self):
''' User is clearing the GUI fields '''
if self.Saved is False:
if messagebox.askokcancel('GptGUI',
'Last response not saved - continue?') is False:
return
self.txt.delete("1.0", END)
self.query.delete("1.0", END)
self.save.configure(bootstyle=DEFAULT) # new - not been saved
self.Saved = True
def on_save_file(self, e=None):
''' Save the current query and result to user file (MyPath) '''
resp = self.txt.get("1.0", END).strip()
qury = self.query.get("1.0", END).strip()
if qury == "" or resp == "": # make sure there is a query present
return
try:
msg = " \ncompletion tokens: " + str(self.completion) + \
" \ntotal tokens: " + str(self.total) + \
" \nprompt tokens: " + str(self.prompt) + "\n-----\n"
with open(self.MyPath, "a") as fout:
fout.write(str(now.strftime("%Y-%m-%d %H:%M \n")))
fout.write(qury + " \nengine: " + MyModel)
fout.write(msg)
fout.write(resp.strip() + "\n\n---\n\n")
except Exception as e:
messagebox.showerror("Save Query Problem", e)
if self.MySave == "0": # Auto Save is off
# indicate that a "save" has processed
self.save.configure(bootstyle="default-outline")
self.Saved = True
def on_view_file(self):
''' View the user saved queries file '''
if not os.path.isfile(self.MyPath):
messagebox.showwarning(self.MyPath, "Empty - No File")
return
if self.Saved is False:
if messagebox.askokcancel('GptGUI',
'Last response not saved - continue?') is False:
return
# Either the user has or has not saved the current query response.
# Therefore, set the "Save" button back to DEFAULT because
# if the response was not saved prior, then it is just lost.
self.Saved = True
self.save.configure(bootstyle=DEFAULT)
self.txt.delete("1.0", END)
with open(self.MyPath, "r") as fin:
self.txt.insert("1.0", fin.read())
self.query.delete("1.0", END)
def options(self, e=None):
''' Launch Options program and exit this program '''
subprocess.call([PY, "gptopt.py"])
# re-read configuration
config = configparser.ConfigParser()
config.read('gptgui.ini')
self.MyTheme = config['Main']['theme']
self.MyPath = config['Main']['path']
self.MyFntQryF = config['Main']['fontqryfam']
self.MyFntQryZ = config['Main']['fontqrysiz']
self.MyFntGptF = config['Main']['fontgptfam']
self.MyFntGptZ = config['Main']['fontgptsiz']
self.MyModel = config['Main']['engine']
self.MyTemp = config['Main']['temperature']
self.MyTokens = config['Main']['tokens']
self.MyKey = config['Main']['gptkey']
self.MyTime = config['Main']['showtime']
self.MySave = config['Main']['autosave']
self.MyEditor = config['Main']['editor']
self.MyFile = config['Main']['tempfile']
self.MySystem = config['Main']['system']
self.TOPFRAME = int(config['Main']['top_frame'])
if len(self.MyKey) < 16:
self.MyKey = os.environ.get(self.MyKey) # Using ENV var instead of actual key string.
# re-set the items and change font/size
efont = Font(family=self.MyFntQryF, size=self.MyFntQryZ)
self.query.configure(font=efont, height=self.TOPFRAME)
efont = Font(family=self.MyFntGptF, size=self.MyFntGptZ)
self.txt.configure(font=efont)
style = Style()
style = Style(theme=self.MyTheme)
MyTitle = "GptGUI (OpenAI) " + self.MyModel + " " + str(self.MyTokens) + " " + str(self.MyTemp)
root.title(MyTitle)
def show_tokens(self, e=None):
''' show response tokens '''
msg = "text length: " + str(self.length) + \
"\ncompletion tokens: " + str(self.completion) + \
"\ntotal tokens: " + str(self.total) + \
"\nprompt tokens: " + str(self.prompt)
if self.MyTime == "1":
msg += "\nResponse Time Elapsed: " + str(self.elapsed)
messagebox.showinfo("GptGUI Response Tokens", msg)
def on_toggle_time(self, e=None):
''' Toggles the showing of the response time '''
if self.MyTime == "1":
self.MyTime = "0"
else:
self.MyTime = "1"
messagebox.showinfo("Toggle Show Elapsed Time",
" Set to " + self.MyTime + " ")
def getmdtext(self):
''' get all or selected text '''
if self.txt.tag_ranges("sel"):
text = self.txt.selection_get()
else: # Select All
self.txt.focus()
self.txt.tag_add(SEL, '1.0', END)
self.txt.mark_set(INSERT, '1.0')
self.txt.see(INSERT)
if self.txt.tag_ranges("sel"):
text = self.txt.selection_get()
self.txt.tag_remove(SEL, "1.0", END)
return text
def on_md_open(self, e=None):
''' open txt (MD) in your text editor '''
text = self.getmdtext()
filename = os.getcwd() + '/' + self.MyFile
print(filename)
with open(filename, 'w') as f:
f.write(text)
print(filename, self.MyEditor)
subprocess.Popen([self.MyEditor, filename])
def on_md_render(self, e=None):
''' render txt (MD) to html and show window '''
text = self.getmdtext()
# convert MD to HTML
H = markdown.markdown(text,
extensions=['fenced_code'])
# write to file
filename = os.getcwd() + '/' + self.MyFile + '.html'
print(filename)
with open(filename, 'w') as f:
f.write(H)
# open file in browser
webbrowser.open_new_tab('file:///' + filename)
def speak_text(self, e=None):
''' Speak the query response text '''
text = self.getmdtext() # get selected or all text
self.espeak_proc = subprocess.Popen(["espeak-ng", text])
def speak_text_cancel(self, e=None):
''' cancel the currently speaking text '''
self.espeak_proc.send_signal(signal.SIGINT)
def on_kb_help(self, e=None):
''' display hot keys message '''
msg = '''
<Ctrl-t> View response metrics\n
<Ctrl-m> Temporarily Toggle\n
show-elapsed-time\n
<Ctrl-h> This HotKey help\n
<Ctrl-q> Close Program\n
No Prompt\n
<Ctrl-s> Save output (Button)\n
<Ctrl-g> Submit Query (Button)\n
<Ctrl-Enter> Submit & Append\n
<Ctrl-Shift-S> Speak the Text\n
<Escape> Cancel Speaking Text\n
'''
messagebox.showinfo("Hot Keys Help", msg)
def do_pop_query(self, event):
''' handles right-click for context menu '''
try:
self.popup_query.tk_popup(event.x_root,
event.y_root, 0)
except:
self.popup_query.grab_release()
def do_pop_txt(self, event):
''' handles right-click for context menu '''
try:
self.popup_txt.tk_popup(event.x_root,
event.y_root, 0)
except:
self.popup_txt.grab_release()
def popquery(self, n):
''' Routes query Text context menu actions '''
if n == 1: # Copy
root.clipboard_clear() # clear clipboard contents
if self.query.tag_ranges("sel"):
root.clipboard_append(self.query.selection_get()) # append new value to clipboard
elif n == 2: # Paste
inx = self.query.index(INSERT)
try:
self.query.insert(inx, root.clipboard_get())
except Exception as e:
return
elif n == 3: # Copy All
self.query.focus()
self.query.tag_add(SEL, '1.0', END)
self.query.mark_set(INSERT, '1.0')
self.query.see(INSERT)
root.clipboard_clear() # clear clipboard contents
if self.query.tag_ranges("sel"): # append new value to clipbaord
root.clipboard_append(self.query.selection_get())
self.query.tag_remove(SEL, "1.0", END)
elif n == 4: # larger
self.TOPFRAME += 2
self.query.config(height=self.TOPFRAME)
elif n == 5: # smaller
if self.TOPFRAME > 3:
self.TOPFRAME -= 2
self.query.config(height=self.TOPFRAME)
else: # 6
search = self.query.selection_get()
webbrowser.open("https://duckduckgo.com/?q=" + search)
def poptxt(self, n):
''' Routes txt Text context menu actions '''
if n == 1: # Copy
root.clipboard_clear() # clear clipboard contents
root.clipboard_append(self.txt.selection_get()) # append new value to clipboard
elif n == 2: # Paste
inx = self.txt.index(INSERT)
self.txt.insert(inx, root.clipboard_get())
else: # Select All
self.txt.focus()
self.txt.tag_add(SEL, '1.0', END)
self.txt.mark_set(INSERT, '1.0')
self.txt.see(INSERT)
root.clipboard_clear() # clear clipboard contents
if self.txt.tag_ranges("sel"): # append new value to clipbaord
root.clipboard_append(self.txt.selection_get())
self.txt.tag_remove(SEL, "1.0", END)
def exit_program(self, e=None):
''' Only exit program without prompt if
1. Ctrl-q was hit
OR
2. Both Text frames are empty '''
resp = self.txt.get("1.0", END).strip()
qury = self.query.get("1.0", END).strip()
if resp == "" and qury == "":
save_location()
sys.exit()
if e is None: # ctrl-q avoids this message
if messagebox.askokcancel('GptGUI',
'Did you want to close the app?') is False:
return
save_location()
#------------------------------------------------------------
# SAVE GEOMETRY INFO AND EXIT
def save_location(e=None):
''' executes at WM_DELETE_WINDOW event - see below
Also called from self.exit_program.
Save window geometry before destruction
'''
with open("winfo", "w") as fout:
fout.write(root.geometry())
root.destroy()
# used for saving queries with date and time
now = datetime.datetime.now()
# get options that go into the window creation and title
config = configparser.ConfigParser()
config.read('gptgui.ini')
MyTheme = config['Main']['theme']
MyModel = config['Main']['engine']
MyTemp = config['Main']['temperature']
MyTokens = config['Main']['tokens']
# define main window
MyTitle = "GptGUI (OpenAI 1.3.3) " + MyModel + " " + str(MyTokens)
root = Window(MyTitle, MyTheme, iconphoto="icon.png")
# change working directory to path for this file
p = os.path.realpath(__file__)
os.chdir(os.path.dirname(p))
# ACCESS GEOMETRY INFO
if os.path.isfile("winfo"):
with open("winfo") as f:
lcoor = f.read()
root.geometry(lcoor.strip())
else:
root.geometry("675x505") # WxH+left+top
root.protocol("WM_DELETE_WINDOW", save_location) # TO SAVE GEOMETRY INFO
root.minsize(875, 325) # width, height
Sizegrip(root).place(rely=1.0, relx=1.0, x=0, y=0, anchor='se')
Application(root)
root.mainloop()
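# For reference, a gptgui.ini along these lines is expected in the program
# directory (the key names are the ones read above; the values shown are only
# illustrative examples, not shipped defaults):
#
#   [Main]
#   theme = darkly
#   path = queries.md
#   fontqryfam = Monospace
#   fontqrysiz = 12
#   fontgptfam = Monospace
#   fontgptsiz = 12
#   engine = gpt-3.5-turbo
#   temperature = 1.0
#   tokens = 1000
#   gptkey = OPENAI_API_KEY
#   showtime = 0
#   autosave = 0
#   editor = gedit
#   tempfile = gptgui_temp.md
#   system = You are a helpful assistant.
#   top_frame = 8
#
# A gptkey value shorter than 16 characters (such as OPENAI_API_KEY above) is
# treated as the name of an environment variable holding the actual key.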
| [] |
2024-01-10 | jario-jin/SpireView | utils~detector~common_replay_buffer.py | #code from openai
#https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py
import numpy as np
import random
import operator
class SegmentTree(object):
def __init__(self, capacity, operation, neutral_element):
"""Build a Segment Tree data structure.
https://en.wikipedia.org/wiki/Segment_tree
Can be used as regular array, but with two
important differences:
a) setting item's value is slightly slower.
It is O(lg capacity) instead of O(1).
b) user has access to an efficient `reduce`
operation which reduces `operation` over
a contiguous subsequence of items in the
array.
Parameters
----------
capacity: int
Total size of the array - must be a power of two.
operation: lambda obj, obj -> obj
an operation for combining elements (e.g. sum, max);
must form a mathematical group together with the set of
possible values for array elements.
neutral_element: obj
neutral element for the operation above. eg. float('-inf')
for max and 0 for sum.
"""
assert capacity > 0 and capacity & (capacity - 1) == 0, "capacity must be positive and a power of 2."
self._capacity = capacity
self._value = [neutral_element for _ in range(2 * capacity)]
self._operation = operation
def _reduce_helper(self, start, end, node, node_start, node_end):
if start == node_start and end == node_end:
return self._value[node]
mid = (node_start + node_end) // 2
if end <= mid:
return self._reduce_helper(start, end, 2 * node, node_start, mid)
else:
if mid + 1 <= start:
return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)
else:
return self._operation(
self._reduce_helper(start, mid, 2 * node, node_start, mid),
self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)
)
def reduce(self, start=0, end=None):
"""Returns result of applying `self.operation`
to a contiguous subsequence of the array.
self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))
Parameters
----------
start: int
beginning of the subsequence
end: int
end of the subsequence
Returns
-------
reduced: obj
result of reducing self.operation over the specified range of array elements.
"""
if end is None:
end = self._capacity
if end < 0:
end += self._capacity
end -= 1
return self._reduce_helper(start, end, 1, 0, self._capacity - 1)
def __setitem__(self, idx, val):
# index of the leaf
idx += self._capacity
self._value[idx] = val
idx //= 2
while idx >= 1:
self._value[idx] = self._operation(
self._value[2 * idx],
self._value[2 * idx + 1]
)
idx //= 2
def __getitem__(self, idx):
assert 0 <= idx < self._capacity
return self._value[self._capacity + idx]
class SumSegmentTree(SegmentTree):
def __init__(self, capacity):
super(SumSegmentTree, self).__init__(
capacity=capacity,
operation=operator.add,
neutral_element=0.0
)
def sum(self, start=0, end=None):
"""Returns arr[start] + ... + arr[end]"""
return super(SumSegmentTree, self).reduce(start, end)
def find_prefixsum_idx(self, prefixsum):
"""Find the highest index `i` in the array such that
sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum
if array values are probabilities, this function
allows sampling indexes according to the discrete
probability distribution efficiently.
Parameters
----------
prefixsum: float
upper bound on the sum of the array prefix
Returns
-------
idx: int
highest index satisfying the prefixsum constraint
"""
assert 0 <= prefixsum <= self.sum() + 1e-5
idx = 1
while idx < self._capacity: # while non-leaf
if self._value[2 * idx] > prefixsum:
idx = 2 * idx
else:
prefixsum -= self._value[2 * idx]
idx = 2 * idx + 1
return idx - self._capacity
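# Illustrative walk-through (example values, not from the original baselines
# source): a SumSegmentTree of capacity 4 holding [1.0, 0.0, 3.0, 0.0] gives
#   tree.sum()                   -> 4.0  (sum over the whole array)
#   tree.sum(0, 2)               -> 1.0  (indices 0..1; the end index is exclusive)
#   tree.find_prefixsum_idx(0.5) -> 0    (cumulative sum first exceeds 0.5 at index 0)
#   tree.find_prefixsum_idx(1.5) -> 2    (index 0 contributes 1.0, index 2 pushes past 1.5)
# PrioritizedReplayBuffer below uses exactly this to sample indices in
# proportion to their priorities.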
class MinSegmentTree(SegmentTree):
def __init__(self, capacity):
super(MinSegmentTree, self).__init__(
capacity=capacity,
operation=min,
neutral_element=float('inf')
)
def min(self, start=0, end=None):
"""Returns min(arr[start], ..., arr[end])"""
return super(MinSegmentTree, self).reduce(start, end)
class ReplayBuffer(object):
def __init__(self, size):
"""Create Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
def __len__(self):
return len(self._storage)
def push(self, state, action, reward, next_state, done):
data = (state, action, reward, next_state, done)
if self._next_idx >= len(self._storage):
self._storage.append(data)
else:
self._storage[self._next_idx] = data
self._next_idx = (self._next_idx + 1) % self._maxsize
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
obses_t.append(np.array(obs_t, copy=False))
actions.append(np.array(action, copy=False))
rewards.append(reward)
obses_tp1.append(np.array(obs_tp1, copy=False))
dones.append(done)
return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones)
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]
return self._encode_sample(idxes)
class PrioritizedReplayBuffer(ReplayBuffer):
def __init__(self, size, alpha):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
alpha: float
how much prioritization is used
(0 - no prioritization, 1 - full prioritization)
See Also
--------
ReplayBuffer.__init__
"""
super(PrioritizedReplayBuffer, self).__init__(size)
assert alpha > 0
self._alpha = alpha
it_capacity = 1
while it_capacity < size:
it_capacity *= 2
self._it_sum = SumSegmentTree(it_capacity)
self._it_min = MinSegmentTree(it_capacity)
self._max_priority = 1.0
def push(self, *args, **kwargs):
"""See ReplayBuffer.store_effect"""
idx = self._next_idx
super(PrioritizedReplayBuffer, self).push(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
def _sample_proportional(self, batch_size):
res = []
for _ in range(batch_size):
# TODO(szymon): should we ensure no repeats?
mass = random.random() * self._it_sum.sum(0, len(self._storage) - 1)
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
def sample(self, batch_size, beta):
"""Sample a batch of experiences.
compared to ReplayBuffer.sample
it also returns importance weights and idxes
of sampled experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
beta: float
To what degree to use importance weights
(0 - no corrections, 1 - full correction)
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
weights: np.array
Array of shape (batch_size,) and dtype np.float32
denoting importance weight of each sampled transition
idxes: np.array
Array of shape (batch_size,) and dtype np.int32
indexes in buffer of sampled experiences
"""
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self._storage)) ** (-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self._storage)) ** (-beta)
weights.append(weight / max_weight)
weights = np.array(weights)
encoded_sample = self._encode_sample(idxes)
return tuple(list(encoded_sample) + [weights, idxes])
def update_priorities(self, idxes, priorities):
"""Update priorities of sampled transitions.
sets priority of transition at index idxes[i] in buffer
to priorities[i].
Parameters
----------
idxes: [int]
List of idxes of sampled transitions
priorities: [float]
List of updated priorities corresponding to
transitions at the sampled idxes denoted by
variable `idxes`.
"""
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self._storage)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
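# Minimal usage sketch (illustrative only; the buffer size, alpha, beta and the
# random transitions below are arbitrary example values, not defaults from the
# original baselines code). It shows the push / sample / update_priorities
# cycle used by prioritized experience replay.
if __name__ == "__main__":
    buffer = PrioritizedReplayBuffer(size=1000, alpha=0.6)
    # fill the buffer with dummy transitions
    for step in range(200):
        state = np.random.randn(4)
        next_state = np.random.randn(4)
        action = random.randint(0, 1)
        reward = random.random()
        done = (step % 50 == 49)
        buffer.push(state, action, reward, next_state, done)
    # sample a batch; beta controls the strength of the importance-sampling correction
    states, actions, rewards, next_states, dones, weights, idxes = buffer.sample(
        batch_size=32, beta=0.4)
    print(states.shape, actions.shape, weights.shape)
    # after computing TD errors, refresh the priorities of the sampled transitions
    fake_td_errors = np.abs(np.random.randn(32)) + 1e-6
    buffer.update_priorities(idxes, fake_td_errors)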
| [] |
2024-01-10 | vmurahari3/QualEval | utils~misc_utils.py | import openai
import numpy as np
import random
def authenticate(args):
with open(args.api_key) as f:
api_key = f.readlines()[0].strip()
openai.api_key = api_key
return api_key
def seed_function(args):
random.seed(args.seed)
np.random.seed(args.seed)
def get_prompt(
args,
train_dataset,
instruction_template,
demonstration_template,
demonstration_sep="\n",
):
if not args.few_shot:
prompt = instruction_template
else:
# Include examples in the prompt
assert (
train_dataset is not None
), "Want to do few-shot, but no train dataset provided"
# Sample some examples from the train dataset
collated_demonstrations = ""
cols = train_dataset.column_names
for example_id in range(len(train_dataset[cols[0]])):
example = {col: train_dataset[col][example_id] for col in cols}
cur_demonstration = demonstration_template.format(**example)
collated_demonstrations = (
collated_demonstrations + demonstration_sep + cur_demonstration
)
prompt = "{}\n{}".format(instruction_template, collated_demonstrations)
return prompt, collated_demonstrations
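# Illustrative example (the templates below are hypothetical, not defined in
# this module): with
#   instruction_template   = "Summarize the dialogue."
#   demonstration_template = "Dialogue: {dialogue}\nSummary: {summary}"
# and args.few_shot set, get_prompt formats one demonstration per row of
# train_dataset and returns the instruction followed by all demonstrations
# (joined with demonstration_sep), together with the collated demonstrations alone.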
| [
"PLACEHOLDER\n"
] |
2024-01-10 | vmurahari3/QualEval | get_dashboard.py | from utils.args import add_args
from utils.plotting_utils import vconcat_resize
from utils.misc_utils import authenticate, seed_function
import logging
import os
import pandas as pd
import numpy as np
import inflect
import openai
import random
import matplotlib.pyplot as plt
from matplotlib import rcParams, font_manager
import matplotlib as mpl
import seaborn as sns
import argparse
import json
from scipy.optimize import linear_sum_assignment, linprog
from PIL import Image
import cv2
import zipfile
from utils.templates import PROFICIENCY_METRICS, LABEL_KEY, TASK_INSTRUCTIONS
inflect_engine = inflect.engine()
font_size = 26
# Create a Matplotlib Font object from our `.ttf` file
font = font_manager.FontEntry(fname=str("fonts/Roboto-Regular.ttf"), name="roboto")
# Register this object with Matplotlib's ttf list
font_manager.fontManager.ttflist.append(font)
rc = {}
rc["font.family"] = "roboto"
rcParams.update(rc)
PROFICIENCY_FILTER_THRESHOLD = {}
PROFICIENCY_FILTER_THRESHOLD["mbpp"] = 1
PROFICIENCY_FILTER_THRESHOLD["knkarthick_dialogsum"] = 0.25
PROFICIENCY_FILTER_THRESHOLD["mmlu_biology"] = 1
CLASSIFICATION_TASKS = ["mmlu_biology", "medmcqa"]
# pretty labels
PRETTY_LABELS = {}
PRETTY_LABELS["mbpp"] = {}
PRETTY_LABELS["mbpp"]["subtask"] = {
"Parse natural language description": "Parse Description",
"Understand test cases": "Understand Test Cases",
"Handle data types and structures": "Handle Data Types",
"Implement mathematical operations": "Implement Math Operations",
"Handling loops and conditionals": "Handle Loops/If-Else",
"Manage variable assignments and data manipulation": "Variable Assignments",
"Implement algorithmic operations": "Implement Algorithms",
"Handle exception and error cases": "Handle Exceptions & Errors",
"Optimize for efficiency and readability": "Optimize for Efficiency",
"Validate against test cases": "Validate Against Test Cases",
"Generate Python syntax": "Generate Python Syntax",
"Manipulate arrays and lists": "Manipulate Arrays & Lists",
"Handle edge cases or special scenarios": "Handle Edge Cases",
"Extract and store arrays from function parameters": "Extract & Store Arrays",
}
PRETTY_LABELS["mbpp"]["domain"] = {
"Mathematical Operations": "Mathematical Operations",
"String Manipulation": "String Manip.",
"List Manipulation": "List Manip.",
"Conditional Statements": "Conditional Statements",
"Data Processing": "Data Processing",
"Sorting": "Sorting",
"Number Manipulation": "Number Manip.",
"Tuple Manipulation": "Tuple Manip.",
"Boolean Operations": "Bool Operations",
"Geometric Calculations": "Geometric Calculations",
"Text Pattern Matching": "Text Pattern Matching",
"Array Manipulation": "Array Manip.",
"File Handling": "File Handling",
"Data Validation": "Data Validation",
"Sequence Analysis": "Sequence Analysis",
}
PRETTY_LABELS["knkarthick_dialogsum"] = {}
PRETTY_LABELS["knkarthick_dialogsum"]["subtask"] = {
"Identifying the participants in the conversation": "Identify the participants",
"Understanding the topic of discussion": "Understand the topic",
"Extracting key information or important details": "Extract key information",
"Summarizing the conversation concisely": "Summarize concisely",
"Recognizing the roles and relationships of the speakers": "Recognize roles",
"Comprehending specific statements or questions": "Comprehend specific statements",
"Interpreting instructions or suggestions": "Interpret instructions/suggestions",
"Identifying requests for information or clarification": "Identify requests for information",
"Extracting important information and questions": "Extract important questions",
"Understanding the conversational context": "Understand conversational context",
"Recognizing the main topic of conversation": "Recognize main topic",
"Noting suggestions, recommendations, or solutions proposed": "Note suggestions/solution",
"Extracting information about language proficiency or qualifications": "Extract language proficiency",
"Recognizing and interpreting emotions": "Recognize & interpret emotions",
"Extracting relevant details": "Extract relevant details",
}
PRETTY_LABELS["knkarthick_dialogsum"]["domain"] = {
"Dating and relationships": "Dating & relationships",
"Outdoor activities and sports": "Outdoor activities",
"Career and job interviews": "Career & job interviews",
"Food and restaurant ordering": "Food & restaurant ordering",
"Environmental issues and pollution": "Environmental issues",
"Social interactions and personal relationships": "Social interactions",
"Leisure and recreation": "Leisure & recreation",
"Employment and professional skills": "Employment & professional skills",
"Food and hospitality": "Food & hospitality",
"Environmental conservation and sustainability": "Environmental sustainability",
"Movie preferences and plans": "Movie preferences & plans",
"Sports and live events": "Sports & live events",
"Fashion and clothing choices": "Fashion & clothing choices",
"Education": "Education",
"Work environment": "Work environment",
}
PRETTY_LABELS["mmlu_biology"] = {}
PRETTY_LABELS["mmlu_biology"]["subtask"] = {
"Understanding and interpreting clinical information": "Interpret clinical info",
"Identifying and categorizing symptoms, conditions, and diseases": "Identify symptoms",
"Analyzing and processing medical test results": "Analyze medical tests",
"Recommending appropriate treatments and interventions based on patient-specific factors": "Recommend appropriate treatment",
"Providing accurate and relevant information to healthcare professionals and patients": "Provide acc. info",
"Understanding and interpreting multiple choice questions": "Interpret multiple-choice ques.",
"Analyzing and selecting the correct answer choice": "Analyze answer choice",
"Recognizing key terms and concepts in clinical biology": "Recognize concepts",
"Identifying patterns and relationships between questions and answers": "Identify patterns b/w Ques. and Ans.",
"Retaining and applying knowledge from example data to new questions and answers": "Apply knowledge",
"Understanding and classifying pH levels": "Understanding and classifying pH levels",
"Providing information and reminders about medication administration and potential side effects": "Providing information and reminders about medication administration and potential side effects",
"Suggesting the appropriate size of cannula for specific medical interventions such as blood transfusions": "Suggesting the appropriate size of cannula for specific medical interventions",
"Applying domain-specific knowledge to select the most appropriate answer choice": "Apply domain-specific knowledge",
"Identifying potential drug interactions": "Identify potential drug interactions",
}
PRETTY_LABELS["mmlu_biology"]["domain"] = {
"Cell Biology": "Cell Biology",
"Neurology": "Neurology",
"Biochemistry": "Biochemistry",
"Physiology": "Physiology",
"Pharmacology": "Pharmacology",
"Clinical biology": "Clinical biology",
"Diagnostic tests": "Diagnostic tests",
"Treatment options": "Treatment options",
"Anatomy and physiology": "Anatomy & physiology",
"Medical procedures and interventions": "Medical procedures",
"Genetics and heredity": "Genetics and heredity",
"Dermatology": "Dermatology",
"Urology": "Urology",
"Respiratory medicine": "Respiratory medicine",
"Wound healing and surgery": "Wound healing & surgery",
}
def get_dataset_assignment_LP(
args,
all_category_elements_importance_scores,
ground_truth_scores,
categories,
max_assignments_per_data_point=2,
slack=0.1,
):
assignments = {}
for category in categories:
# filter based on category
category_gt_scores = ground_truth_scores[
ground_truth_scores["category_type"] == category
]
# if we don't have a complete graph (scores for some missing categories), do a join and assign a -1 score to missing categories
# Fill missing values with default score
default_score = -1
category_gt_scores["score"] = category_gt_scores["score"].fillna(default_score)
category_gt_scores_pivoted = category_gt_scores.pivot(
index="id", columns="category", values="score"
).fillna(default_score)
category_gt_scores_np = category_gt_scores_pivoted.values
num_data_points, num_category_elements = category_gt_scores_np.shape
# duplicate the columns based on the importance scores
category_elements_importance_scores = all_category_elements_importance_scores[
category
]
# align the importance scores categories with the columns of the gt scores
category_elements_importance_scores = (
category_elements_importance_scores.reindex(
category_gt_scores_pivoted.columns
)
)
category_elements_importance_scores_np = (
category_elements_importance_scores.values
)
category_elements_importance_scores_np = (
category_elements_importance_scores_np
/ np.sum(category_elements_importance_scores_np)
)
num_slots_per_category_element = np.floor(
category_elements_importance_scores_np * num_data_points
).astype(int)
# the number of slots might not add up to the number of data points
# distrbute the remaining slots randomly
num_slots_remaining = num_data_points - np.sum(num_slots_per_category_element)
if num_slots_remaining > 0:
num_slots_per_category_element = (
num_slots_per_category_element
+ np.random.multinomial(
num_slots_remaining,
np.ones(num_category_elements) / num_category_elements,
)
)
num_slots_per_category_element = (
num_slots_per_category_element * max_assignments_per_data_point
)
# add some slack
assert slack >= 0 and slack <= 1
num_slots_per_category_element_ub = num_slots_per_category_element + np.floor(
slack * num_slots_per_category_element
)
num_slots_per_category_element_lb = num_slots_per_category_element - np.floor(
slack * num_slots_per_category_element
)
# construct the linear program
# decision variables
# x_ij = 1 if category element j is assigned to data point i
# x_ij = 0 otherwise
# objective function
# max sum_i sum_j x_ij * score_ij
# i = [1, num_data_points]
# j = [1, num_category_elements]
# constraints
# sum_j x_ij = 2 for all i
# sum_i x_ij = num_slots_per_category_element[j] * (1 +- slack) for all j
# flexible solver
# x_ij = {0,1}
# score_ij = [1,5]
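# Concretely (illustrative sizes, not taken from the data): with 3 data points,
# 2 category elements and max_assignments_per_data_point = 2, x is a 3x2
# assignment matrix flattened into 6 binary decision variables; the first 3
# rows of A cap each data point at 2 assignments, the next 2 rows enforce the
# per-element slot upper bounds, and the final 2 (negated) rows enforce the
# lower bounds.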
num_category_elements = category_gt_scores_np.shape[1]
num_data_points = category_gt_scores_np.shape[0]
# cost vector
c = category_gt_scores_np.flatten()
A = np.zeros(
(
num_data_points + num_category_elements + num_category_elements,
num_data_points * num_category_elements,
)
)
b = np.zeros(num_data_points + num_category_elements + num_category_elements)
# constraint 1
for i in range(num_data_points):
A[i, i * num_category_elements : (i + 1) * num_category_elements] = 1
b[i] = max_assignments_per_data_point
# constraint 2 -- upper bound
for j in range(num_category_elements):
A[num_data_points + j, j::num_category_elements] = 1
b[num_data_points + j] = num_slots_per_category_element_ub[j]
# constraint 2 -- lower bound
for j in range(num_category_elements):
A[
num_data_points + num_category_elements + j, j::num_category_elements
] = -1
b[
num_data_points + num_category_elements + j
] = -num_slots_per_category_element_lb[j]
# solve the linear program
res = linprog(-c, A_ub=A, b_ub=b, bounds=(0, 1), integrality=1)
# get the assignments
reshaped_assignments = res.x.reshape(num_data_points, num_category_elements)
assert np.all(
np.logical_or(reshaped_assignments == 0, reshaped_assignments == 1)
)
assignment = {}
for j in range(num_category_elements):
non_zeros_data_points = np.nonzero(reshaped_assignments[:, j] == 1)
assignment[
category_gt_scores_pivoted.columns[j]
] = category_gt_scores_pivoted.index[non_zeros_data_points].tolist()
assert (
len(assignment[category_gt_scores_pivoted.columns[j]])
<= num_slots_per_category_element_ub[j]
)
assert (
len(assignment[category_gt_scores_pivoted.columns[j]])
>= num_slots_per_category_element_lb[j]
)
print(
"Number of assignments for category element",
category_gt_scores_pivoted.columns[j],
f"{len(assignment[category_gt_scores_pivoted.columns[j]])} {num_slots_per_category_element_ub[j]} {num_slots_per_category_element_lb[j]}",
)
assignments[category] = assignment
return assignments
def preprocessing(args, all_scores_generations, all_scores_gt, proficiency_scores):
categories = args.categories.split(",")
# rename the columns to category
for category in categories:
category_df = all_scores_generations[category]
category_df.rename(columns={category: "category"}, inplace=True)
category_df["category_type"] = category
category_df_gt = all_scores_gt[category]
category_df_gt.rename(columns={category: "category"}, inplace=True)
category_df_gt["category_type"] = category
# merge all categories into a single dataframe
all_scores_generations_merged = pd.concat(all_scores_generations.values())
all_scores_gt_merged = pd.concat(all_scores_gt.values())
# assert no empty generations, and assert scores in range [0,5]
assert np.all(all_scores_generations_merged["generation"] != "")
assert np.all(all_scores_gt_merged["generation"] != "")
assert np.all(all_scores_generations_merged["score"] >= 0)
assert np.all(all_scores_generations_merged["score"] <= 5)
assert np.all(all_scores_gt_merged["score"] >= 0)
assert np.all(all_scores_gt_merged["score"] <= 5)
assert len(all_scores_generations_merged) == len(all_scores_gt_merged)
# assert number of unique ids in generations and gt are the same
# assert number of unique ids in proficiency scores and gt are the same
assert np.all(
np.unique(all_scores_generations_merged["id"].values)
== np.unique(all_scores_gt_merged["id"].values)
)
assert np.all(
np.unique(all_scores_generations_merged["id"].values)
== np.unique(proficiency_scores.index.values)
)
pruned_category_elements = {}
for category in categories:
category_df_gt = all_scores_gt_merged[
all_scores_gt_merged["category_type"] == category
]
# find mean score for different elements in the category with pd groupby
grouped_category_df_gt = category_df_gt.groupby(category_df_gt["category"])
scores_per_category_type = grouped_category_df_gt["score"].mean()
top_10_category_elements = scores_per_category_type.sort_values(ascending=False)
top_10_category_elements = top_10_category_elements[:10]
top_10_category_elements.index = top_10_category_elements.index.str.split(
":"
).str.get(0)
pruned_category_elements[category] = top_10_category_elements
# prune the generation score to only contain the top 10 category elements
pruned_generation_scores = []
for category in categories:
category_df = all_scores_generations_merged[
all_scores_generations_merged["category_type"] == category
]
category_df["category"] = category_df["category"].str.split(":").str.get(0)
category_df = category_df[
category_df["category"].isin(pruned_category_elements[category].index)
]
pruned_generation_scores.append(category_df)
# pruned GT scores as well
pruned_gt_scores = []
for category in categories:
category_df = all_scores_gt_merged[
all_scores_gt_merged["category_type"] == category
]
category_df["category"] = category_df["category"].str.split(":").str.get(0)
category_df = category_df[
category_df["category"].isin(pruned_category_elements[category].index)
]
pruned_gt_scores.append(category_df)
# merge all categories into a single dataframe
all_scores_gt_merged_pruned = pd.concat(pruned_gt_scores)
all_scores_generations_merged_pruned = pd.concat(pruned_generation_scores)
return (
all_scores_generations_merged_pruned,
all_scores_gt_merged_pruned,
pruned_category_elements,
)
def get_gt_breakdown(args, all_scores_gt):
categories = args.categories.split(",")
fig, axes = plt.subplots(nrows=1, ncols=len(categories), figsize=(40, 9))
# visualize the scores for these category elements
for i, category in enumerate(categories):
category_df_gt = all_scores_gt[all_scores_gt["category_type"] == category]
# find mean score for different elements in the category with pd groupby
grouped_category_df_gt = category_df_gt.groupby(category_df_gt["category"])
scores_per_category_type = grouped_category_df_gt["score"].mean()
# setting title etc.
scores_per_category_type = scores_per_category_type.sort_values(ascending=True)
qualitative_colors = sns.color_palette("husl", 10)
sns.set_theme(style="white")
sns.set_palette(qualitative_colors)
sns.set_style("white")
labels = scores_per_category_type.index
labels_split = []
for label in labels:
label = label.strip()
if args.pretty_plot:
label = PRETTY_LABELS[args.task_name][category][label]
else:
label_words = label.split()
label = "\n".join(
[
" ".join(label_words[: len(label_words) // 2]),
" ".join(label_words[len(label_words) // 2 :]),
]
)
labels_split.append(label)
axes[i].pie(
x=scores_per_category_type.values,
labels=labels_split,
colors=qualitative_colors,
autopct="%1.0f%%",
startangle=90,
textprops={"fontsize": font_size},
pctdistance=0.80,
explode=[0.05] * len(scores_per_category_type),
)
# add labels
axes[i].set_title(
f"{inflect_engine.plural_noun(category.capitalize())}",
fontsize=1.5 * font_size,
)
hole = plt.Circle((0, 0), 0.65, facecolor="white")
axes[i].add_patch(hole)
# save the scores for each category element
scores_per_category_type.to_csv(
os.path.join(
args.input_dir_generation_scores,
f"gt_scores_per_category_element_{category}.csv",
)
)
fig.suptitle("Prior over categories", fontsize=2 * font_size)
plt.tight_layout(h_pad=2, w_pad=2, pad=2)
plt.savefig(
os.path.join(args.input_dir_generation_scores, "gt_breakdown.pdf"),
dpi=300,
transparent=True,
)
plt.savefig(
os.path.join(args.input_dir_generation_scores, "gt_breakdown.png"),
dpi=300,
transparent=True,
)
def get_correlation_breakdown(
args, all_scores_generations, all_scores_gt, proficiency_scores
):
# initialize the reportcard plot
categories = args.categories.split(",")
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(15, 12))
# visualize the scores for these category elements
color_dict = {"subtask": ["#ffc8c8", "#ff5858"], "domain": ["#bbdefb", "#2196f3"]}
for i, category in enumerate(["subtask"]):
generation_correlations = {}
category_df_gt = all_scores_gt[all_scores_gt["category_type"] == category]
category_df_generations = all_scores_generations[
all_scores_generations["category_type"] == category
]
# iterate over the category elements
# for each category element, find the correlation between the gt and generation scores
for category_element in category_df_gt["category"].unique():
# filter based on category element
category_element_df_gt = category_df_gt[
category_df_gt["category"] == category_element
]
category_element_df_generations = category_df_generations[
category_df_generations["category"] == category_element
]
# sort both the dataframes based on the id
category_element_df_gt = category_element_df_gt.sort_values(by="id")
category_element_df_generations = (
category_element_df_generations.sort_values(by="id")
)
# join on id to attach the proficiency score to each generation
category_element_df_generations = category_element_df_generations.join(
proficiency_scores,
on="id",
how="inner",
rsuffix="_proficiency",
)
filter_index = (
category_element_df_generations["proficiency_score"]
>= PROFICIENCY_FILTER_THRESHOLD[args.task_name]
)
category_element_df_generations = category_element_df_generations[
filter_index
]
category_element_df_gt = category_element_df_gt[
category_element_df_gt["id"].isin(
category_element_df_generations["id"].values
)
]
assert np.all(
category_element_df_gt["id"].values
== category_element_df_generations["id"].values
)
# count examples where the gt and generation scores differ by at least 2
num_intersection = np.sum(
np.abs(
category_element_df_gt["score"].values
- category_element_df_generations["score"].values
)
>= 2
)
correlation = num_intersection / len(category_element_df_gt)
generation_correlations[category_element] = correlation
# plot the scores for each category element
generation_correlations = pd.DataFrame.from_dict(
generation_correlations, orient="index"
)
generation_correlations.columns = ["score"]
# sort based on score before plotting
generation_correlations = generation_correlations.sort_values(
by="score", ascending=True
)
if args.pretty_plot:
# remove the two rows in the middle of the data frame
middle_index = len(generation_correlations) // 2
generation_correlations = pd.concat(
[
generation_correlations.iloc[: middle_index - 1],
generation_correlations.iloc[middle_index + 1 :],
]
)
labels = generation_correlations.index
labels_split = []
for label in labels:
label = label.strip()
if args.pretty_plot:
label = PRETTY_LABELS[args.task_name][category][label]
else:
label_words = label.split()
label = "\n".join(
[
" ".join(label_words[: len(label_words) // 2]),
" ".join(label_words[len(label_words) // 2 :]),
]
)
labels_split.append(label)
# find mean score for different elements in the category with pd groupby
# rotate xticks
qualitative_colors = sns.color_palette("Set2", 10)
sns.set_theme(style="white")
sns.set_palette(qualitative_colors)
sns.set_style("white")
colours = color_dict[category]
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"colour_map", colours, N=256
)
norm = mpl.colors.Normalize(
generation_correlations["score"].min(),
generation_correlations["score"].max(),
) # linearly normalizes data into the [0.0, 1.0] interval
bar_plot = sns.barplot(
ax=axes,
x=labels_split,
y=generation_correlations["score"],
palette=cmap(norm(generation_correlations["score"])),
linewidth=2,
)
# add labels
axes.tick_params(axis="y", labelsize=font_size)
axes.set_ylabel("Distance", fontsize=font_size, labelpad=20)
axes.set_ylim(
max(generation_correlations["score"].min() - 0.05, 0),
# min(generation_correlations["score"].max(), 1),
1,
)
xlabels = axes.get_xticklabels()
axes.spines[["right", "top", "left"]].set_visible(False)
axes.spines["bottom"].set_linewidth(1.5)
axes.spines["bottom"].set_color("grey")
# loop through bars and add annotations
for j, bar in enumerate(bar_plot.patches):
# Get the x-coordinate of the bar
x = bar.get_x()
# Get the y-coordinate of the bar
y = bar.get_y()
# add the text
axes.text(
x=x + bar.get_width() / 2,
y=y + bar.get_height() + 0.01,
s=xlabels[j].get_text(),
ha="center",
va="bottom",
fontsize=font_size,
rotation=90,
multialignment="left",
)
axes.set_xticklabels([])
# save the scores for each category element
generation_correlations.to_csv(
os.path.join(
args.input_dir_generation_scores,
f"generation_correlations_{category}.csv",
)
)
fig.suptitle(
"Alignment between usage of skills",
fontsize=1.5 * font_size,
)
plt.tight_layout(h_pad=2, w_pad=2, pad=2)
plt.savefig(
os.path.join(args.input_dir_generation_scores, "correlation_breakdown.pdf"),
dpi=300,
transparent=True,
)
plt.savefig(
os.path.join(args.input_dir_generation_scores, "correlation_breakdown.png"),
dpi=300,
transparent=True,
)
def get_proficiency_breakdown(args, all_scores_gt, proficiency_scores):
# initialize the reportcard plot
# get the LP assignments
categories = args.categories.split(",")
fig, axes = plt.subplots(ncols=len(categories), figsize=(30, 18), sharey=True)
categories_importance_scores = {}
for category in categories:
category_df_gt = all_scores_gt[all_scores_gt["category_type"] == category]
# find mean score for different elements in the category with pd groupby
grouped_category_df_gt = category_df_gt.groupby(category_df_gt["category"])
scores_per_category_type = grouped_category_df_gt["score"].mean()
categories_importance_scores[category] = scores_per_category_type
dataset_assignments = get_dataset_assignment_LP(
args,
categories_importance_scores,
all_scores_gt,
categories,
max_assignments_per_data_point=2,
)
color_dict = {"subtask": ["#F4D941", "#EC8235"], "domain": ["#bbdefb", "#2196f3"]}
# now we have the assignments for each category element
for i, category in enumerate(categories):
cur_assignment = dataset_assignments[category]
generation_scores_with_assignments = {}
qualitative_samples = pd.DataFrame()
for category_element in cur_assignment:
# filter based on assignments
# the index of proficiency scores is the id
cur_proficiency_scores = proficiency_scores[
proficiency_scores.index.isin(cur_assignment[category_element])
]
generation_scores_with_assignments[category_element] = [
cur_proficiency_scores["proficiency_score"].mean(),
len(cur_proficiency_scores),
]
# output some qualitative samples
# get the top 3 and bottom 3 generations for each category element
top_generations = cur_proficiency_scores.sort_values(
by="proficiency_score", ascending=False
)[:3]
top_generations["category_element"] = category_element
bottom_generations = cur_proficiency_scores.sort_values(
by="proficiency_score", ascending=True
)[:3]
bottom_generations["category_element"] = category_element
qualitative_samples = pd.concat(
[qualitative_samples, top_generations, bottom_generations]
)
# average the scores for each category element given the assignments
# plot the scores for each category element
generation_scores_with_assignments_df = pd.DataFrame.from_dict(
generation_scores_with_assignments, orient="index"
)
generation_scores_with_assignments_df.columns = ["score", "num_samples"]
# sort based on score before plotting
generation_scores_with_assignments_df = (
generation_scores_with_assignments_df.sort_values(
by="score", ascending=True
)
)
generation_scores_with_assignments_df.to_csv(
os.path.join(
args.input_dir_generation_scores,
f"generation_scores_with_assignments_{category}.csv",
)
)
if args.pretty_plot:
# remove the two rows in the middle of the data frame
middle_index = len(generation_scores_with_assignments_df) // 2
generation_scores_with_assignments_df = pd.concat(
[
generation_scores_with_assignments_df.iloc[: middle_index - 1],
generation_scores_with_assignments_df.iloc[middle_index + 1 :],
]
)
labels = generation_scores_with_assignments_df.index
labels_split = []
for label in labels:
label = label.strip()
if args.pretty_plot:
label = PRETTY_LABELS[args.task_name][category][label]
else:
label_words = label.split()
label = "\n".join(
[
" ".join(label_words[: len(label_words) // 2]),
" ".join(label_words[len(label_words) // 2 :]),
]
)
labels_split.append(label)
qualitative_colors = sns.color_palette("Set2", 10)
sns.set_theme(style="white")
sns.set_palette(qualitative_colors)
sns.set_style("white")
colours = color_dict[category]
cmap = mpl.colors.LinearSegmentedColormap.from_list(
"colour_map", colours, N=256
)
norm = mpl.colors.Normalize(
generation_scores_with_assignments_df["score"].min(),
generation_scores_with_assignments_df["score"].max(),
) # linearly normalizes data into the [0.0, 1.0] interval
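        # colour each bar by its score: scores are normalised to [0, 1] and passed
        # through the two-colour gradient chosen for this category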
sns.barplot(
ax=axes[i],
x=labels_split,
y=generation_scores_with_assignments_df["score"],
capsize=10,
errwidth=10,
palette=cmap(norm(generation_scores_with_assignments_df["score"])),
)
# add labels
axes[i].tick_params(axis="y", labelsize=font_size * 1.3)
axes[i].set_title(
f"{inflect_engine.plural_noun(category.capitalize())}",
fontsize=font_size * 1.9,
)
ylabel = args.proficiency_metric
axes[i].set_ylabel(ylabel, ha="center", fontsize=font_size * 1.4, labelpad=20)
axes[i].set_ylim(
max(generation_scores_with_assignments_df["score"].min() - 0.1, 0),
min(generation_scores_with_assignments_df["score"].max() + 0.1, 1),
)
xlabels = axes[i].get_xticklabels()
axes[i].set_xticklabels(
xlabels,
rotation=90,
ha="center",
fontsize=font_size * 1.6,
multialignment="right",
)
axes[i].spines[["right", "top", "left"]].set_visible(False)
axes[i].spines["bottom"].set_linewidth(1.5)
axes[i].spines["bottom"].set_color("grey")
# store the qualitative samples
qualitative_samples.to_csv(
os.path.join(
args.input_dir_generation_scores,
f"qualitative_samples_{category}.csv",
)
)
# dump the LP assignments
index_2_category_element = {}
for category_element in cur_assignment:
for index in cur_assignment[category_element]:
if index not in index_2_category_element:
index_2_category_element[index] = []
index_2_category_element[index].append(category_element)
index_2_category_element_df = pd.DataFrame.from_dict(
index_2_category_element, orient="index"
)
index_2_category_element_df = index_2_category_element_df.join(
proficiency_scores_df, how="inner"
)
index_2_category_element_df = index_2_category_element_df.sort_index()
index_2_category_element_df.to_csv(
os.path.join(
args.input_dir_generation_scores,
f"index_2_category_element_{category}.csv",
)
)
fig.suptitle("Proficiency by category", fontsize=2 * font_size, font="roboto")
plt.tight_layout(h_pad=2, w_pad=2, pad=2)
plt.savefig(
os.path.join(args.input_dir_generation_scores, "proficiency_breakdown.pdf"),
dpi=300,
transparent=True,
)
plt.savefig(
os.path.join(args.input_dir_generation_scores, "proficiency_breakdown.png"),
dpi=300,
transparent=True,
)
def get_nl_summary(args):
# show the list of categories and category elements
# list the breakdown of the category elements as json
# list the breakdown of the proficiency scores as json
# list the breakdown of the correlation scores as json if they exist
# prompt the model to generate a NL summary
task_instruction = TASK_INSTRUCTIONS[args.task_name]
categories = args.categories.split(",")
gt_breakdown = {}
proficiency_scores = {}
correlation_scores = {}
category_elements = {}
for category in categories:
# load the ground truth breakdown
gt_breakdown[category] = pd.read_csv(
os.path.join(
args.input_dir_generation_scores,
f"gt_scores_per_category_element_{category}.csv",
),
)
proficiency_scores[category] = pd.read_csv(
os.path.join(
args.input_dir_generation_scores,
f"generation_scores_with_assignments_{category}.csv",
),
)
if os.path.exists(
os.path.join(
args.input_dir_generation_scores,
f"generation_correlations_{category}.csv",
),
):
correlation_scores[category] = pd.read_csv(
os.path.join(
args.input_dir_generation_scores,
f"generation_correlations_{category}.csv",
),
)
category_elements[category] = (
gt_breakdown[category]["category"].unique().tolist()
)
# prepare the dataframes for the NL summary
proficiency_scores[category] = proficiency_scores[category].set_index(
"Unnamed: 0"
)
proficiency_scores[category] = proficiency_scores[category]["score"].to_dict()
gt_breakdown[category] = gt_breakdown[category].set_index("category")
gt_breakdown[category] = gt_breakdown[category]["score"].to_dict()
if category in correlation_scores:
correlation_scores[category] = correlation_scores[category].set_index(
"Unnamed: 0"
)
correlation_scores[category] = correlation_scores[category][
"score"
].to_dict()
# compose request to LLM
    task_instruction_message = f"A machine learning model is tasked with the following task: \n {task_instruction}"
category_list = {
category: "\n".join(category_elements[category]) for category in categories
}
category_message = [
f"These are the {inflect_engine.plural_noun(category)} for the task:\n {category_list[category]}"
for category in categories
]
category_message = "\n\n".join(category_message)
gt_breakdown_message = [
f"In the evaluation data, these are the importance scores of the {inflect_engine.plural_noun(category)}:\n {json.dumps(gt_breakdown[category])}"
for category in categories
]
gt_breakdown_message = "\n\n".join(gt_breakdown_message)
proficiency_scores_message = [
f"The following scores show how well the model performs on the {inflect_engine.plural_noun(category)}: {json.dumps(proficiency_scores[category])}"
for category in categories
]
proficiency_scores_message = "\n\n".join(proficiency_scores_message)
if args.task_name not in CLASSIFICATION_TASKS:
correlation_scores_message = [
f"The following distance demonstrates how much the {inflect_engine.plural_noun(category)} are actually used for generating the output when they are requried to generate the input. Therefore, a low distance implies that the model is utilizing the category when it needs to: {json.dumps(correlation_scores[category])}. [Important] Lower distance implies the {category} is leveraged when it needs to be used."
for category in ["subtask"]
]
correlation_scores_message = "\n\n".join(correlation_scores_message)
else:
correlation_scores_message = ""
summarization_message = "Given the above information, please write a brief summary highlighting important information. Please be precise and concise but please be comprehensive."
system_prompt = "Given a holistic picture of the performance of a machine learning model, you are asked to summarize the model's overall performance."
try:
response = openai.ChatCompletion.create(
model=args.model,
messages=[
{"role": "system", "content": system_prompt},
{
"role": "user",
"content": task_instruction_message,
},
{
"role": "user",
"content": category_message,
},
{
"role": "user",
"content": gt_breakdown_message,
},
{
"role": "user",
"content": proficiency_scores_message,
},
{
"role": "user",
"content": correlation_scores_message,
},
{
"role": "user",
"content": summarization_message,
},
],
temperature=args.temperature,
max_tokens=1700,
top_p=args.top_p,
frequency_penalty=args.frequency_penalty,
presence_penalty=args.presence_penalty,
)
    except Exception:
        print(
            "exception encountered while generating the NL summary, skipping this iteration"
        )
return
if (
"error" in response
or "choices" not in response
or len(response["choices"]) == 0
):
return
response_text = response["choices"][0]["message"]["content"]
# save the response
with open(
os.path.join(args.input_dir_generation_scores, "NLsummary.txt"), "w"
) as f:
f.write(response_text)
print(response_text)
# save the response as an image
fig, ax = plt.subplots(figsize=(24, 12))
ax.text(
0.5,
0.5,
response_text,
horizontalalignment="center",
verticalalignment="center",
fontsize=0.9 * font_size,
wrap=True,
font="roboto",
multialignment="left",
backgroundcolor="lavender",
)
ax.axis("off")
plt.tight_layout(h_pad=2, w_pad=2, pad=2)
plt.savefig(
os.path.join(args.input_dir_generation_scores, "NLsummary.pdf"),
dpi=300,
transparent=True,
)
plt.savefig(
os.path.join(args.input_dir_generation_scores, "NLsummary.png"),
dpi=300,
transparent=True,
)
def get_reportcard(args, all_scores_generations, all_scores_gt, proficiency_scores):
# initialize the reportcard plot
categories = args.categories.split(",")
# preprocessing
all_scores_generations, all_scores_gt, pruned_category_elements = preprocessing(
args, all_scores_generations, all_scores_gt, proficiency_scores
)
# get the gt breakdown
get_gt_breakdown(args, all_scores_gt)
# get the proficiency scores for each category element
get_proficiency_breakdown(args, all_scores_gt, proficiency_scores)
    # get correlation between gt and generation scores for each category element
if args.task_name not in CLASSIFICATION_TASKS:
get_correlation_breakdown(
args, all_scores_generations, all_scores_gt, proficiency_scores
)
# get the NL summary
get_nl_summary(args)
# concatenate the different images into a single reportcard
# load images with cv2
all_images = []
gt_breakdown_image = cv2.imread(
os.path.join(args.input_dir_generation_scores, "gt_breakdown.png")
)
proficiency_breakdown_image = cv2.imread(
os.path.join(args.input_dir_generation_scores, "proficiency_breakdown.png")
)
all_images.append(gt_breakdown_image)
all_images.append(proficiency_breakdown_image)
if args.task_name not in CLASSIFICATION_TASKS:
correlation_breakdown_image = cv2.imread(
os.path.join(args.input_dir_generation_scores, "correlation_breakdown.png")
)
correlation_image_dummy = (
np.ones(
(
correlation_breakdown_image.shape[0],
correlation_breakdown_image.shape[1] // 2,
3,
)
).astype(np.uint8)
* 255
)
# horizontal concatenation of correlation breakdown and dummy image
correlation_breakdown_image = np.concatenate(
(
correlation_image_dummy,
correlation_breakdown_image,
correlation_image_dummy,
),
axis=1,
)
all_images.append(correlation_breakdown_image)
nl_summary = cv2.imread(
os.path.join(args.input_dir_generation_scores, "NLsummary.png")
)
all_images.append(nl_summary)
img_resize = vconcat_resize(all_images)
cv2.imwrite(
os.path.join(args.input_dir_generation_scores, "reportcard.png"), img_resize
)
# convert the reportcard to pdf
img = Image.open(
os.path.join(args.input_dir_generation_scores, "reportcard.png")
).convert("RGB")
img.save(
os.path.join(args.input_dir_generation_scores, "reportcard.pdf"),
save_all=True,
)
# dump a zip file with all the data
zipf = zipfile.ZipFile(
os.path.join(args.input_dir_generation_scores, "reportcard.zip"), "w"
)
with zipf:
zipf.write(
os.path.join(args.input_dir_generation_scores, "reportcard.pdf"),
"dashboard.pdf",
)
zipf.write(
os.path.join(args.input_dir_generation_scores, "gt_breakdown.pdf"),
"prior_over_categories.pdf",
)
zipf.write(
os.path.join(args.input_dir_generation_scores, "proficiency_breakdown.pdf"),
"proficiency_over_categories.pdf",
)
if args.task_name not in CLASSIFICATION_TASKS:
zipf.write(
os.path.join(
args.input_dir_generation_scores, "correlation_breakdown.pdf"
),
"distance_bw_GT_and_Output.pdf",
)
zipf.write(
os.path.join(args.input_dir_generation_scores, "NLsummary.pdf"),
"summary.pdf",
)
zipf.write(
os.path.join(args.input_dir_generation_scores, "NLsummary.txt"),
"summary.txt",
)
# also add the data files
for category in categories:
zipf.write(
os.path.join(
args.input_dir_generation_scores,
f"gt_scores_per_category_element_{category}.csv",
),
f"prior_over_category_elements_{category}.csv",
)
zipf.write(
os.path.join(
args.input_dir_generation_scores,
f"generation_scores_with_assignments_{category}.csv",
),
f"proficiency_over_category_elements_{category}.csv",
)
if args.task_name not in CLASSIFICATION_TASKS:
if category == "subtask":
zipf.write(
os.path.join(
args.input_dir_generation_scores,
f"generation_correlations_{category}.csv",
),
f"distance_bw_GT_and_Output_{category}.csv",
)
if __name__ == "__main__":
# get the model generation prompts
parser = argparse.ArgumentParser()
parser.add_argument(
"--categories",
type=str,
default="subtask,domain",
help="Categories to use for the reportcard",
)
parser.add_argument(
"--input_dir_generation_scores",
type=str,
default="",
help="Input directory for finding the generation scores",
)
parser.add_argument(
"--generation_file",
type=str,
default="",
help="Input file for finding the generations",
)
parser.add_argument(
"--proficiency_metric",
type=str,
default="",
help="Proficiency metric to use for the reportcard",
)
parser.add_argument(
"--input_dir_gt_scores",
type=str,
default="",
help="Input directory for finding the ground truth scores",
)
parser.add_argument(
"--pretty_plot",
action="store_true",
help="Whether to use pretty plots or not",
)
parser = add_args(parser)
args = parser.parse_args()
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=args.logging_level)
# Random seed
seed_function(args)
api_key = authenticate(args)
all_scores_generations = {}
all_scores_gt = {}
# load both the generation scores and the ground truth scores
for category in args.categories.split(","):
# get the generation score for this category
generation_score_file = os.path.join(
args.input_dir_generation_scores, f"{category}_scores.csv"
)
generation_scores = pd.read_csv(generation_score_file)
# get the ground truth score for this category
gt_score_file = os.path.join(args.input_dir_gt_scores, f"{category}_scores.csv")
gt_scores = pd.read_csv(gt_score_file)
# add to the dictionary
all_scores_generations[category] = generation_scores
all_scores_gt[category] = gt_scores
# load the proficiency scores from the generation jsonl file
proficiency_scores = {}
proficiency_metric = ""
with open(args.generation_file) as f:
assert (
args.proficiency_metric in PROFICIENCY_METRICS[args.task_name]
), "Proficiency metric not supported"
proficiency_metric = f"generation_{args.proficiency_metric}"
for line in f:
line = json.loads(line)
proficiency_scores[line["id"]] = line
# convert the proficiency scores to a dataframe
proficiency_scores_df = pd.DataFrame.from_dict(proficiency_scores, orient="index")
# rename the column
proficiency_scores_df = proficiency_scores_df.rename(
columns={proficiency_metric: "proficiency_score"}
)
# get the reportcard
get_reportcard(args, all_scores_generations, all_scores_gt, proficiency_scores_df)
| [
"Given a holistic picture of the performance of a machine learning model, you are asked to summarize the model's overall performance."
] |
2024-01-10 | vmurahari3/QualEval | score_categories.py | import re
# Insert code for taking arguments from command line
import argparse
import openai
import os
import json
import asyncio
from api_request_parallel_processor import process_api_requests_from_file
from utils.args import add_args
from utils.misc_utils import authenticate
import random
import pandas as pd
from utils.templates import SCORING_PROMPTS, SYSTEM_PROMPTS, CLASSIFICATION_TASKS
def score_generation(args, generations, category, category_dataset, scoring_prompt):
# score each element in category_dataset with every generation
# generate a pandas dataframe with the scores
# return the dataframe
# get the prompt
task_instruction = category_dataset[0]["task_instruction"]
header_prompt = f"Good Job, we will now present the output generation from the language model. \n"
# system prompt
system_prompt = SYSTEM_PROMPTS[args.task_name]
# generate scores for all prompts
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
generated_samples = []
num_api_epochs = 0
failed_to_match = 0
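    # remember each generation's position by id so that generations whose API call
    # fails or whose response cannot be parsed can be re-queued for another attempt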
id_to_generation_index = {g["id"]: i for i, g in enumerate(generations)}
cur_generations = generations
while num_api_epochs < 30 and len(cur_generations) > 0:
# generate prompts for all categories and generations
failed_to_match = 0
prompt_file_path = os.path.join(
args.output_dir, f"prompt_score_{category}_{num_api_epochs}.jsonl"
)
metadata_file_path = os.path.join(
args.output_dir, f"metadata_score_{category}_{num_api_epochs}.jsonl"
)
response_file_path = os.path.join(
args.output_dir, f"response_score_{category}_{num_api_epochs}.jsonl"
)
if args.delete_old_responses:
if os.path.exists(response_file_path):
os.remove(response_file_path)
failed_to_match = 0
all_prompts = []
for generation in cur_generations:
category_prompt = "\n".join([c[category] for c in category_dataset])
# don't include the generation in the prompt for classification tasks
if (
args.task_name in CLASSIFICATION_TASKS
or not args.use_output_for_scoring
):
prompt = (
header_prompt
+ scoring_prompt
+ f"\n Make sure to score all {len(category_dataset)} {category} \n"
+ f"{category}:\n{category_prompt}"
)
else:
prompt = (
header_prompt
+ scoring_prompt
+ f"\n Make sure to score all {len(category_dataset)} {category} \n"
+ f"{category}:\n{category_prompt} \n Output: {generation[args.generation_field]} \n"
)
all_prompts.append(
{
"prompt": prompt,
"generation": generation,
"category_type": category,
"id": generation["id"],
}
)
with open(prompt_file_path, "w") as f, open(
metadata_file_path, "w"
) as metadata_f:
for prompt in all_prompts:
cur_input = prompt["generation"][args.input_field]
formatted_request = {
"model": "gpt-3.5-turbo-16k",
"messages": [
{"role": "system", "content": system_prompt},
{
"role": "user",
"content": f"This is the input text: {cur_input} \n. This is the task instruction given to a language model: {task_instruction} \n",
},
{
"role": "user",
"content": f"Please understand and note the above input and the task instruction.",
},
{"role": "user", "content": prompt["prompt"]},
],
"temperature": args.temperature,
"max_tokens": 1000,
"top_p": args.top_p,
"frequency_penalty": args.frequency_penalty,
"presence_penalty": args.presence_penalty,
}
metadata = {
"generation": prompt["generation"][args.generation_field],
"id": prompt["id"],
}
f.write(json.dumps(formatted_request))
f.write("\n")
metadata_f.write(json.dumps(metadata))
metadata_f.write("\n")
request_url = "https://api.openai.com/v1/chat/completions"
# Make API calls
asyncio.run(
process_api_requests_from_file(
prompt_file_path,
response_file_path,
request_url,
args.api_key,
args.max_requests_per_minute,
args.max_tokens_per_minute,
"cl100k_base",
args.max_attempts,
args.logging_level,
metadata_file_path,
)
)
# process the responses and save them in the data directory
error_generations = []
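        # each response line is expected to look like "Score: <digit> <evidence ...>";
        # the named groups capture the numeric score and the trailing explanation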
regex_template = r".*Score:\s*(?P<score>\d+)(?P<explanation>[\s|\S]*)"
# for failed examples, refire the api call
with open(response_file_path, "r") as f:
api_responses = [json.loads(line) for line in f]
for api_response in api_responses:
metadata = api_response[2]
api_response = api_response[1]
if "error" in api_response or "choices" not in api_response:
print("Failed to generate response")
failed_to_match += 1
error_generations.append(
generations[id_to_generation_index[metadata["id"]]]
)
continue
# parse the response with regex filtering
response_text = api_response["choices"][0]["message"]["content"]
response_scores = response_text.split("\n")
                # keep only non-empty lines that contain both a score and supporting evidence
response_scores = [
s
for s in response_scores
if s and "score:" in s.lower() and "evidence" in s.lower()
]
if len(response_scores) != len(category_dataset):
print("Failed to match example: {}".format(response_text))
print(
f"Number of scores {len(response_scores)} does not match number of category items {len(category_dataset)}"
)
failed_to_match += 1
error_generations.append(
generations[id_to_generation_index[metadata["id"]]]
)
continue
explanation = ""
cur_example_scores = []
for ex_id, ex in enumerate(response_scores):
match = re.match(regex_template, ex)
if match and "score" in match.groupdict():
score = match.group("score").strip()
if "explanation" in match.groupdict():
explanation = match.group("explanation").strip()
else:
print("Failed to match example: {}".format(response_text))
error_generations.append(
generations[id_to_generation_index[metadata["id"]]]
)
failed_to_match += 1
break
output_example = {
"generation": metadata["generation"],
category: category_dataset[ex_id][category],
"score": score,
"id": metadata["id"],
"explanation": explanation,
}
cur_example_scores.append(output_example)
if len(cur_example_scores) == len(category_dataset):
generated_samples.extend(cur_example_scores)
cur_generations = error_generations
num_api_epochs += 1
print("Failed to match {} examples".format(failed_to_match))
print("Retrying with {} examples".format(len(cur_generations)))
print("Number of api epochs: {}".format(num_api_epochs))
print("Finished all epochs. Failed to match {} examples".format(failed_to_match))
# deal with the remaining examples
    assert len(cur_generations) == 0, "Not all examples could be processed"
output_df = pd.DataFrame.from_dict(generated_samples)
return output_df
# input json files and extract the generation_field
# score the generation_field;
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_field",
type=str,
help="The key in input file for input text",
)
parser.add_argument(
"--generation_field",
type=str,
help="The key in input file for generations",
)
parser.add_argument(
"--generation_file",
type=str,
help="The generation file path, expecting a jsonl file",
)
parser.add_argument(
"--subtask_file",
default="",
type=str,
help="The subtask file path, expecting a jsonl file",
)
parser.add_argument(
"--domain_file",
default="",
type=str,
help="The domain file path, expecting a jsonl file",
)
parser.add_argument(
"--use_output_for_scoring",
default=1,
type=int,
help="Whether to use the output for scoring",
)
parser = add_args(parser)
args = parser.parse_args()
api_key = authenticate(args)
args.api_key = api_key
# load a jsonl file
with open(args.generation_file, "r") as f:
generations = [json.loads(line) for line in f]
categories = ["subtask", "domain"]
for category in categories:
scoring_prompt = SCORING_PROMPTS[args.task_name][category]
# load options for the category
if category == "subtask":
category_file = args.subtask_file
elif category == "domain":
category_file = args.domain_file
# load category file jsonl
with open(category_file, "r") as f:
category_dataset = [json.loads(line) for line in f]
score_df = score_generation(
args, generations, category, category_dataset, scoring_prompt
)
# store the scores in args.output_dir
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
output_score_file = os.path.join(args.output_dir, f"{category}_scores.csv")
score_df.to_csv(output_score_file)
if __name__ == "__main__":
main()
| [
".*Score:\\s*(?P<score>\\d+)(?P<explanation>[\\s|\\S]*)",
"\n",
"Good Job, we will now present the output generation from the language model. \n",
"\n Make sure to score all 1 PLACEHOLDER \n",
"PLACEHOLDERPLACEHOLDER\n Make sure to score all 1 PLACEHOLDER \nPLACEHOLDER:\nPLACEHOLDER",
"This is the input text: PLACEHOLDER \n. This is the task instruction given to a language model: PLACEHOLDER \n",
"Please understand and note the above input and the task instruction.",
"[]",
"prompt_score_PLACEHOLDER_PLACEHOLDER.jsonl"
] |
2024-01-10 | 16dprice/kanga_data_gathering | send_prompt.py | import openai
openai.api_key = "MESSAGE DJ OR KATIE FOR THIS"
def send_prompt(prompt, model, num_completions=1):
completion = openai.Completion.create(
model=model,
prompt=prompt,
max_tokens=40,
temperature=0.9,
frequency_penalty=1.5,
presence_penalty=1.5,
n=num_completions,
stop=["\n"]
)
return completion
sample_prompt = """The following is a conversation between Robin and Ted. Ted has been angry lately and is upset about the cereal he has that is too sugary. Robin attempts to be compassionate and console Ted.
Ted: This cereal is too sugary! I'm totally unsatisfied with it.
Robin: I'm sorry Ted. Is there anything I can do about it?
Ted: I wish you could just get me better cereal. This cereal is terrible.
Robin:"""
| [
"The following is a conversation between Robin and Ted. Ted has been angry lately and is upset about the cereal he has that is too sugary. Robin attempts to be compassionate and console Ted.\n\nTed: This cereal is too sugary! I'm totally unsatisfied with it.\nRobin: I'm sorry Ted. Is there anything I can do about it?\nTed: I wish you could just get me better cereal. This cereal is terrible.\nRobin:"
] |
2024-01-10 | amitlals/Azure-Cognitive-Search-Azure-OpenAI-Accelerator-BING-API | apps~frontend~pages~1_Search.py | import streamlit as st
import urllib
import os
import re
import time
import random
from collections import OrderedDict
from openai.error import OpenAIError
from langchain.docstore.document import Document
from langchain.chat_models import AzureChatOpenAI
from utils import (
get_search_results,
order_search_results,
model_tokens_limit,
num_tokens_from_docs,
embed_docs,
search_docs,
get_answer,
)
st.set_page_config(page_title="GPT Smart Search", page_icon="📖", layout="wide")
# Add custom CSS styles to adjust padding
st.markdown("""
<style>
.block-container {
padding-top: 1rem;
padding-bottom: 0rem;
}
</style>
""", unsafe_allow_html=True)
st.header("GPT Smart Search Engine")
def clear_submit():
st.session_state["submit"] = False
with st.sidebar:
st.markdown("""# Instructions""")
st.markdown("""
Ask a question that you think can be answered with the information in about 10k Arxiv Computer Science publications from 2020-2021 or in 52k Medical Covid-19 Publications from 2020.
For example:
- What are markov chains?
- List the authors that talk about Boosting Algorithms
- How does random forest work?
- What kind of problems can I solve with reinforcement learning? Give me some real life examples
- What kind of problems Turing Machines solve?
- What are the main risk factors for Covid-19?
- What medicine reduces inflammation in the lungs?
- Why Covid doesn't affect kids that much compared to adults?
    \nYou will notice that the answers to these questions are different from the open ChatGPT, since these papers are the only possible context. This search engine does not look at the open internet to answer these questions. If the context doesn't contain information, the engine will respond: I don't know.
""")
coli1, coli2= st.columns([3,1])
with coli1:
query = st.text_input("Ask a question to your enterprise data lake", value= "What is CLP?", on_change=clear_submit)
with coli2:
language= st.selectbox('Answer language',('English', 'Spanish', 'French', 'German', 'Portuguese', 'Italian'), index=0)
# options = ['English', 'Spanish', 'Portuguese', 'French', 'Russian']
# selected_language = st.selectbox('Answer Language:', options, index=0)
button = st.button('Search')
if (not os.environ.get("AZURE_SEARCH_ENDPOINT")) or (os.environ.get("AZURE_SEARCH_ENDPOINT") == ""):
st.error("Please set your AZURE_SEARCH_ENDPOINT on your Web App Settings")
elif (not os.environ.get("AZURE_SEARCH_KEY")) or (os.environ.get("AZURE_SEARCH_KEY") == ""):
st.error("Please set your AZURE_SEARCH_ENDPOINT on your Web App Settings")
elif (not os.environ.get("AZURE_OPENAI_ENDPOINT")) or (os.environ.get("AZURE_OPENAI_ENDPOINT") == ""):
st.error("Please set your AZURE_OPENAI_ENDPOINT on your Web App Settings")
elif (not os.environ.get("AZURE_OPENAI_API_KEY")) or (os.environ.get("AZURE_OPENAI_API_KEY") == ""):
st.error("Please set your AZURE_OPENAI_API_KEY on your Web App Settings")
elif (not os.environ.get("DATASOURCE_SAS_TOKEN")) or (os.environ.get("DATASOURCE_SAS_TOKEN") == ""):
st.error("Please set your DATASOURCE_SAS_TOKEN on your Web App Settings")
else:
os.environ["OPENAI_API_BASE"] = os.environ.get("AZURE_OPENAI_ENDPOINT")
os.environ["OPENAI_API_KEY"] = os.environ.get("AZURE_OPENAI_API_KEY")
os.environ["OPENAI_API_VERSION"] = os.environ["AZURE_OPENAI_API_VERSION"]
os.environ["OPENAI_API_TYPE"] = "azure"
MODEL = os.environ.get("AZURE_OPENAI_MODEL_NAME")
llm = AzureChatOpenAI(deployment_name=MODEL, temperature=0, max_tokens=500)
if button or st.session_state.get("submit"):
if not query:
st.error("Please enter a question!")
else:
# Azure Search
try:
index1_name = "cogsrch-index-files"
index2_name = "cogsrch-index-csv"
indexes = [index1_name, index2_name]
agg_search_results = get_search_results(query, indexes)
ordered_results = order_search_results(agg_search_results, reranker_threshold=1)
st.session_state["submit"] = True
# Output Columns
placeholder = st.empty()
except Exception as e:
st.markdown("Not data returned from Azure Search, check connection..")
st.markdown(e)
if "ordered_results" in locals():
try:
docs = []
for key,value in ordered_results.items():
for page in value["chunks"]:
docs.append(Document(page_content=page, metadata={"source": value["location"]}))
add_text = "Reading the source documents to provide the best answer... ⏳"
if "add_text" in locals():
with st.spinner(add_text):
if(len(docs)>0):
tokens_limit = model_tokens_limit(MODEL)
num_tokens = num_tokens_from_docs(docs)
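                                # if the retrieved chunks exceed the model's context
                                # window, embed them and keep only the top matches,
                                # then pick map_reduce vs stuff accordingly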
if num_tokens > tokens_limit:
index = embed_docs(docs)
top_docs = search_docs(index,query)
num_tokens = num_tokens_from_docs(top_docs)
chain_type = "map_reduce" if num_tokens > tokens_limit else "stuff"
else:
top_docs = docs
chain_type = "stuff"
answer = get_answer(llm=llm, docs=top_docs, query=query, language=language, chain_type=chain_type)
else:
answer = {"output_text":"No results found" }
else:
answer = {"output_text":"No results found" }
with placeholder.container():
st.markdown("#### Answer")
split_word = "Source"
split_regex = re.compile(f"{split_word}s:?\\W*", re.IGNORECASE)
answer_text = split_regex.split(answer["output_text"])[0]
st.markdown(answer_text)
try:
sources_list = split_regex.split(answer["output_text"])[1].replace(" ","").split(",")
#sources_list = answer["output_text"].split("SOURCES:")[1].replace(" ","").split(",")
sources_markdown = "Sources: "
for index, value in enumerate(sources_list):
sources_markdown += "[[" + str(index+1) + "]](" + value + os.environ.get("DATASOURCE_SAS_TOKEN") + ")"
st.markdown(sources_markdown)
except Exception as e:
st.markdown("Sources: N/A")
st.markdown("---")
st.markdown("#### Search Results")
if(len(docs)>0):
for key, value in ordered_results.items():
url = value['location'] + os.environ.get("DATASOURCE_SAS_TOKEN")
title = str(value['title']) if (value['title']) else value['name']
score = str(round(value['score']*100/4,2))
st.markdown("[" + title + "](" + url + ")" + " (Score: " + score + "%)")
st.markdown(value["caption"])
st.markdown("---")
except OpenAIError as e:
st.error(e) | [] |
2024-01-10 | mosaicml/llm-foundry | llmfoundry~models~inference_api_wrapper~openai_causal_lm.py | # Copyright 2022 MosaicML LLM Foundry authors
# SPDX-License-Identifier: Apache-2.0
"""Implements a OpenAI chat and causal LM inference API wrappers."""
import logging
import os
import random
from time import sleep
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
import torch
from composer.core.types import Batch
from composer.utils.import_helpers import MissingConditionalImportError
from transformers import AutoTokenizer
log = logging.getLogger(__name__)
from llmfoundry.models.inference_api_wrapper.interface import \
InferenceAPIEvalWrapper
__all__ = [
'OpenAICausalLMEvalWrapper',
'OpenAIChatAPIEvalWrapper',
]
if TYPE_CHECKING:
from openai.types.chat.chat_completion import ChatCompletion
from openai.types.completion import Completion
from openai.types.completion_choice import Logprobs
MAX_RETRIES = 10
class OpenAIEvalInterface(InferenceAPIEvalWrapper):
def __init__(self, model_cfg: Dict, tokenizer: AutoTokenizer) -> None:
super().__init__(model_cfg, tokenizer)
assert os.getenv(
'OPENAI_API_KEY'
) is not None, 'No OpenAI API Key found. Ensure it is saved as an environmental variable called OPENAI_API_KEY.'
try:
import openai
except ImportError as e:
raise MissingConditionalImportError(
extra_deps_group='openai',
conda_package='openai',
conda_channel='conda-forge') from e
self.client = openai.OpenAI()
self.model_name = model_cfg['version']
def generate_completion(self, prompt: str, num_tokens: int):
raise NotImplementedError()
def process_result(self, completion): # pyright: ignore
raise NotImplementedError()
def get_next_token_logit_tensor(self, prompt: str, num_tokens: int = 1):
completion = self.try_generate_completion(prompt, num_tokens)
return self.process_result(completion)
def try_generate_completion(self, prompt: str, num_tokens: int):
try:
from openai import APITimeoutError, RateLimitError
except ImportError as e:
raise MissingConditionalImportError(
extra_deps_group='openai',
conda_package='openai',
conda_channel='conda-forge') from e
tries = 0
completion = None
delay = 1
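        # retry with exponential backoff plus random jitter on rate-limit and timeout
        # errors, giving up after MAX_RETRIES attempts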
while tries < MAX_RETRIES:
tries += 1
try:
completion = self.generate_completion(prompt, num_tokens)
break
except RateLimitError as e:
if 'You exceeded your current quota' in str(
e._message): # pyright: ignore
raise e
delay *= 2 * (1 + random.random())
sleep(delay)
continue
except APITimeoutError as e:
delay *= 2 * (1 + random.random())
sleep(delay)
continue
return completion
class OpenAIChatAPIEvalWrapper(OpenAIEvalInterface):
def __init__(self, model_cfg: Dict, tokenizer: AutoTokenizer) -> None:
super().__init__(model_cfg, tokenizer)
self.generate_completion = lambda prompt, num_tokens: self.client.chat.completions.create(
model=self.model_name,
messages=[{
'role':
'system',
'content':
model_cfg.get('system_role_prompt',
'Please complete the following text: ')
}, {
'role': 'user',
'content': prompt
}],
max_tokens=num_tokens,
temperature=0.0)
def retokenize(self, tokens: List[int], cont_idxs: List[int]):
"""Chat API will never respond with a word-initial space.
If the continuation tokens begin with a word initial space, we need to
re-tokenize with the space removed.
"""
original_len = len(tokens)
retokenized_continuation = self.tokenizer(
self.tokenizer.decode(tokens[cont_idxs[0]:cont_idxs[-1] +
1]).strip())['input_ids']
# replace the original continuation with the retokenized continuation + padding
padding = [tokens[-1]] * (
len(tokens) - len(tokens[:cont_idxs[0]] + retokenized_continuation))
tokens = tokens[:cont_idxs[0]] + retokenized_continuation + padding
if len(tokens) > original_len:
# this only happens if we were already at max seq len and the continuation got LARGER
tokens = tokens[-original_len:]
cont_idxs = list(
range(original_len - len(retokenized_continuation),
original_len))
else:
cont_idxs = list(
range(cont_idxs[0],
cont_idxs[0] + len(retokenized_continuation)))
return torch.tensor(tokens), torch.tensor(cont_idxs)
def rebatch(self, batch: Batch):
"""Chat API tokenization has different behavior than GPT3.
Model responses will never begin with spaces even if the continuation is
expected to, so we need to retokenize the input to account for that.
"""
new_batch: Dict[str, Union[List[torch.Tensor], torch.Tensor]] = {
'input_ids': [],
'continuation_indices': [],
'labels': []
}
for tokens, cont_idxs in zip(batch['input_ids'],
batch['continuation_indices']):
tokens, cont_idxs = self.retokenize(tokens.tolist(),
cont_idxs.tolist())
assert isinstance(new_batch['input_ids'], list)
new_batch['input_ids'].append(tokens)
assert isinstance(new_batch['labels'], list)
new_batch['labels'].append(tokens)
assert isinstance(new_batch['continuation_indices'], list)
new_batch['continuation_indices'].append(cont_idxs)
new_batch.update({
k: torch.stack(new_batch[k]) # pyright: ignore
for k in ['input_ids', 'labels']
})
new_batch.update({k: v for k, v in batch.items() if k not in new_batch})
return new_batch
def eval_forward(self, batch: Batch, outputs: Optional[Any] = None):
# Override the base class because Chat's API always strips spacing from model outputs resulting in different tokens
# than what the continuation would expect.
# Get around this issue by retokenizing the batch to remove spacing from the continuation as well as
# decoding the whole continuation at once.
padding_tok = self.tokenizer.pad_token_id if self.tokenizer.pad_token_id else self.tokenizer.eos_token_id
output_logits_batch = []
batch = self.rebatch(batch)
for tokens, cont_idxs in zip(batch['input_ids'],
batch['continuation_indices']):
seqlen = tokens.shape[0]
tokens = tokens.tolist()
cont_idxs = cont_idxs.tolist()
expected_cont_tokens = tokens[cont_idxs[0]:cont_idxs[-1] + 1]
output_logits = torch.nn.functional.one_hot(
torch.tensor(tokens[1:cont_idxs[0]]),
num_classes=self.tokenizer.vocab_size)
prompt = self.tokenizer.decode(tokens[:cont_idxs[0]])
next_logit_tensor = self.get_next_token_logit_tensor(
prompt, num_tokens=len(expected_cont_tokens))
if next_logit_tensor is not None:
output_logits = torch.cat([output_logits, next_logit_tensor])
padding = torch.nn.functional.one_hot(
torch.full((seqlen - output_logits.shape[0],), padding_tok),
num_classes=self.tokenizer.vocab_size)
output_logits = torch.cat([output_logits, padding])
output_logits_batch.append(output_logits)
return torch.stack(output_logits_batch).to(batch['input_ids'].device)
def process_result(self, completion: Optional['ChatCompletion']):
if completion is None:
raise ValueError("Couldn't generate model output")
if len(completion.choices) > 0:
tensors = []
for t in self.tokenizer(
completion.choices[0].message.content)['input_ids']:
tensors.append(
self.tokenizer.construct_logit_tensor(
{self.tokenizer.decode([t]): 0.0}))
if len(tensors) == 0:
return None
return torch.stack(tensors)
else:
# the model sometimes stops early even though we are still requesting tokens!
# not sure if there's a fix
return None
class OpenAICausalLMEvalWrapper(OpenAIEvalInterface):
def __init__(self, model_cfg: Dict, tokenizer: AutoTokenizer) -> None:
super().__init__(model_cfg, tokenizer)
# TODO: this will be deprecated
self.generate_completion = lambda prompt, num_tokens: self.client.completions.create(
model=self.model_name,
prompt=prompt,
max_tokens=num_tokens,
logprobs=5,
temperature=0.0)
def process_result(self, completion: Optional['Completion']):
if completion is None:
raise ValueError("Couldn't generate model output")
if TYPE_CHECKING:
assert isinstance(completion, Completion)
assert isinstance(completion.choices[0].logprobs, Logprobs)
assert isinstance(completion.choices[0].logprobs.top_logprobs, list)
if len(completion.choices[0].logprobs.top_logprobs[0]) > 0:
tensor = self.tokenizer.construct_logit_tensor(
dict(completion.choices[0].logprobs.top_logprobs[0]))
return tensor
else:
# the model sometimes stops early even though we are still requesting tokens!
# not sure if there's a fix
return None
| [
"system_role_prompt",
"Please complete the following text: "
] |
2024-01-10 | BruceChar/rust-exercise | poker~.chat~workflows~auto_command~action~ask_codebase~handler.py | import os
import re
import sys
import json
import tempfile
import uuid
from chat.ask_codebase.store.qdrant import QdrantWrapper as Q, get_client
from chat.ask_codebase.indexing.embedding import EmbeddingWrapper as E
from langchain.embeddings import HuggingFaceEmbeddings
from chat.ask_codebase.indexing.loader.file import (
FileLoader,
FileSource,
gen_local_reference_maker,
)
from chat.util.misc import is_source_code
from chat.ask_codebase.chains.simple_qa import SimpleQA
from chat.ask_codebase.chains.stuff_dc_qa import StuffDocumentCodeQa
def get_app_data_dir(app_name):
home = os.path.expanduser("~")
if os.name == "nt": # For Windows
appPath = os.path.join(home, "AppData", "Roaming", app_name)
else: # For Unix and Linux
appPath = os.path.join(home, ".local", "share", app_name)
if not os.path.exists(appPath):
os.makedirs(appPath)
return appPath
supportedFileTypes = []
STORAGE_FILE = os.path.join(get_app_data_dir("devchat"), "qdrant_storage2")
SOURCE_NAME = ""
def query(question: str):
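    # reuse the persisted Qdrant collection for this workspace and answer the question
    # with the stuff-document QA chain, printing the matched file paths and contents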
try:
client = get_client(mode=STORAGE_FILE)
q = Q.reuse(
source_name=SOURCE_NAME,
embedding_cls=HuggingFaceEmbeddings,
client=client,
)
chain = StuffDocumentCodeQa(q)
_, docs = chain.run(question)
for d in docs:
print(d.metadata.get('filepath'))
print(d.page_content)
sys.exit(0)
except Exception as e:
print(e)
sys.exit(1)
if __name__ == "__main__":
try:
if os.path.exists(".chat/askcode.json"):
with open(".chat/askcode.json", "r") as f:
askcode_data = json.load(f)
SOURCE_NAME = askcode_data.get("SOURCE_NAME", str(uuid.uuid4()))
else:
SOURCE_NAME = str(uuid.uuid4())
with open(".chat/askcode.json", "w+") as f:
json.dump({"SOURCE_NAME": SOURCE_NAME}, f)
query(sys.argv[1])
sys.exit(0)
except Exception as e:
print(e)
sys.exit(1) | [] |
2024-01-10 | UCDAyoung/poetUsingOpenAI | sample01.py | # GPT시인
# ver 0.9
import openai
import streamlit as st
# basic settings that must be configured
openai.api_key = '8a8e7d4d1700468f9bcafc6f48a89216'
openai.api_base = 'https://helloairad.openai.azure.com/'
openai.api_type = 'azure'
openai.api_version = '2023-05-15'
st.header('# welcome to ChatGPT',divider = 'rainbow')
st.write()
name = st.text_input('작가명을 입력하세요')
st.write(name+'## 작가님 안녕하세요')
subject = st.text_input('시의 주제를 입력하세요')
st.write(subject)
content = st.text_input('추가로 하고 싶은 이야기를 입력하세요')
st.write(content)
button_click = st.button('시 생성')
if(button_click):
with st.spinner('Wait for it....'):
result =openai.ChatCompletion.create(
engine='devmodel',
messages=[
{'role':'system','content':'You are a helpful assistant'},
{'role':'user','content':'작가의 이름은 홍길동' + name},
{'role':'user','content':'시의 주제는 코딩'+subject },
{'role':'user','content':content},
{'role':'user','content':'위의 내용으로 시를 생성해줘'}
]
)
st.divider()
st.write('# Result')
st.write(result.choices[0].message.content)
print(result)
| [
"작가의 이름은 홍길동PLACEHOLDER",
"You are a helpful assistant",
"위의 내용으로 시를 생성해줘",
"시의 주제는 코딩PLACEHOLDER"
] |
2024-01-10 | michaelfdickey/OpenAI-API-with-Python-Bootcamp | openai_api~02-12_model-completions.py | import openai
import os
import getpass
key = getpass.getpass(prompt='Enter your OpenAI API key: ')
openai.api_key = key
#prompt = input('Enter your text: ')
prompt = 'give me a motto for a futuristic motorcycle company'
# roles => system, user, assistant
messages = [
{'role': 'system', 'content':'you are a good and smart assistant'},
{'role': 'user', 'content':prompt},
]
response = openai.ChatCompletion.create(
model = 'gpt-3.5-turbo',
messages = messages,
temperature = 1,
top_p = 0.8,
max_tokens = 1000,
n = 2
)
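# n=2 requests two alternative completions; each choice is printed separately below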
print(response['choices'][0]['message']['content'])
print(response['choices'][1]['message']['content']) | [
"you are a good and smart assistant",
"give me a motto for a futuristic motorcycle company"
] |
2024-01-10 | michaelfdickey/OpenAI-API-with-Python-Bootcamp | openai_api~02-09_making-chatGPT-requests.py |
import openai
import os
import getpass
key = getpass.getpass(prompt='Enter your OpenAI API key: ')
openai.api_key = key
# prompt user to enter their text
#prompt = input('Enter your text: ')
#print('prompt is: ', repr(prompt))
prompt = 'tell me the name of the largest city in the world'
# roles => system, user, assistant
messages = [
{'role': 'system', 'content':'Answer as detailed as possible, also while rhyming as much as possible.'},
{'role': 'user', 'content':prompt},
]
response = openai.ChatCompletion.create(
model = 'gpt-3.5-turbo',
messages = messages,
temperature = 0.8,
max_tokens = 1000
)
"""
#openai.Completion.create() # will take some paramaters and return a response object
response = openai.Completion.create(
model='text-davinci-003',
prompt=prompt,
temperature=0.8, # controls the randomness of the output, the higher the temp the more random. 0-2 default is 1. 0 is deterministic
max_tokens=1000 # max generated for completion, 4096 is the max, input and output count to this limit
)
"""
#print(response)
print(response['choices'][0]['message']['content']) | [
"Answer as detailed as possible, also while rhyming as much as possible.",
"tell me the name of the largest city in the world"
] |
2024-01-10 | michaelfdickey/OpenAI-API-with-Python-Bootcamp | openai_api~02_06-installing_and_authenticating.py |
import openai
import os
import getpass
key = getpass.getpass(prompt='Enter your OpenAI API key: ')
openai.api_key = key
# prompt user to enter their text
prompt = input('Enter your text: ')
print('prompt is: ', repr(prompt))
#openai.Completion.create() # will take some paramaters and return a response object
response = openai.Completion.create(
model='text-davinci-003',
prompt=prompt,
temperature=0.8, # controls the randomness of the output, the higher the temp the more random. 0-2 default is 1. 0 is deterministic
max_tokens=1000 # max generated for completion, 4096 is the max, input and output count to this limit
)
#print(response)
print(response['choices'][0]['text']) | [
"Enter your text: "
] |
2024-01-10 | 123-code/Python-Experiments | EntityExtraction.py | from langchain.chat_models import ChatOpenAI
from langchain.chains import create_tagging_chain, create_tagging_chain_pydantic
from langchain.prompts import ChatPromptTemplate
import os
from neo4j import GraphDatabase
import glob
from pydantic import BaseModel
openai_api_key=os.getenv("OPENAI_API_KEY")
schema = {
"properties":{
"skills":{"type":"string"},
}
}
llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
chain = create_tagging_chain(schema, llm)
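# the tagging chain asks the LLM to fill in the schema above, so each profile document
# is reduced to a dict containing a single "skills" string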
def extract_entities(folder):
files = glob.glob(f'data/{folder}/*')
system_msg = "You are a helpful IT-project and account management expert who extracts information from documents."
print(len(files))
results = []
for file in files:
with open(file) as f:
text = f.read()
result = chain.run(text)
results.append(result)
return results
print(extract_entities('people_profiles'))
| [] |
2024-01-10 | mivanovitch/superagent | app~lib~vectorstores~pinecone.py | import pinecone
from decouple import config
from langchain.vectorstores.pinecone import Pinecone
pinecone.init(
api_key=config("PINECONE_API_KEY"), # find at app.pinecone.io
environment=config("PINECONE_ENVIRONMENT"), # next to api key in console
)
pinecone.Index("superagent")
class PineconeVectorstore:
def __init__(self):
pass
def from_documents(self, docs, embeddings, index_name, namespace):
Pinecone.from_documents(
docs, embeddings, index_name="superagent", namespace=namespace
)
def from_existing_index(self, embeddings, namespace):
return Pinecone.from_existing_index(
"superagent", embedding=embeddings, namespace=namespace
)
| [] |
2024-01-10 | safeisrisky/nlp-tasks-demo | apps~2_nlp_spacy.py | import streamlit as st
import pandas as pd
import openai
import json
import utils
from utils import get_message
from utils import get_system_prompt, get_companies_prompt, get_non_companies_prompt
import spacy_streamlit as ss
import spacy
if not utils.check_password():
st.stop()
nlp_tasks_menu = [
"Tokenization",
"Word2Vec",
"Named Entity Recognition",
"Dependency Parser and POS",
"Similarity",
]
nlp_models_menu = ["en_core_web_sm", "en_core_web_md", "en_core_web_lg"]
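# cache the loaded spaCy pipeline per model name so switching models in the sidebar
# does not reload the model on every Streamlit rerun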
@st.cache_resource
def get_nlp_model(model_type):
"Loading NLP Model"
nlp = spacy.load(model_type)
return nlp
def main():
if "nlp_model" not in st.session_state:
st.session_state.nlp_model = "en_core_web_sm"
if "rawtext" not in st.session_state:
st.session_state.rawtext = None
model_choice = st.sidebar.selectbox("NLP Model", nlp_models_menu)
st.session_state.nlp_model = model_choice
nlp = get_nlp_model(st.session_state.nlp_model)
choice = st.sidebar.selectbox("Menu", nlp_tasks_menu)
if choice == "Tokenization":
st.subheader("Tokenization")
input_tok = st.text_area("Enter Text", value=st.session_state.rawtext)
st.session_state.rawtext = input_tok
tok_button = st.button("Tokenize")
if st.session_state.rawtext:
docx = nlp(st.session_state.rawtext)
ss.visualize_tokens(docx)
elif choice == "Named Entity Recognition":
st.subheader("Named Entity Recognition")
input_ner = st.text_area("Enter Text", value=st.session_state.rawtext)
st.session_state.rawtext = input_ner
ner_button = st.button("NER")
if st.session_state.rawtext:
docx = nlp(st.session_state.rawtext)
ss.visualize_ner(docx)
elif choice == "Dependency Parser and POS":
st.subheader("Dependency Parser and POS Tagging")
input_dep = st.text_area("Enter Text", value=st.session_state.rawtext)
st.session_state.rawtext = input_dep
dep_button = st.button("Go")
if st.session_state.rawtext:
docx = nlp(st.session_state.rawtext)
ss.visualize_parser(docx)
elif choice == "Similarity":
text1 = "The Company went bankrupt"
text2 = "The company was involved in a financial scandal"
ss.visualize_similarity(nlp, (text1, text2))
elif choice == "Word2Vec":
word_vec_input = st.text_input("Enter a word or phrase")
tokens = nlp(word_vec_input)
for token in tokens:
st.write("Token:", token.text, "Vector Shape:", token.vector.shape)
st.write(pd.DataFrame(token.vector))
main()
| [] |
2024-01-10 | safeisrisky/nlp-tasks-demo | apps~1_nlp_llm.py | import streamlit as st
import pandas as pd
import openai
import json
import utils
from utils import get_message
from utils import get_system_prompt, get_companies_prompt, get_non_companies_prompt
if not utils.check_password():
st.stop()
st.title("NLP Tasks using LLM")
if "jsonresp" not in st.session_state:
st.session_state.jsonresp = None
openai.api_key = st.secrets["OPENAI_API_KEY"]
app_password = st.secrets["APP_PASSWORD"]
client = openai.OpenAI()
openai_model = "gpt-4-1106-preview"
if "openai_model" not in st.session_state:
st.session_state["openai_model"] = openai_model
if "messages" not in st.session_state:
st.session_state.messages = []
system_prompt = get_system_prompt()
st.session_state.messages.append(get_message("system", system_prompt))
companies_prompt = get_companies_prompt()
st.session_state.messages.append(get_message("user", companies_prompt))
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.write(message["content"])
if prompt := st.chat_input("Please enter"):
st.chat_message("user").markdown(prompt)
st.session_state.messages.append(get_message("user", prompt))
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
stream = client.chat.completions.create(
model=st.session_state["openai_model"],
response_format={"type": "json_object"},
messages=st.session_state.messages,
stream=True,
)
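        # stream the completion chunk by chunk, updating the placeholder as text arrives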
for response in stream:
full_response += response.choices[0].delta.content or ""
message_placeholder.write(full_response + "▌")
try:
json_resp = json.loads(full_response)["data"]
message_placeholder.json(json_resp)
st.session_state.jsonresp = json_resp
except:
message_placeholder.write(full_response)
st.session_state.jsonresp = full_response
st.session_state.messages.append(get_message("assistant", full_response))
with st.expander("LLM Results"):
if st.session_state.jsonresp is not None:
try:
st.dataframe(pd.DataFrame(st.session_state.jsonresp))
except:
st.write(st.session_state.jsonresp)
| [] |
2024-01-10 | implementation-matters/code-for-paper | src~policy_gradients~torch_utils.py | import torch as ch
from torch.distributions.categorical import Categorical
import numpy as np
'''
Common functions/utilities implemented in PyTorch
Sorted into categories:
- General functions
- Actor-critic helpers
- Policy gradient (PPO/TRPO) helpers
- Normalization helpers
- Neural network helpers
- Initialization helpers
'''
########################
### GENERAL UTILITY FUNCTIONS:
# Parameters, unroll, cu_tensorize, cpu_tensorize, shape_equal_cmp,
# shape_equal, scat, determinant, safe_op_or_neg_one
########################
CKPTS_TABLE = 'checkpoints'
class Parameters():
'''
Parameters class, just a nice way of accessing a dictionary
> ps = Parameters({"a": 1, "b": 3})
> ps.A # returns 1
> ps.B # returns 3
'''
def __init__(self, params):
self.params = params
def __getattr__(self, x):
return self.params[x.lower()]
def unroll(*tensors):
'''
Utility function unrolling a list of tensors
Inputs:
    - tensors; all arguments should be tensors (at least 2D)
Returns:
- The same tensors but with the first two dimensions flattened
'''
rets = []
for t in tensors:
assert len(t.shape) >= 2
new_shape = [t.shape[0]*t.shape[1]] + list(t.shape[2:])
rets.append(t.contiguous().view(new_shape))
return rets
def cu_tensorize(t):
'''
Utility function for turning arrays into cuda tensors
Inputs:
- t, list
Returns:
- Tensor version of t
'''
return ch.tensor(t).float().cuda()
def cpu_tensorize(t):
'''
Utility function for turning arrays into cpu tensors
Inputs:
- t, list
Returns:
- Tensor version of t
'''
return ch.tensor(t).float()
def gpu_mapper(cpu=False):
    return ch.device('cuda:0') if not cpu else ch.device('cpu')
def shape_equal_cmp(*args):
'''
Checks that the shapes of the passed arguments are equal
Inputs:
- All arguments should be tensors
Returns:
- True if all arguments have the same shape, else ValueError
'''
for i in range(len(args)-1):
if args[i].shape != args[i+1].shape:
s = "\n".join([str(x.shape) for x in args])
raise ValueError("Expected equal shapes. Got:\n%s" % s)
return True
def shape_equal(a, *args):
'''
Checks that a group of tensors has a required shape
Inputs:
- a, required shape for all the tensors
- Rest of the arguments are tensors
Returns:
- True if all tensors are of shape a, otherwise ValueError
'''
for arg in args:
if list(arg.shape) != list(a):
if len(arg.shape) != len(a):
raise ValueError("Expected shape: %s, Got shape %s" \
% (str(a), str(arg.shape)))
for i in range(len(arg.shape)):
if a[i] == -1 or a[i] == arg.shape[i]:
continue
raise ValueError("Expected shape: %s, Got shape %s" \
% (str(a), str(arg.shape)))
return shape_equal_cmp(*args)
def scat(a, b, axis):
'''
Set-or-Cat (scat)
Circumventing a PyTorch bug that auto-squeezes empty tensors.
Inputs:
a - A torch tensor, or None
b - A torch tensor, can not be None
axis - Axis to concat with
Returns:
- b if a is None, otherwise b concatted to a
'''
if a is None:
return b
return ch.cat((a, b), axis)
def determinant(mat):
'''
Returns the determinant of a diagonal matrix
Inputs:
- mat, a diagonal matrix
Returns:
- The determinant of mat, aka product of the diagonal
'''
return ch.exp(ch.log(mat).sum())
def safe_op_or_neg_one(maybe_empty, op):
'''
Performs an operation on a tensor which may be empty.
Returns -1 if the tensor is empty, and returns the result
of the op otherwise.
Inputs:
- maybe_empty, tensor which may be empty
- op, an operation (tensor) -> (object) to perform
Returns:
- -1 if tensor is empty otherwise op(maybe_empty)
'''
if maybe_empty.nelement() == 0:
return -1.
else:
return op(maybe_empty)
########################
### ACTOR-CRITIC HELPERS:
# discount_path, get_path_indices, select_prob_dists
########################
# Can be used to convert rewards into discounted returns:
# ret[i] = sum of t = i to T of gamma^(t-i) * rew[t]
def discount_path(path, h):
'''
Given a "path" of items x_1, x_2, ... x_n, return the discounted
path, i.e.
X_1 = x_1 + h*x_2 + h^2 x_3 + h^3 x_4
X_2 = x_2 + h*x_3 + h^2 x_4 + h^3 x_5
etc.
Can do (more efficiently?) w SciPy. Python here for readability
Inputs:
- path, list/tensor of floats
- h, discount rate
Outputs:
- Discounted path, as above
'''
curr = 0
rets = []
for i in range(len(path)):
curr = curr*h + path[-1-i]
rets.append(curr)
rets = ch.stack(list(reversed(rets)), 0)
return rets
def get_path_indices(not_dones):
"""
Returns list of tuples of the form:
(agent index, time index start, time index end + 1)
For each path seen in the not_dones array of shape (# agents, # time steps)
    E.g. if we have a not_dones array such as:
tensor([[1, 1, 0, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 0, 1, 1, 0, 1, 1, 0, 1]], dtype=torch.uint8)
Then we would return:
[(0, 0, 3), (0, 3, 10), (1, 0, 3), (1, 3, 5), (1, 5, 9), (1, 9, 10)]
"""
indices = []
num_timesteps = not_dones.shape[1]
for actor in range(not_dones.shape[0]):
last_index = 0
for i in range(num_timesteps):
if not_dones[actor, i] == 0.:
indices.append((actor, last_index, i + 1))
last_index = i + 1
if last_index != num_timesteps:
indices.append((actor, last_index, num_timesteps))
return indices
def select_prob_dists(pds, selected=None, detach=True):
'''
    Given a tensor (or tuple) of probability distribution parameters and
    some indices, select the subset of the distributions
    `pds` given by the indices `selected`.
    Inputs:
    - pds: tensor of distribution parameters, or a tuple whose first element
      is indexed per sample and whose second element is shared
    - selected: indices of the distributions to keep (None keeps everything)
    - detach: if True, detach the returned tensors from the autograd graph
'''
if type(pds) is tuple:
if selected is not None:
tup = (pds[0][selected], pds[1])
else:
tup = pds
return tuple(x.detach() if detach else x for x in tup)
out = pds[selected] if selected is not None else pds
return out.detach() if detach else out
########################
### POLICY GRADIENT HELPERS:
# vjp, jvp, cg_solve, backtracking_line_search
########################
def vjp(f_x, theta, v, create=True):
'''
Vector-jacobian product
Calculates v^TJ, or J^T v, using standard backprop
Input:
- f_x, function of which we want the Jacobian
- theta, variable with respect to which we want Jacobian
- v, vector that we want multiplied by the Jacobian
Returns:
- J^T @ v, without using n^2 space
'''
grad_list = ch.autograd.grad(f_x, theta, v, retain_graph=True, create_graph=create)
return ch.nn.utils.parameters_to_vector(grad_list)
def jvp(f_x, theta, v):
'''
Jacobian-vector product
Calculate the Jacobian-vector product, see
https://j-towns.github.io/2017/06/12/A-new-trick.html for math
Input:
- f_x, function of which we want the Jacobian
- theta, variable with respect to which we want Jacobian
- v, vector that we want multiplied by the Jacobian
Returns:
- J @ v, without using n^2 space
'''
w = ch.ones_like(f_x, requires_grad=True)
JTw = vjp(f_x, theta, w)
return vjp(JTw, w, v)
def cg_solve(fvp_func, b, nsteps):
'''
Conjugate Gradients Algorithm
Solves Hx = b, where H is the Fisher matrix and b is known
Input:
- fvp_func, a callable function returning Fisher-vector product
- b, the RHS of the above
- nsteps, the number of steps on CG to take
Returns:
- An approximate solution x of Hx = b
'''
# Initialize the solution, residual, direction vectors
x = ch.zeros(b.size())
r = b.clone()
p = b.clone()
new_rnorm = ch.dot(r,r)
for _ in range(nsteps):
rnorm = new_rnorm
fvp = fvp_func(p)
alpha = rnorm / ch.dot(p, fvp)
x += alpha * p
r -= alpha * fvp
new_rnorm = ch.dot(r, r)
ratio = new_rnorm / rnorm
p = r + ratio * p
return x
def backtracking_line_search(f, x, expected_improve_rate,
num_tries=10, accept_ratio=.1):
'''
Backtracking Line Search
Inputs:
    - f, function mapping a candidate step to the improvement of the objective
    - x, biggest step to try (successively halved)
    - expected_improve_rate, expected improvement for the full step x
      (scaled down along with the step)
    - num_tries, number of times to try halving x before giving up
    - accept_ratio, fraction of the expected improvement that must be
      achieved to accept the step
'''
# f gives improvement
for i in range(num_tries):
scaling = 2**(-i)
scaled = x * scaling
improve = f(scaled)
expected_improve = expected_improve_rate * scaling
if improve/expected_improve > accept_ratio and improve > 0:
print("We good! %f" % (scaling,))
return scaled
return 0.
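# A sketch of how this is typically called in TRPO-style updates; `surrogate`,
# `params`, `full_step` and `grad` are placeholder names, not defined here:
#     improvement = lambda step: surrogate(params + step) - surrogate(params)
#     step = backtracking_line_search(improvement, full_step,
#                                     expected_improve_rate=grad.dot(full_step))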
########################
### NORMALIZATION HELPERS:
# RunningStat, ZFilter, StateWithTime
########################
class RunningStat(object):
'''
Keeps track of first and second moments (mean and variance)
of a streaming time series.
Taken from https://github.com/joschu/modular_rl
Math in http://www.johndcook.com/blog/standard_deviation/
'''
def __init__(self, shape):
self._n = 0
self._M = np.zeros(shape)
self._S = np.zeros(shape)
def push(self, x):
x = np.asarray(x)
assert x.shape == self._M.shape
self._n += 1
if self._n == 1:
self._M[...] = x
else:
oldM = self._M.copy()
self._M[...] = oldM + (x - oldM) / self._n
self._S[...] = self._S + (x - oldM) * (x - self._M)
@property
def n(self):
return self._n
@property
def mean(self):
return self._M
@property
def var(self):
return self._S / (self._n - 1) if self._n > 1 else np.square(self._M)
@property
def std(self):
return np.sqrt(self.var)
@property
def shape(self):
return self._M.shape
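# A usage sketch of RunningStat: streaming mean/std over observations of shape (2,).
#     rs = RunningStat((2,))
#     for obs in ([1., 2.], [3., 4.], [5., 6.]):
#         rs.push(obs)
#     rs.mean   # -> array([3., 4.])
#     rs.std    # -> array([2., 2.])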
class Identity:
'''
A convenience class which simply implements __call__
as the identity function
'''
def __call__(self, x, *args, **kwargs):
return x
def reset(self):
pass
class RewardFilter:
"""
Incorrect reward normalization [copied from OAI code]
update return
divide reward by std(return) without subtracting and adding back mean
"""
def __init__(self, prev_filter, shape, gamma, clip=None):
assert shape is not None
self.gamma = gamma
self.prev_filter = prev_filter
self.rs = RunningStat(shape)
self.ret = np.zeros(shape)
self.clip = clip
def __call__(self, x, **kwargs):
x = self.prev_filter(x, **kwargs)
self.ret = self.ret * self.gamma + x
self.rs.push(self.ret)
x = x / (self.rs.std + 1e-8)
if self.clip:
x = np.clip(x, -self.clip, self.clip)
return x
def reset(self):
self.ret = np.zeros_like(self.ret)
self.prev_filter.reset()
class ZFilter:
"""
y = (x-mean)/std
using running estimates of mean,std
"""
def __init__(self, prev_filter, shape, center=True, scale=True, clip=None):
assert shape is not None
self.center = center
self.scale = scale
self.clip = clip
self.rs = RunningStat(shape)
self.prev_filter = prev_filter
def __call__(self, x, **kwargs):
x = self.prev_filter(x, **kwargs)
self.rs.push(x)
if self.center:
x = x - self.rs.mean
if self.scale:
if self.center:
x = x / (self.rs.std + 1e-8)
else:
diff = x - self.rs.mean
diff = diff/(self.rs.std + 1e-8)
x = diff + self.rs.mean
if self.clip:
x = np.clip(x, -self.clip, self.clip)
return x
def reset(self):
self.prev_filter.reset()
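# A usage sketch of the filter classes above: chain an Identity base filter into
# a ZFilter to standardize (and optionally clip) observations with running stats.
#     obs_filter = ZFilter(Identity(), shape=(4,), clip=10.0)
#     normed = obs_filter(raw_obs)   # raw_obs: any array-like of shape (4,)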
class StateWithTime:
'''
Keeps track of the time t in an environment, and
adds t/T as a dimension to the state, where T is the
time horizon, given at initialization.
'''
def __init__(self, prev_filter, horizon):
self.counter = 0
self.horizon = horizon
self.prev_filter = prev_filter
def __call__(self, x, reset=False, count=True, **kwargs):
x = self.prev_filter(x, **kwargs)
self.counter += 1 if count else 0
self.counter = 0 if reset else self.counter
return np.array(list(x) + [self.counter/self.horizon,])
def reset(self):
self.prev_filter.reset()
# TODO: redo this in a not fucked way (ie using python language features)
class Trajectories:
def __init__(self, states=None, rewards=None, returns=None, not_dones=None,
actions=None, action_log_probs=None, advantages=None,
unrolled=False, values=None):
self.states = states
self.rewards = rewards
self.returns = returns
self.values = values
self.not_dones = not_dones
self.actions = actions
self.action_log_probs = action_log_probs
self.advantages = advantages
self.unrolled = unrolled
# this is disgusting and we should fix it
if states is not None:
num_saps = states.shape[0]
assert states is None or states.shape[0] == num_saps
assert rewards is None or rewards.shape[0] == num_saps
assert returns is None or returns.shape[0] == num_saps
assert values is None or values.shape[0] == num_saps
assert not_dones is None or not_dones.shape[0] == num_saps
assert actions is None or actions.shape[0] == num_saps
assert action_log_probs is None or action_log_probs.shape[0] == num_saps
assert advantages is None or advantages.shape[0] == num_saps
self.size = num_saps
def unroll(self):
assert not self.unrolled
return self.tensor_op(unroll, should_wrap=False)
def tensor_op(self, lam, should_wrap=True):
if should_wrap:
def op(*args):
return [lam(v) for v in args]
else:
op = lam
tt = op(self.states, self.rewards, self.returns, self.not_dones)
tt2 = op(self.actions, self.action_log_probs, self.advantages)
values, = op(self.values)
ts = Trajectories(states=tt[0], rewards=tt[1], returns=tt[2],
not_dones=tt[3], actions=tt2[0],
action_log_probs=tt2[1], advantages=tt2[2],
values=values, unrolled=True)
return ts
########################
### NEURAL NETWORK HELPERS:
# orthogonal_init
########################
def orthogonal_init(tensor, gain=1):
'''
Fills the input `Tensor` using the orthogonal initialization scheme from OpenAI
Args:
tensor: an n-dimensional `torch.Tensor`, where :math:`n \geq 2`
gain: optional scaling factor
Examples:
>>> w = torch.empty(3, 5)
>>> orthogonal_init(w)
'''
if tensor.ndimension() < 2:
raise ValueError("Only tensors with 2 or more dimensions are supported")
rows = tensor.size(0)
cols = tensor[0].numel()
flattened = tensor.new(rows, cols).normal_(0, 1)
if rows < cols:
flattened.t_()
    # Compute an orthogonal factor via SVD (used here instead of a QR factorization)
u, s, v = ch.svd(flattened, some=True)
if rows < cols:
u.t_()
q = u if tuple(u.shape) == (rows, cols) else v
with ch.no_grad():
tensor.view_as(q).copy_(q)
tensor.mul_(gain)
return tensor
| [] |
2024-01-10 | jmichaux/gym-robotics | gym_robotics~envs~base_env.py | """
Adapted from OpenAI Gym
https://github.com/openai/gym/blob/master/gym/envs/robotics/robot_env.py
"""
import numpy as np
import gym
from gym import error, spaces
from gym.utils import seeding
class BaseEnv(gym.GoalEnv):
def __init__(self, n_actions, discrete_actions, n_substeps):
self.seed()
self.goal = self._sample_goal()
self.discrete_actions = discrete_actions
self.action_space = self._set_action_space(n_actions)
self.observation_space = self._set_observation_space()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
if self.discrete_actions:
pass
else:
action = np.clip(action, self.action_space.low, self.action_space.high)
self._apply_action(action)
obs = self._get_obs()
done = False
info = {
'is_success': self._is_success(obs['achieved_goal'], self.goal)
}
reward = self.compute_reward(obs['achieved_goal'], self.goal, info)
return obs, reward, done, info
def reset(self):
"""Resets the environment
"""
raise NotImplementedError()
def initial_setup(self):
"""Initial environment setup
"""
raise NotImplementedError()
def _get_obs(self):
"""Returns the observation.
"""
raise NotImplementedError()
def _set_observation_space(self):
"""Returns the observation space
"""
raise NotImplementedError()
    def _set_action_space(self, n_actions):
"""Returns the action space
"""
raise NotImplementedError()
def _apply_action(self, action):
"""Applies the given action to the simulation.
"""
raise NotImplementedError()
def _is_success(self, achieved_goal, desired_goal):
"""Indicates whether or not the achieved goal successfully achieved the desired goal.
"""
raise NotImplementedError()
def _sample_goal(self):
"""Samples a new goal and returns it.
"""
raise NotImplementedError()
def _env_setup(self, sim, arm, initial_pose):
"""Initial configuration of the environment. Can be used to choose configure initial state,
choose robot arm, choose simulation, load objects, and extract information from the simulation.
"""
return
def _viewer_setup(self):
"""Initial configuration of the viewer. Can be used to set the camera position,
for example.
"""
pass
def _render_callback(self):
"""A custom callback that is called before rendering. Can be used
to implement custom visualizations.
"""
pass
def _step_callback(self):
"""A custom callback that is called after stepping the simulation. Can be used
to enforce additional constraints on the simulation state.
"""
pass
| [] |
2024-01-10 | kesamet/ai-assistant | src~agents.py | from langchain.agents import AgentType, initialize_agent
from langchain.memory import ConversationBufferWindowMemory
from langchain.schema import HumanMessage, AIMessage
from langchain_google_genai import GoogleGenerativeAI
from src.tools import (
search_tool,
wikipedia_tool,
wolfram_tool,
calculator_tool,
newsapi_tool,
)
LLM = GoogleGenerativeAI(model="models/text-bison-001", temperature=0.0)
MEMORY_BUFFER_WINDOW = 10
def build_agent(messages: list):
memory = _build_memory(messages)
agent = initialize_agent(
[search_tool, wikipedia_tool, wolfram_tool, calculator_tool, newsapi_tool],
LLM,
agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
verbose=True,
max_iterations=3,
memory=memory,
)
return agent
def _build_memory(messages: list):
memory = ConversationBufferWindowMemory(
k=MEMORY_BUFFER_WINDOW, memory_key="chat_history", return_messages=True
)
for message in messages[-MEMORY_BUFFER_WINDOW:]:
if isinstance(message, AIMessage):
memory.chat_memory.add_ai_message(message.content)
elif isinstance(message, HumanMessage):
memory.chat_memory.add_user_message(message.content)
return memory
| [] |
2024-01-10 | kesamet/ai-assistant | src~tools~yahoo_finance.py | from typing import Optional
from requests.exceptions import HTTPError, ReadTimeout
from urllib3.exceptions import ConnectionError
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.tools.base import BaseTool
class YahooFinanceTool(BaseTool):
"""Tool that searches financial data on Yahoo Finance."""
name: str = "yahoo_finance"
description: str = (
"Useful for when you need to find financial data about a public company. "
"Input should be a company ticker. For example, AAPL for Apple, MSFT for Microsoft."
)
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the Yahoo Finance News tool."""
try:
import yfinance
except ImportError:
raise ImportError(
"Could not import yfinance python package. "
"Please install it with `pip install yfinance`."
)
try:
company = yfinance.Ticker(query)
except (HTTPError, ReadTimeout, ConnectionError):
return f"Company ticker {query} not found."
try:
df = company.history()
except (HTTPError, ReadTimeout, ConnectionError):
            return f"No data found for the company searched with {query} ticker."
if df.empty:
            return f"No price history found for the company searched with {query} ticker."
return df
| [] |
2024-01-10 | kesamet/ai-assistant | src~mistral.py | import logging
import os
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms.ctransformers import CTransformers
from src import CFG
logging.basicConfig(level=logging.INFO)
def load_mistral() -> CTransformers:
"""Load mistral model."""
logging.info("Loading mistral model ...")
model = CTransformers(
model=os.path.join(CFG.MODELS_DIR, CFG.MISTRAL.MODEL_PATH),
model_type=CFG.MISTRAL.MODEL_TYPE,
config={
"max_new_tokens": CFG.MAX_NEW_TOKENS,
"temperature": CFG.TEMPERATURE,
"repetition_penalty": CFG.REPETITION_PENALTY,
"context_length": CFG.CONTEXT_LENGTH,
},
callbacks=[StreamingStdOutCallbackHandler()],
)
logging.info("Model loaded")
return model
| [] |
2024-01-10 | kesamet/ai-assistant | src~llama2.py | import logging
import os
from typing import List, Union
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms.ctransformers import CTransformers
from langchain.schema import SystemMessage, HumanMessage, AIMessage
from src import CFG
logging.basicConfig(level=logging.INFO)
def load_llama2() -> CTransformers:
"""Load Llama-2 model."""
logging.info("Loading llama2 model ...")
model = CTransformers(
model=os.path.join(CFG.MODELS_DIR, CFG.LLAMA2.MODEL_PATH),
model_type=CFG.LLAMA2.MODEL_TYPE,
config={
"max_new_tokens": CFG.MAX_NEW_TOKENS,
"temperature": CFG.TEMPERATURE,
"repetition_penalty": CFG.REPETITION_PENALTY,
"context_length": CFG.CONTEXT_LENGTH,
},
callbacks=[StreamingStdOutCallbackHandler()],
)
logging.info("Model loaded")
return model
def llama2_prompt(messages: List[Union[SystemMessage, HumanMessage, AIMessage]]) -> str:
"""Convert the messages to Llama2 compliant format."""
messages = _convert_langchainschema_to_dict(messages)
B_INST = "[INST]"
E_INST = "[/INST]"
B_SYS = "<<SYS>>\n"
E_SYS = "\n<</SYS>>\n\n"
BOS = "<s>"
EOS = "</s>"
DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. \
Always answer as helpfully as possible, while being safe. Please ensure that your responses \
are socially unbiased and positive in nature. If a question does not make any sense, \
or is not factually coherent, explain why instead of answering something not correct."""
if messages[0]["role"] != "system":
messages = [
{
"role": "system",
"content": DEFAULT_SYSTEM_PROMPT,
}
] + messages
messages = [
{
"role": messages[1]["role"],
"content": B_SYS + messages[0]["content"] + E_SYS + messages[1]["content"],
}
] + messages[2:]
messages_list = [
f"{BOS}{B_INST} {(prompt['content']).strip()} {E_INST} {(answer['content']).strip()} {EOS}"
for prompt, answer in zip(messages[::2], messages[1::2])
]
messages_list.append(f"{BOS}{B_INST} {(messages[-1]['content']).strip()} {E_INST}")
return "".join(messages_list)
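# A usage sketch of llama2_prompt: a short history is flattened into the Llama-2
# chat format, roughly
# "<s>[INST] <<SYS>> system <</SYS>> user [/INST] assistant </s><s>[INST] user [/INST]".
#     msgs = [HumanMessage(content="Hi"), AIMessage(content="Hello!"),
#             HumanMessage(content="How are you?")]
#     prompt = llama2_prompt(msgs)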
def _convert_langchainschema_to_dict(
messages: List[Union[SystemMessage, HumanMessage, AIMessage]]
) -> List[dict]:
"""
Convert the chain of chat messages in list of langchain.schema format to
list of dictionary format.
"""
_messages = []
for message in messages:
if isinstance(message, SystemMessage):
_messages.append({"role": "system", "content": message.content})
elif isinstance(message, HumanMessage):
_messages.append({"role": "user", "content": message.content})
elif isinstance(message, AIMessage):
_messages.append({"role": "assistant", "content": message.content})
return _messages
| [
"content",
"You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct."
] |
2024-01-10 | kesamet/ai-assistant | streamlit_app~vision_assistant.py | import base64
import requests
import streamlit as st
from langchain.schema import HumanMessage, AIMessage
from src import CFG
from streamlit_app import get_http_status
from streamlit_app.utils import set_container_width
API_URL = f"http://{CFG.HOST}:{CFG.PORT_LLAVA}"
# sliding window of the most recent interactions
MEMORY_BUFFER_WINDOW = 6
def init_sess_state() -> None:
clear_button = st.sidebar.button("Clear Conversation", key="vision_assistant")
if clear_button or "llava_messages" not in st.session_state:
# llava_messages used in model
st.session_state.llava_messages = [
{
"role": "system",
"content": "You are an assistant who perfectly describes images.",
}
]
# chv_messages used for displaying
st.session_state.chv_messages = []
# image
st.session_state.image_bytes = None
def buffer_window_memory(messages: list) -> list:
"""
    Sliding window over the most recent interactions.
    Older interactions are not sent to the LLM, except for the system message
    and the first user/assistant exchange, which are kept to retain context.
"""
return messages[:3] + messages[3:][-MEMORY_BUFFER_WINDOW:]
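# For example, with MEMORY_BUFFER_WINDOW = 6 and a 12-message history, this keeps
# messages 0-2 (system prompt plus the first user/assistant exchange) and the six
# most recent messages 6-11, while messages 3-5 are dropped.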
def get_output(messages: list) -> str:
headers = {"Content-Type": "application/json"}
response = requests.post(API_URL, headers=headers, json={"inputs": messages})
return response.json()["choices"][0]["message"]
def vision_assistant():
set_container_width("80%")
st.sidebar.title("Vision Assistant")
st.sidebar.info(
"Vision Assistant is powered by [LLaVA](https://llava-vl.github.io/)."
)
st.sidebar.info(f"Running on {CFG.DEVICE}")
get_http_status(API_URL)
uploaded_file = st.sidebar.file_uploader(
"Upload your image", type=["png", "jpg", "jpeg"], accept_multiple_files=False
)
init_sess_state()
_img_bytes = uploaded_file or st.session_state.image_bytes
if _img_bytes is None:
st.info("Upload an image first.")
return
c0, c1 = st.columns(2)
c0.image(_img_bytes)
st.session_state.image_bytes = _img_bytes
img_b64 = base64.b64encode(_img_bytes.getvalue()).decode("utf-8")
with c1:
# Display chat history
for message in st.session_state.chv_messages:
if isinstance(message, HumanMessage):
with st.chat_message("user"):
st.markdown(message.content)
elif isinstance(message, AIMessage):
with st.chat_message("assistant"):
st.markdown(message.content)
if user_input := st.chat_input("Your input"):
with c1.chat_message("user"):
st.markdown(user_input)
if len(st.session_state.llava_messages) == 1:
message = {
"role": "user",
"content": [
{
"type": "image_url",
"image_url": {"url": f"data:image/jpeg;base64,{img_b64}"},
},
{"type": "text", "text": user_input},
],
}
else:
message = {"role": "user", "content": user_input}
st.session_state.llava_messages.append(message)
st.session_state.llava_messages = buffer_window_memory(
st.session_state.llava_messages
)
st.session_state.chv_messages.append(HumanMessage(content=user_input))
with c1.chat_message("assistant"):
with st.spinner("Thinking ..."):
message = get_output(st.session_state.llava_messages)
st.markdown(message["content"])
st.session_state.llava_messages.append(message)
st.session_state.chv_messages.append(AIMessage(content=message["content"]))
| [
"[{'type': 'image_url', 'image_url': {'url': 'data:image/jpeg;base64,PLACEHOLDER'}}, {'type': 'text', 'text': PLACEHOLDER}]",
"content",
"You are an assistant who perfectly describes images."
] |
2024-01-10 | kesamet/ai-assistant | src~tools~newsapi.py | import os
from typing import Optional
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.tools.base import BaseTool
SOURCES = "bbc-news,the-verge,the-wall-street-journal"
class NewsAPITool(BaseTool):
"""Tool that searches news using News API."""
name: str = "news"
description: str = (
"Useful when you need to get top headlines from major news sources "
"such as BBC News and Wall Street Journal."
)
top_k: int = 10
"""The number of results to return."""
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use NewsAPI tool."""
try:
from newsapi import NewsApiClient
except ImportError:
            raise ImportError(
                "Could not import newsapi python package. "
                "Please install it with `pip install newsapi-python`."
            )
try:
newsapi = NewsApiClient(api_key=os.environ["NEWSAPI_API_KEY"])
except KeyError:
            raise KeyError("NEWSAPI_API_KEY is not found in environ.")
top_headlines = newsapi.get_top_headlines(q=query, sources=SOURCES)
result = "\n\n".join(
[
"\n".join([n["title"], n["description"]])
for n in top_headlines["articles"]
]
)
if not result:
return f"No news found for '{query}'."
return result
| [] |
2024-01-10 | kesamet/ai-assistant | streamlit_app~code_assistant.py | import requests
import streamlit as st
from langchain.schema import HumanMessage, AIMessage
from src import CFG
from src.codellama import get_prompt
from streamlit_app import get_http_status
API_URL = f"http://{CFG.HOST}:{CFG.PORT_CODELLAMA}"
def init_messages() -> None:
clear_button = st.sidebar.button("Clear Conversation", key="code_assistant")
if clear_button or "ca_messages" not in st.session_state:
st.session_state.ca_messages = []
def get_answer(inputs: str) -> str:
payload = {"inputs": get_prompt(inputs)}
headers = {"Content-Type": "application/json"}
response = requests.post(API_URL, headers=headers, json=payload)
return response.json()["content"]
def code_assistant():
st.sidebar.title("Code Assistant")
st.sidebar.info("Code Assistant is powered by CodeLlama.")
st.sidebar.info(f"Running on {CFG.DEVICE}")
get_http_status(API_URL)
init_messages()
# Display chat history
for message in st.session_state.ca_messages:
if isinstance(message, HumanMessage):
with st.chat_message("user"):
st.markdown(message.content)
elif isinstance(message, AIMessage):
with st.chat_message("assistant"):
st.markdown(message.content)
if user_input := st.chat_input("Your input"):
with st.chat_message("user"):
st.markdown(user_input)
st.session_state.ca_messages.append(HumanMessage(content=user_input))
with st.chat_message("assistant"):
with st.spinner("Thinking ..."):
answer = get_answer(user_input)
st.markdown(answer)
st.session_state.ca_messages.append(AIMessage(content=answer))
| [] |
2024-01-10 | kesamet/ai-assistant | src~codellama.py | import logging
import os
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms.ctransformers import CTransformers
from src import CFG
logging.basicConfig(level=logging.INFO)
def load_codellama() -> CTransformers:
"""Load codellama model."""
logging.info("Loading codellama model ...")
model = CTransformers(
model=os.path.join(CFG.MODELS_DIR, CFG.CODELLAMA.MODEL_PATH),
model_type=CFG.CODELLAMA.MODEL_TYPE,
config={
"max_new_tokens": CFG.MAX_NEW_TOKENS,
"temperature": CFG.TEMPERATURE,
"repetition_penalty": CFG.REPETITION_PENALTY,
"context_length": CFG.CONTEXT_LENGTH,
},
callbacks=[StreamingStdOutCallbackHandler()],
)
logging.info("Model loaded")
return model
def get_prompt(query: str) -> str:
"""
Generate a prompt based on Llama-2 prompt template.
Args:
query (str): The coding problem.
Returns:
str: The prompt.
"""
template = """[INST] Write code to solve the following coding problem that obeys \
the constraints and passes the example test cases. Please wrap your code answer \
using ```:
{query}
[/INST]"""
return template.format(query=query)
| [
"[INST] Write code to solve the following coding problem that obeys the constraints and passes the example test cases. Please wrap your code answer using ```:\n{query}\n[/INST]"
] |
2024-01-10 | kesamet/ai-assistant | streamlit_app~ai_agents.py | import streamlit as st
from langchain.schema import HumanMessage, AIMessage
from src.agents import build_agent
def init_messages() -> None:
clear_button = st.sidebar.button("Clear Conversation", key="ai_agents")
if clear_button or "aa_messages" not in st.session_state:
st.session_state.aa_messages = []
def get_output(user_input: str, messages: list) -> str:
agent = build_agent(messages)
try:
return agent.run(user_input)
except Exception:
return "GoogleGenerativeAI is not available. Did you provide an API key?"
def ai_agents():
st.sidebar.title("AI Agents")
st.sidebar.info(
"AI Assistant is powered by text-bison and has access to wikipedia, search, "
"News API, Wolfram and calculator tools."
)
init_messages()
# Display chat history
for message in st.session_state.aa_messages:
if isinstance(message, HumanMessage):
with st.chat_message("user"):
st.markdown(message.content)
elif isinstance(message, AIMessage):
with st.chat_message("assistant"):
st.markdown(message.content)
if user_input := st.chat_input("Your input"):
with st.chat_message("user"):
st.markdown(user_input)
with st.chat_message("assistant"):
with st.spinner("Thinking ..."):
output = get_output(user_input, st.session_state.aa_messages)
st.markdown(output)
st.session_state.aa_messages.append(HumanMessage(content=user_input))
st.session_state.aa_messages.append(AIMessage(content=output))
| [] |
2024-01-10 | tayanzhuifeng/DocsGPT | scripts~parser~py2doc.py | import os
import ast
import tiktoken
from pathlib import Path
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
def find_files(directory):
files_list = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith('.py'):
files_list.append(os.path.join(root, file))
return files_list
def extract_functions(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
functions = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
func_name = node.name
func_def = ast.get_source_segment(source_code, node)
functions[func_name] = func_def
return functions
def extract_classes(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
classes = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.ClassDef):
class_name = node.name
function_names = []
for subnode in ast.walk(node):
if isinstance(subnode, ast.FunctionDef):
function_names.append(subnode.name)
classes[class_name] = ", ".join(function_names)
return classes
def extract_functions_and_classes(directory):
files = find_files(directory)
functions_dict = {}
classes_dict = {}
for file in files:
functions = extract_functions(file)
if functions:
functions_dict[file] = functions
classes = extract_classes(file)
if classes:
classes_dict[file] = classes
return functions_dict, classes_dict
def parse_functions(functions_dict, formats, dir):
c1 = len(functions_dict)
for i, (source, functions) in enumerate(functions_dict.items(), start=1):
print(f"Processing file {i}/{c1}")
source_w = source.replace(dir+"/", "").replace("."+formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for j, (name, function) in enumerate(functions.items(), start=1):
print(f"Processing function {j}/{len(functions)}")
prompt = PromptTemplate(
input_variables=["code"],
template="Code: \n{code}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(code=function))
mode = "a" if Path(f"outputs/{source_w}").exists() else "w"
with open(f"outputs/{source_w}", mode) as f:
f.write(f"\n\n# Function name: {name} \n\nFunction: \n```\n{function}\n```, \nDocumentation: \n{response}")
def parse_classes(classes_dict, formats, dir):
c1 = len(classes_dict)
for i, (source, classes) in enumerate(classes_dict.items()):
print(f"Processing file {i+1}/{c1}")
source_w = source.replace(dir+"/", "").replace("."+formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for name, function_names in classes.items():
print(f"Processing Class {i+1}/{c1}")
prompt = PromptTemplate(
input_variables=["class_name", "functions_names"],
template="Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(class_name=name, functions_names=function_names))
with open(f"outputs/{source_w}", "a" if Path(f"outputs/{source_w}").exists() else "w") as f:
f.write(f"\n\n# Class name: {name} \n\nFunctions: \n{function_names}, \nDocumentation: \n{response}")
def transform_to_docs(functions_dict, classes_dict, formats, dir):
docs_content = ''.join([str(key) + str(value) for key, value in functions_dict.items()])
docs_content += ''.join([str(key) + str(value) for key, value in classes_dict.items()])
num_tokens = len(tiktoken.get_encoding("cl100k_base").encode(docs_content))
total_price = ((num_tokens / 1000) * 0.02)
print(f"Number of Tokens = {num_tokens:,d}")
print(f"Approx Cost = ${total_price:,.2f}")
user_input = input("Price Okay? (Y/N)\n").lower()
if user_input == "y" or user_input == "":
if not Path("outputs").exists():
Path("outputs").mkdir()
parse_functions(functions_dict, formats, dir)
parse_classes(classes_dict, formats, dir)
print("All done!")
else:
print("The API was not called. No money was spent.") | [
"Code: \n{code}, \nDocumentation: ",
"functions_names",
"class_name",
"Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: "
] |
2024-01-10 | tayanzhuifeng/DocsGPT | scripts~code_docs_gen.py | from pathlib import Path
from langchain.text_splitter import CharacterTextSplitter
import faiss
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import pickle
import dotenv
import tiktoken
import sys
from argparse import ArgumentParser
import ast
dotenv.load_dotenv()
ps = list(Path("inputs").glob("**/*.py"))
data = []
sources = []
for p in ps:
with open(p) as f:
data.append(f.read())
sources.append(p)
# with open('inputs/client.py', 'r') as f:
# tree = ast.parse(f.read())
# print(tree)
def get_functions_in_class(node):
functions = []
functions_code = []
for child in node.body:
if isinstance(child, ast.FunctionDef):
functions.append(child.name)
functions_code.append(ast.unparse(child))
return functions, functions_code
def get_classes_and_functions(source_code):
tree = ast.parse(source_code)
classes = {}
for node in tree.body:
if isinstance(node, ast.ClassDef):
class_name = node.name
function_name, function = get_functions_in_class(node)
# join function name and function code
functions = dict(zip(function_name, function))
classes[class_name] = functions
return classes
structure_dict = {}
c1 = 0
for code in data:
    classes = get_classes_and_functions(code)
source = str(sources[c1])
structure_dict[source] = classes
c1 += 1
# save the structure dict as json
import json
with open('structure_dict.json', 'w') as f:
json.dump(structure_dict, f)
# llm = OpenAI(temperature=0)
# prompt = PromptTemplate(
# input_variables=["code"],
# template="Code: {code}, Documentation: ",
# )
#
# print(prompt.format(code="print('hello world')"))
# print(llm(prompt.format(code="print('hello world')")))
if not Path("outputs").exists():
Path("outputs").mkdir()
c1 = len(structure_dict)
c2 = 0
for source, classes in structure_dict.items():
c2 += 1
print(f"Processing file {c2}/{c1}")
f1 = len(classes)
f2 = 0
for class_name, functions in classes.items():
f2 += 1
print(f"Processing class {f2}/{f1}")
source_w = source.replace("inputs/", "")
source_w = source_w.replace(".py", ".txt")
if not Path(f"outputs/{source_w}").exists():
with open(f"outputs/{source_w}", "w") as f:
f.write(f"Class: {class_name}")
else:
with open(f"outputs/{source_w}", "a") as f:
f.write(f"\n\nClass: {class_name}")
# append class name to the front
        b1 = len(functions)
        for b2, function in enumerate(functions, start=1):
            print(f"Processing function {b2}/{b1}")
prompt = PromptTemplate(
input_variables=["code"],
template="Code: \n{code}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(code=functions[function]))
if not Path(f"outputs/{source_w}").exists():
with open(f"outputs/{source_w}", "w") as f:
f.write(f"Function: {functions[function]}, \nDocumentation: {response}")
else:
with open(f"outputs/{source_w}", "a") as f:
f.write(f"\n\nFunction: {functions[function]}, \nDocumentation: {response}")
| [
"Code: \n{code}, \nDocumentation: "
] |
2024-01-10 | tayanzhuifeng/DocsGPT | application~parser~py2doc.py | import os
import ast
import tiktoken
from pathlib import Path
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
def find_files(directory):
files_list = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith('.py'):
files_list.append(os.path.join(root, file))
return files_list
def extract_functions(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
functions = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
func_name = node.name
func_def = ast.get_source_segment(source_code, node)
functions[func_name] = func_def
return functions
def extract_classes(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
classes = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.ClassDef):
class_name = node.name
function_names = []
for subnode in ast.walk(node):
if isinstance(subnode, ast.FunctionDef):
function_names.append(subnode.name)
classes[class_name] = ", ".join(function_names)
return classes
def extract_functions_and_classes(directory):
files = find_files(directory)
functions_dict = {}
classes_dict = {}
for file in files:
functions = extract_functions(file)
if functions:
functions_dict[file] = functions
classes = extract_classes(file)
if classes:
classes_dict[file] = classes
return functions_dict, classes_dict
def parse_functions(functions_dict, formats, dir):
c1 = len(functions_dict)
for i, (source, functions) in enumerate(functions_dict.items(), start=1):
print(f"Processing file {i}/{c1}")
source_w = source.replace(dir+"/", "").replace("."+formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for j, (name, function) in enumerate(functions.items(), start=1):
print(f"Processing function {j}/{len(functions)}")
prompt = PromptTemplate(
input_variables=["code"],
template="Code: \n{code}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(code=function))
mode = "a" if Path(f"outputs/{source_w}").exists() else "w"
with open(f"outputs/{source_w}", mode) as f:
f.write(f"\n\n# Function name: {name} \n\nFunction: \n```\n{function}\n```, \nDocumentation: \n{response}")
def parse_classes(classes_dict, formats, dir):
c1 = len(classes_dict)
for i, (source, classes) in enumerate(classes_dict.items()):
print(f"Processing file {i+1}/{c1}")
source_w = source.replace(dir+"/", "").replace("."+formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for name, function_names in classes.items():
print(f"Processing Class {i+1}/{c1}")
prompt = PromptTemplate(
input_variables=["class_name", "functions_names"],
template="Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(class_name=name, functions_names=function_names))
with open(f"outputs/{source_w}", "a" if Path(f"outputs/{source_w}").exists() else "w") as f:
f.write(f"\n\n# Class name: {name} \n\nFunctions: \n{function_names}, \nDocumentation: \n{response}")
def transform_to_docs(functions_dict, classes_dict, formats, dir):
docs_content = ''.join([str(key) + str(value) for key, value in functions_dict.items()])
docs_content += ''.join([str(key) + str(value) for key, value in classes_dict.items()])
num_tokens = len(tiktoken.get_encoding("cl100k_base").encode(docs_content))
total_price = ((num_tokens / 1000) * 0.02)
print(f"Number of Tokens = {num_tokens:,d}")
print(f"Approx Cost = ${total_price:,.2f}")
user_input = input("Price Okay? (Y/N)\n").lower()
if user_input == "y" or user_input == "":
if not Path("outputs").exists():
Path("outputs").mkdir()
parse_functions(functions_dict, formats, dir)
parse_classes(classes_dict, formats, dir)
print("All done!")
else:
print("The API was not called. No money was spent.") | [
"Code: \n{code}, \nDocumentation: ",
"functions_names",
"class_name",
"Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: "
] |
2024-01-10 | VarunThejaT/palate | speech_to_text~audio_to_text.py | from pydub import AudioSegment
import math
import openai
import os
import tiktoken
from dotenv import load_dotenv
load_dotenv()
from nr_openai_observability import monitor
monitor.initialization()
def num_tokens_from_string(string: str, encoding_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
# Set up the OpenAI API client
openai.api_key = os.getenv("OPENAI_API_KEY")
file_location = "/home/varun/Downloads/TheTimFerrissShow_Eric Cressey.mp3"
file_stats = os.stat(file_location)
print(file_stats)
print(f'File Size in Bytes is {file_stats.st_size}')
sound = AudioSegment.from_mp3(file_location)
MBBytes=2**20
num_slices=math.ceil(file_stats.st_size/MBBytes/25)
print(f"num slices: {num_slices}")
#whisper
transcription_arr = []
slice_len = len(sound) / num_slices
for i in range(num_slices):
if i==num_slices-1:
new = sound[i*slice_len:]
else:
new = sound[i*slice_len:(i+1)*slice_len]
# writing mp3 files is a one liner
new.export("file_"+ str(i) +".mp3", format="mp3")
file = open("file_"+ str(i) +".mp3", "rb")
print("calling whisper API")
part_transcription = openai.Audio.transcribe("whisper-1", file)
transcription_arr.append(part_transcription.text)
#Full text
full_transcription = "".join(transcription_arr)
words_of_transcription = full_transcription.split(" ")
GPT_slices=math.ceil(num_tokens_from_string(str(full_transcription), "cl100k_base")/3000)
#summarize full text
summarization=[]
for i in range(GPT_slices):
messages = [ {"role": "system", "content":
"You are a summarization machine for a podcast."} ]
message="Convert the text given by ```" + full_transcription[i*len(full_transcription)//GPT_slices:(i+1)*len(full_transcription)//GPT_slices] + " ``` into a " + str(2000//GPT_slices) + " word text"
if message:
messages.append(
{"role": "user", "content": message},
)
print("Calling chatgpt")
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages
)
reply = chat.choices[0].message.content
print("Number of words:" + str(len(reply.split())))
print(f"ChatGPT: {reply}")
messages.append({"role": "assistant", "content": reply})
summarization.append(reply)
summary = "".join(summarization)
words_of_summary = summary.split(" ")
mins = 5
summarization=[]
messages = [ {"role": "system", "content":
"You are a text to podcast machine."} ]
message="Can you give me a text that is written in a way that David Attenborough would read it? Start with a catch phrase and mention 'Palate' as Sponsor. Make the podcast "+ str(mins*132) +" words long. \\ Content: ```" + summary + "```"
if message:
messages.append(
{"role": "user", "content": message},
)
print("Calling chatgpt")
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages
)
reply = chat.choices[0].message.content
print("Number of words:" + str(len(reply.split())))
print(f"ChatGPT: {reply}")
#write to text
with open('TheTimFerrissShow_Eric_Cressey_'+ str(mins) +'_mins.txt', 'w') as f:
f.write(reply)
| [
"You are a text to podcast machine.",
"You are a summarization machine for a podcast."
] |
2024-01-10 | VarunThejaT/palate | speech_to_text~text_to_text.py | import math
import os
import requests
import tiktoken
from newspaper import fulltext
import openai
from dotenv import load_dotenv
load_dotenv()
# Set up the OpenAI API client
openai.api_key = os.getenv("OPENAI_API_KEY")
html = requests.get("https://www.brookings.edu/research/how-artificial-intelligence-is-transforming-the-world/").text
text=fulltext(html)
full_transcription = text
print(full_transcription)
def num_tokens_from_string(string: str, encoding_name: str) -> int:
"""Returns the number of tokens in a text string."""
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
GPT_slices = math.ceil(num_tokens_from_string(str(text), "cl100k_base")/3000)
GPT_slices
summarization=[]
for i in range(GPT_slices):
messages = [ {"role": "system", "content":
"You are a summarization machine for a podcast."} ]
message = "Convert the text given by ```" + text[i*len(text)//GPT_slices:(i+1)*len(full_transcription)//GPT_slices] + " ``` into a " + str(2000//GPT_slices) + " word text"
if message:
messages.append(
{"role": "user", "content": message},
)
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages
)
reply = chat.choices[0].message.content
messages.append({"role": "assistant", "content": reply})
summarization.append(reply)
summary = "".join(summarization)
words_of_summary = summary.split(" ")
mins=3
messages = [ {"role": "system", "content":
"You are a text to podcast machine."} ]
message="Can you give me a text that is written in a way that David Attenborough would read it? Start with a catch phrase and mention 'Palate' as Sponsor. Make the podcast "+ str(mins*132) +" words long. \\ Content: ```" + summary + "```"
if message:
messages.append(
{"role": "user", "content": message},
)
chat = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=messages
)
reply = chat.choices[0].message.content
print("Number of words:" + str(len(reply.split())))
print(f"ChatGPT: {reply}")
messages.append({"role": "assistant", "content": reply})
with open('david_AI_article_'+ str(mins) +'_mins.txt', 'w') as f:
f.write(reply) | [
"You are a text to podcast machine.",
"You are a summarization machine for a podcast."
] |
2024-01-10 | VarunThejaT/palate | speech_to_text~whisper_api_usage.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
# Set up the OpenAI API client
openai.api_key = os.getenv("OPENAI_API_KEY")
file = open("/home/varun/Downloads/TheTimFerrissShow_Eric Cressey.mp3", "rb")
transcription = openai.Audio.transcribe("whisper-1", file)
print(transcription)
| [] |
2024-01-10 | VarunThejaT/palate | speech_to_text~new_relic_example.py | import os
import openai
from dotenv import load_dotenv
from nr_openai_observability import monitor
load_dotenv()
monitor.initialization()
# Set up the OpenAI API client
openai.api_key = os.getenv("OPENAI_API_KEY")
file = open("/home/varun/Downloads/david.mp3", "rb")
transcription = openai.Audio.transcribe("whisper-1", file)
print(transcription)
# import os
# import openai
# from nr_openai_observability import monitor
# from dotenv import load_dotenv
# load_dotenv()
# os.environ["NEW_RELIC_LICENSE_KEY"] = "40efa7bf334917db4467aba7398ff2d7267aNRAL"
# monitor.initialization()
# openai.api_key = os.getenv("OPENAI_API_KEY")
# openai.Completion.create(
# model="text-davinci-003",
# prompt="What is Observability?",
# max_tokens=20,
# temperature=0
# )
| [] |
2024-01-10 | RekhuGopal/OpenAIHacks | QnA~QnA.py | import os
import openai
os.environ["OPENAI_API_KEY"] = "Your Open AI API Key"
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.Completion.create(
model="text-davinci-003",
prompt="Q: who is elon musk?\n A:",
temperature=0,
max_tokens=100,
top_p=1,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=["\n"]
)
#print(response)
print(response['choices'][0]['text']) | [
"Q: who is elon musk?\n A:"
] |
2024-01-10 | RekhuGopal/OpenAIHacks | DALL-E-2~VariationImage.py | import os
import openai
os.environ["OPENAI_API_KEY"] = "Your Open AI API Key"
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.Image.create_variation(
image=open("corgi_and_cat_paw.png", "rb"),
n=1,
size="1024x1024"
)
image_url = response['data'][0]['url'] | [] |
2024-01-10 | RekhuGopal/OpenAIHacks | DALL-E-2~EditImage.py | import os
import openai
os.environ["OPENAI_API_KEY"] = "Your Open AI API Key"
openai.api_key = os.getenv("OPENAI_API_KEY")
response = openai.Image.create_edit(
image=open("sunlit_lounge.png", "rb"),
mask=open("mask.png", "rb"),
prompt="A sunlit indoor lounge area with a pool containing a flamingo",
n=1,
size="1024x1024"
)
image_url = response['data'][0]['url'] | [] |
2024-01-10 | 5l1v3r1/GPT-Autonomous-SearchAgent | research_agent_custom.py | import openai
import arxiv
# Set up the OpenAI API
openai.api_key = "sk-" # Replace the string content with your OpenAI API key
"""
Wrap the OpenAI API call in this function
"""
def getResponse(prompt):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
temperature=0, # We want consistent behavior, so we set a very low temperature
messages=[
{"role": "system", "content": "You're a helpful assistant. Carefully follow the user's instructions."},
{"role": "user", "content": prompt}
]
)
response = str(response['choices'][0]['message']['content'])
return response
"""
Use GPT to determine the action to take by giving it the objective, memory, and tools.
If it think it has finished the objective, just give the answer.
If it needs more info, it will pick the tool to get the relevant information based on the tool description.
"""
def determineAction(objective, memory, tools):
formattedPrompt = f"""Determine if the following memory is enough to answer\n
the user's objective. Your past actions are stored in the memory for reference\n
    If it is enough, answer the question in the format: 'FINAL ANSWER: <answer>'. \n
If the memory is not enough, you can use a tool in the available tools section\n
to get more information. When using a tool you should use this format: \n
    'USE <tool_name>: <parameter>'. If no tool can help you achieve the user's \n
objective, then answer 'FINAL: CANNOT ANSWER'.
```Objective
Answer: {objective}
```
```Memory
{memory}
```
```Available Tools
{tools}
```
"""
response = getResponse(formattedPrompt)
(finished, result, memory) = parseResponse(response, memory, tools)
return (finished, result, memory)
"""
Parse the response from GPT to determine if the objective is finished.
If it is finished, just give the final answer.
If the objective cannot be finished with the context and tools, it will say it cannot answer
If GPT picks a tool, execute the tool and save the result of the tool in memory.
"""
def parseResponse(response, memory, tools):
finished = False
if response.startswith('FINAL ANSWER:'):
finished = True
memory.append(response)
return (finished, response, memory)
elif response == 'FINAL: CANNOT ANSWER':
finished = True
memory.append(response)
return (finished, response, memory)
elif response.startswith('USE:'):
# split the string using ':' as the delimiter
parsed_str = response.split(':')
# 'USE: searchArxiv with the search key word "ReAct reasoning and acting in language models" to gather more information.'
# get the tool name and parameter
tool_name = parsed_str[1].split(" ")[1]
parameter = parsed_str[1]
print("THOUGHT: " + response)
memory.append("THOUGHT: " + response)
result = executeTool(tool_name, parameter, tools)
new_memory = "OBSERVATION: " + str(result)
print(new_memory)
memory.append(new_memory)
return (finished, result, memory)
"""
Execute the tool that GPT picks using the parameter it gives.
Returns the execution result so that GPT can have the relevant info.
"""
def executeTool(tool_name, parameter, tools):
# Find the tool with the given name
tool = None
for t in tools:
if t['tool_name'] == tool_name:
tool = t
break
# If the tool is found, execute its function with the given parameter
if tool:
return tool['function_name'](parameter)
else:
return "Tool not found"
"""
Wrap the search arxiv function as a tool for GPT
Input is a search keyword
Output is a list of dictionaries with title, published date, authors, and summary of papers
"""
def searchArxiv(keyword):
# Perform a search with the given query
search = arxiv.Search(query=keyword, max_results=3)
# Get the metadata for each result and extract relevant information
results = []
for result in search.results():
title = result.title
published_date = result.published.strftime("%Y-%m-%d")
authors = ", ".join(author.name for author in result.authors)
summary = result.summary
# Store the extracted information as a dictionary
results.append((
"title: " + title,
"published_date: " + published_date,
"authors: " + authors,
"summary: " + summary
))
# Return the list of tuples containing the result information
return results
"""
Initialize memory, tools for the GPT agent.
Ask for a user objective and let it run iteratively untill the objective is achieved.
As a safety measure, it will also stop after 5 iterations just in case things go wrong.
"""
def startAgent():
objective = input("What is your research question? ")
# For simplicity, we will just use a list to store every thing.
# For production, you will probably use vector databases.
memory = []
tools = [{'tool_name': 'searchArxiv',
'description': """You can use this tool to search for scientific papers on Arxiv. The response will have title, author, published date, and summary.""",
'function_name': searchArxiv,
'parameter': 'search key word'}]
n = 0
while True:
(finished, result, memory) = determineAction(objective, memory, tools)
n += 1
if finished:
print(result)
return
if n > 2:
print("Ended for reaching limit.")
return
# What is ReAct reasoning and acting in language models?
startAgent()
| [
"You're a helpful assistant. Carefully follow the user's instructions.",
"Determine if the following memory is enough to answer\n\n the user's objective. Your past actions are stored in the memory for reference\n\n If it is enough, answer the question in the format: 'FINAL ANSWER: '. \n\n If the memory is not enough, you can use a tool in the available tools section\n\n to get more information. When using a tool you should use this format: \n\n 'USE :'. If no tool can help you achieve the user's \n\n objective, then answer 'FINAL: CANNOT ANSWER'.\n\n ```Objective\n Answer: PLACEHOLDER\n ```\n\n ```Memory\n PLACEHOLDER\n ```\n\n ```Available Tools\n PLACEHOLDER\n ```\n\n "
] |
2024-01-10 | 5l1v3r1/GPT-Autonomous-SearchAgent | research_agent_langchain.py | # langchain research agent
import openai
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.agents import Tool
from langchain.chat_models import ChatOpenAI
from langchain.utilities import ArxivAPIWrapper
# Set up the OpenAI API
openai.api_key = "sk-" # Replace the string content with your OpenAI API key
llm = ChatOpenAI(temperature=0) # Initialize the LLM to be used
arxiv = ArxivAPIWrapper()
arxiv_tool = Tool(
name="arxiv_search",
description="Search on arxiv. The tool can search a keyword on arxiv for the top papers. It will return publishing date, title, authors, and summary of the papers.",
func=arxiv.run
)
tools = [arxiv_tool]
agent_chain = initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
agent_chain.run("What is ReAct reasoning and acting in language models?")
| [] |
2024-01-10 | zhch158/langchain-Study | study~study01.py | import os
from utils import setup_workdir
from utils import env
from langchain.llms import OpenAI
import openai
question = "What would be a good company name for a company that makes colorful socks?"
# context = ""
# model="text-davinci-003"
# max_tokens=150
# response = openai.Completion.create(
# prompt=f"Answer the question based on the context below, and if the question can't be answered based on the context, say \"I don't know\"\n\nContext: {context}\n\n---\n\nQuestion: {question}\nAnswer:",
# temperature=0,
# max_tokens=max_tokens,
# top_p=1,
# frequency_penalty=0,
# presence_penalty=0,
# stop=None,
# model=model,
# )
# print(response)
llm = OpenAI(temperature=0)
print(llm(question))
| [] |
2024-01-10 | zhch158/langchain-Study | study~datalevel.py | import os
from langchain.chat_models import ChatOpenAI as OpenAI
import openai
from langchain.embeddings import HuggingFaceEmbeddings
from llama_index import LLMPredictor, SimpleDirectoryReader, VectorStoreIndex, LangchainEmbedding, ServiceContext, Document
import logging
import sys
# Import the setup_workdir and setup_env functions from the utils directory at the same level
# sys.path.insert(0, os.path.expanduser("~")+"/langchain-ChatGLM")
sys.path.insert(0, os.path.dirname(__file__) + "/..")
from utils.base import setup_env, setup_workdir
LOG_FORMAT = "%(levelname) -5s %(asctime)s" "-1d: %(message)s"
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=LOG_FORMAT, encoding='utf-8')
# logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
# The get_data_level function works as follows:
# 1. Read the data from data_level.txt, where each document is a block separated by \n\n
# 2. Load the embedding model via LangchainEmbedding, here sentence-transformers/paraphrase-multilingual-mpnet-base-v2
# 3. Build the index via VectorStoreIndex.from_documents
# 4. Build the query_engine via index.as_query_engine
# 5. Run the query via query_engine.query(query)
def get_data_level(query):
# define LLM model
llm_model = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo-0613", max_tokens=2048))
# embed_model = LangchainEmbedding(
# HuggingFaceEmbeddings(model_name="sentence-transformers/paraphrase-multilingual-mpnet-base-v2"))
embed_model = LangchainEmbedding(
HuggingFaceEmbeddings(model_name="GanymedeNil/text2vec-large-chinese"))
service_context = ServiceContext.from_defaults(
embed_model=embed_model,
llm_predictor=llm_model,
)
# documents = SimpleDirectoryReader(input_files=['data_level.txt']).load_data()
texts = open('data_level.txt', 'r', encoding='utf-8').read().split('\n\n')
    # Create Document objects, filtering out empty documents
documents = list()
for text in texts:
text=text.strip()
if text != '':
documents.append(Document(text))
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
# index.storage_context.persist()
query_engine = index.as_query_engine(similarity_top_k=3)
result = query_engine.query(query)
return result
if __name__ == '__main__':
# os.environ['OPENAI_API_KEY'] = ""
# openai.api_key = os.environ['OPENAI_API_KEY']
setup_workdir(os.path.dirname(__file__))
setup_env()
query = "请说明客户信息表中,身份证号,吸烟史,是否患有糖尿病等属性属于什么安全级别?"
results = get_data_level(query)
print("======================")
print(results)
| [] |
2024-01-10 | GAIR-NLP/factool | factool~knowledge_qa~google_serper.py | # The following code was adapted from https://github.com/hwchase17/langchain/blob/master/langchain/utilities/google_serper.py
"""Util that calls Google Search using the Serper.dev API."""
import pdb
import requests
import asyncio
import aiohttp
import yaml
import os
from factool.env_config import factool_env_config
# env
# serper_api_key = factool_env_config.serper_api_key
class GoogleSerperAPIWrapper():
"""Wrapper around the Serper.dev Google Search API.
You can create a free API key at https://serper.dev.
To use, you should have the environment variable ``SERPER_API_KEY``
set with your API key, or pass `serper_api_key` as a named parameter
to the constructor.
Example:
.. code-block:: python
from langchain import GoogleSerperAPIWrapper
google_serper = GoogleSerperAPIWrapper()
"""
def __init__(self, snippet_cnt = 10) -> None:
self.k = snippet_cnt
self.gl = "us"
self.hl = "en"
self.serper_api_key = os.environ.get("SERPER_API_KEY", None)
assert self.serper_api_key is not None, "Please set the SERPER_API_KEY environment variable."
assert self.serper_api_key != '', "Please set the SERPER_API_KEY environment variable."
async def _google_serper_search_results(self, session, search_term: str, gl: str, hl: str) -> dict:
headers = {
"X-API-KEY": self.serper_api_key or "",
"Content-Type": "application/json",
}
params = {"q": search_term, "gl": gl, "hl": hl}
async with session.post(
"https://google.serper.dev/search", headers=headers, params=params, raise_for_status=True
) as response:
return await response.json()
def _parse_results(self, results):
snippets = []
if results.get("answerBox"):
answer_box = results.get("answerBox", {})
if answer_box.get("answer"):
element = {"content":answer_box.get("answer"),"source":"None"}
return [element]
elif answer_box.get("snippet"):
element = {"content":answer_box.get("snippet").replace("\n", " "),"source":"None"}
return [element]
elif answer_box.get("snippetHighlighted"):
element = {"content":answer_box.get("snippetHighlighted"),"source":"None"}
return [element]
if results.get("knowledgeGraph"):
kg = results.get("knowledgeGraph", {})
title = kg.get("title")
entity_type = kg.get("type")
if entity_type:
element = {"content":f"{title}: {entity_type}","source":"None"}
snippets.append(element)
description = kg.get("description")
if description:
element = {"content":description,"source":"None"}
snippets.append(element)
for attribute, value in kg.get("attributes", {}).items():
element = {"content":f"{attribute}: {value}","source":"None"}
snippets.append(element)
for result in results["organic"][: self.k]:
if "snippet" in result:
element = {"content":result["snippet"],"source":result["link"]}
snippets.append(element)
for attribute, value in result.get("attributes", {}).items():
element = {"content":f"{attribute}: {value}","source":result["link"]}
snippets.append(element)
if len(snippets) == 0:
element = {"content":"No good Google Search Result was found","source":"None"}
return [element]
        # keep only the first k/2 snippets per query (results from query pairs are merged in run())
snippets = snippets[:int(self.k / 2)]
return snippets
async def parallel_searches(self, search_queries, gl, hl):
async with aiohttp.ClientSession() as session:
tasks = [self._google_serper_search_results(session, query, gl, hl) for query in search_queries]
search_results = await asyncio.gather(*tasks, return_exceptions=True)
return search_results
async def run(self, queries):
"""Run query through GoogleSearch and parse result."""
flattened_queries = []
for sublist in queries:
if sublist is None:
sublist = ['None', 'None']
for item in sublist:
flattened_queries.append(item)
results = await self.parallel_searches(flattened_queries, gl=self.gl, hl=self.hl)
snippets_list = []
for i in range(len(results)):
snippets_list.append(self._parse_results(results[i]))
snippets_split = [snippets_list[i] + snippets_list[i+1] for i in range(0, len(snippets_list), 2)]
return snippets_split
if __name__ == "__main__":
search = GoogleSerperAPIWrapper()
    print(asyncio.run(search.run([["What is the capital of the United States?", "Who is the president of the United States?"]]))) | [
"\n",
"snippet",
"snippetHighlighted",
" ",
"PLACEHOLDER: PLACEHOLDER",
"answer",
"No good Google Search Result was found"
] |
2024-01-10 | GAIR-NLP/factool | factool~utils~base~pipeline.py | import yaml
from factool.utils.openai_wrapper import OpenAIChat
import os
import pathlib
class pipeline():
def __init__(self, domain, foundation_model):
#if foundation_model == 'gpt-3.5-turbo' or foundation_model == 'gpt-4':
self.company = 'openai'
self.chat = OpenAIChat(model_name=foundation_model)
self.prompts_path = os.path.join(os.path.dirname(pathlib.Path(__file__)), "../prompts/")
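# Load the domain-specific self-check prompt bundled with the package.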
with open(os.path.join(self.prompts_path, "self_check.yaml"), 'r') as file:
data = yaml.load(file, Loader=yaml.FullLoader)
self.self_check_prompt = data[domain] | [] |
2024-01-10 | GAIR-NLP/factool | factool~utils~claim_extractor.py | import os
import pathlib
import openai
import yaml
import json
import asyncio
from tqdm import tqdm
from factool.env_config import factool_env_config
# env
# openai.api_key = factool_env_config.openai_api_key
config = {
'model_name': 'gpt-3.5-turbo',
'max_tokens': 2000,
'temperature': 0.0,
'top_p': 1,
'frequency_penalty': 0.0,
'presence_penalty': 0.0,
'n': 1
}
# Make api calls asynchronously
async def run_api(messages):
async def single_run(message):
output = await openai.ChatCompletion.acreate(  # async variant so asyncio.gather actually runs requests concurrently
model=config['model_name'],
messages=message,
max_tokens=config['max_tokens'],
temperature=config['temperature'],
top_p=config['top_p'],
frequency_penalty=config['frequency_penalty'],
presence_penalty=config['presence_penalty'],
n=config['n'],
)
return output.choices[0].message.content.strip()
responses = [single_run(message) for message in messages]
return await asyncio.gather(*responses)
# Import data from scientific.json
scientific_list = []
with open("../datasets/scientific/scientific.json", "r") as f:
data = json.load(f)
for dict_data in data:
cur_dict = {'dataset_name': 'scientific',
'question': dict_data["question"],
'factual_response': dict_data['factual_response']}
scientific_list.append(cur_dict)
# Apply template prompt
with open("./prompts/claim_extraction.yaml") as f:
data = yaml.load(f, Loader=yaml.FullLoader)
prompt = data['scientific']
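# Build one chat conversation per sample: the system message holds the extraction instructions, the user message holds the response to split into claims.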
messages_list = [
[
{"role": "system", "content": prompt['system']},
{"role": "user", "content": prompt['user'].format(input=sample['factual_response'])},
]
for sample in scientific_list
]
assert len(messages_list) == len(scientific_list), "The data length is different"
# Run the API to get the output
print("begin claims extraction...")
results = asyncio.run(run_api(messages_list))
for i in range(len(scientific_list)):
scientific_list[i]["claims"] = results[i]
with open('../datasets/scientific/scientific_claims.json', 'w') as f:
json.dump(scientific_list, f, indent=4)
"""
The scientific_claims.json file saved above may contain claims stored as raw JSON strings; the pass below parses them in place.
"""
with open("../datasets/scientific/scientific_claims.json", "r") as f:
data = json.load(f)
for data_i in tqdm(data, total=len(data)):
try:
data_i["claims"] = json.loads(data_i["claims"].strip())
except Exception:
print(data_i["claims"])
continue
with open("../datasets/scientific/scientific_claims.json", "w") as f:
json.dump(data, f, indent=4)
| [
"scientific",
"factual_response"
] |
2024-01-10 | evanwrm/solidchain | apps~api~src~solidchain~utils~callbacks.py | import sys
from typing import Any, Dict, List, Union
from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult
class FastAPIStreamCallbackHandler(BaseCallbackHandler):
"""Callback handler for streaming. Only works with LLMs that support streaming."""
@property
def always_verbose(self) -> bool:
"""Whether to call verbose callbacks even if verbose is False."""
return True
def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when LLM starts running."""
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
"""Run on new LLM token. Only available when streaming is enabled."""
sys.stdout.write(token)
sys.stdout.flush()
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
def on_llm_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Run when LLM errors."""
def on_chain_start(
self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
) -> None:
"""Run when chain starts running."""
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
def on_chain_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Run when chain errors."""
def on_tool_start(
self, serialized: Dict[str, Any], action: AgentAction, **kwargs: Any
) -> None:
"""Run when tool starts running."""
def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
def on_tool_error(
self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
) -> None:
"""Run when tool errors."""
def on_text(self, text: str, **kwargs: Any) -> None:
"""Run on arbitrary text."""
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
"""Run on agent end."""
| [] |
2024-01-10 | evanwrm/solidchain | apps~api~src~solidchain~api~api_v1~routes~causal.py | from importlib.metadata import version
from typing import Any, List, Optional
from fastapi import APIRouter, Body, Depends
from fastapi.responses import StreamingResponse
from langchain.agents import initialize_agent
from langchain.agents.load_tools import get_all_tool_names, load_tools
from langchain.agents.tools import Tool
from langchain.chains import ConversationChain, VectorDBQA
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.chains.summarize import load_summarize_chain
from pydantic import BaseModel
from sqlalchemy.orm import Session
from solidchain.api.dependencies import get_db
from solidchain.configs.config import settings
from solidchain.models.vectorstore import VectorStore
from solidchain.schemas.agents import Agent, AgentTool
from solidchain.schemas.chains import SummarizeChainType
from solidchain.schemas.text_generation import (
CausalGeneration,
CausalModel,
StreamingCausalGeneration,
)
from solidchain.schemas.vectorstore import VectorStore as VectorStoreSchema
from solidchain.utils import utils as sc_utils
from solidchain.utils.embeddings import get_embeddings_instance
from solidchain.utils.encoding import serialize_response
from solidchain.utils.llms import get_llm_instance
from solidchain.utils.vectorstores import get_vectorstore_instance
router = APIRouter()
@router.post("/generate", response_model=CausalGeneration)
def generate(
*,
text: str = Body(),
modelName: CausalModel = Body("text-curie-001"),
temperature: float = Body(0.7),
maxTokens: int = Body(1024),
streaming: bool = Body(False),
) -> Any:
llm_cls = get_llm_instance(llm_type=modelName)
llm = llm_cls(
model_name=modelName,
temperature=temperature,
max_tokens=maxTokens,
streaming=streaming,
openai_api_key=settings.OPENAI_API_KEY,
)
if streaming:
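# Yield one JSON-encoded chunk per streamed completion chunk; fall back to a single blocking completion if streaming fails.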
def streaming_response():
try:
generator = llm.stream(text)
for output in generator:
generation = StreamingCausalGeneration(
text=output["choices"][0]["text"]
)
yield generation.json()
except Exception:
generation = StreamingCausalGeneration(text=llm(text))
yield generation.json()
return StreamingResponse(streaming_response())
else:
output = llm(text)
generation = CausalGeneration(
text=output.strip(),
)
return generation
@router.post("/qa", response_model=CausalGeneration)
def qa(
*,
db: Session = Depends(get_db),
text: str = Body(),
modelName: CausalModel = Body("text-curie-001"),
temperature: float = Body(0.7),
maxTokens: int = Body(1024),
agent: Agent = Body("zero-shot-react-description"),
agentPath: str = Body(None),
agentTools: List[str] = Body(["serpapi", "llm-math"]),
chainType: SummarizeChainType = Body("stuff"),
) -> Any:
llm_cls = get_llm_instance(llm_type=modelName)
llm = llm_cls(
model_name=modelName,
temperature=temperature,
max_tokens=maxTokens,
openai_api_key=settings.OPENAI_API_KEY,
)
if agent or agentPath:
base_tools = set(agentTools) & set(get_all_tool_names())
vector_tools = set(agentTools) - base_tools
tools = load_tools(
base_tools, llm=llm, serpapi_api_key=settings.SERPAPI_API_KEY
)
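# Any requested tool that is not a built-in langchain tool is treated as a stored vector index and exposed through a VectorDBQA chain.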
if vector_tools:
vectorstores: List[VectorStoreSchema] = (
db.query(VectorStore)
.filter(VectorStore.vectorstoreId.in_(vector_tools))
.all()
)
for vectorstore_data in vectorstores:
embeddings = get_embeddings_instance(vectorstore_data.embeddingsType)
vectorstore = get_vectorstore_instance(
vectorstore_data.vectorDb,
persist_directory=vectorstore_data.index.path,
embedding_function=embeddings,
)
vectorstore_qachain = VectorDBQA.from_chain_type(
llm=llm, chain_type=chainType, vectorstore=vectorstore
)
tool = Tool(
name=vectorstore_data.name,
description=vectorstore_data.description,
func=vectorstore_qachain.run,
)
tools.append(tool)
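# Build the agent either from a named preset or from a serialized definition at agentPath; both cap execution at five reasoning steps.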
if agent:
agent_executor = initialize_agent(
tools=tools,
llm=llm,
agent=agent,
max_iterations=5,
early_stopping_method="generate",
return_intermediate_steps=True,
)
output = agent_executor(text)
elif agentPath:
agent_executor = initialize_agent(
tools=tools,
llm=llm,
agent_path=agentPath,
max_iterations=5,
early_stopping_method="generate",
return_intermediate_steps=True,
)
output = agent_executor(text)
else:
# No agent requested: run the bare LLM and mirror the agent executor's output shape
output = {"output": llm(text), "intermediate_steps": []}
generation = CausalGeneration(
text=output["output"].strip(), steps=output["intermediate_steps"]
)
return generation
@router.post("/summarize", response_model=CausalGeneration)
def summarize(
*,
text: str = Body(),
modelName: CausalModel = Body("text-curie-001"),
temperature: float = Body(0.7),
maxTokens: int = Body(1024),
chainType: SummarizeChainType = Body("stuff"),
) -> Any:
llm_cls = get_llm_instance(llm_type=modelName)
llm = llm_cls(
model_name=modelName,
temperature=temperature,
max_tokens=maxTokens,
openai_api_key=settings.OPENAI_API_KEY,
)
chain = load_summarize_chain(llm=llm, chain_type=chainType)
output = chain.run(text)
generation = CausalGeneration(
text=output.strip(),
)
return generation
@router.post("/conversational", response_model=CausalGeneration)
def conversational(
*,
text: str = Body(),
modelName: CausalModel = Body("gpt-3.5-turbo"),
temperature: float = Body(0.7),
maxTokens: int = Body(1024),
) -> Any:
llm_cls = get_llm_instance(llm_type=modelName)
llm = llm_cls(
model_name=modelName,
temperature=temperature,
max_tokens=maxTokens,
openai_api_key=settings.OPENAI_API_KEY,
)
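# A fresh ConversationBufferMemory is created per request, so conversation history only spans this single call.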
chain = ConversationChain(llm=llm, memory=ConversationBufferMemory())
output = chain.run(text)
generation = CausalGeneration(
text=output.strip(),
)
return generation
| [] |
2024-01-10 | evanwrm/solidchain | apps~api~src~solidchain~utils~llms.py | from langchain.llms import OpenAI, OpenAIChat
from solidchain.configs.config import settings
from solidchain.schemas.text_generation import CausalModel
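# Map the requested model name to the matching langchain LLM class: completion models use OpenAI, chat models use OpenAIChat.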
def get_llm_instance(llm_type: CausalModel):
match llm_type:
case CausalModel.TEXT_DAVINCI_003 | CausalModel.TEXT_CURIE_001 | CausalModel.TEXT_BABBAGE_001 | CausalModel.TEXT_ADA_001:
llm = OpenAI
case CausalModel.GPT_3_5_TURBO:
llm = OpenAIChat
case _:
raise NotImplementedError
return llm | [] |