date_collected | repo_name | file_name | file_contents | prompts
---|---|---|---|---|
2024-01-10 | RKP64/FinGLM | code~%E9%A6%92%E5%A4%B4%E7%A7%91%E6%8A%80~mantoutech~version.py | import camelot
import pdfplumber
import loguru
import langchain
import transformers
import torch
print('camelot=={}'.format(camelot.__version__))
print('pdfplumber=={}'.format(pdfplumber.__version__))
print('loguru=={}'.format(loguru.__version__))
print('langchain=={}'.format(langchain.__version__))
print('transformers=={}'.format(transformers.__version__))
print('torch=={}'.format(torch.__version__)) | [] |
2024-01-10 | zoez1995/steward-star | config.py | ## IMPORT NECESSARY PACKAGES
import os
import environ
from langchain.chains import RetrievalQA
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import AzureChatOpenAI
from langchain.schema import HumanMessage, SystemMessage, AIMessage
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts import PromptTemplate
from langchain.llms import AzureOpenAI
# import openai
## LOAD .ENV FILE FOR API CONNECTION
env = environ.Env()
# init Azure OpenAI API
environ.Env.read_env()
OPENAI_API_KEY = env("OPENAI_API_KEY")
AZURE_API_KEY = env("AZURE_API_KEY")
OPENAI_DEPLOYMENT_ENDPOINT = env("OPENAI_DEPLOYMENT_ENDPOINT")
OPENAI_DEPLOYMENT_VERSION = env("OPENAI_DEPLOYMENT_VERSION")
OPENAI_DEPLOYMENT_NAME = env("OPENAI_DEPLOYMENT_NAME")
OPENAI_MODEL_NAME = env("OPENAI_MODEL_NAME")
# OPENAI_ADA_EMBEDDING_DEPLOYMENT_NAME = env("OPENAI_ADA_EMBEDDING_DEPLOYMENT_NAME")
# OPENAI_ADA_EMBEDDING_MODEL_NAME = env("OPENAI_ADA_EMBEDDING_MODEL_NAME")
# openai.api_type = "azure"
# openai.api_version = OPENAI_DEPLOYMENT_VERSION
# openai.api_base = OPENAI_DEPLOYMENT_ENDPOINT
# openai.api_key = AZURE_API_KEY
# model configuration
temp = 0.1
llm = AzureChatOpenAI(
openai_api_key = AZURE_API_KEY,
openai_api_base = OPENAI_DEPLOYMENT_ENDPOINT,
openai_api_version = OPENAI_DEPLOYMENT_VERSION,
deployment_name = OPENAI_DEPLOYMENT_NAME,
model_name = OPENAI_MODEL_NAME,
temperature = temp,
)
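# Illustrative usage sketch of the configured client (hypothetical message text;
# other modules such as viz_page.py call this `llm` object the same way):
#
#     from langchain.schema import HumanMessage
#     reply = llm([HumanMessage(content="Hello")]).content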
| [] |
2024-01-10 | zoez1995/steward-star | viz_page.py | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import streamlit as st
from components import file_container
from config import *
from debuggpt import gpt_debug
import environ
import os
import openai
def viz():
st.header("📊 Monitor your data quality KPI")
## PROMPT
system_prompt = """Assistant is the lead data steward in a company. He monitors the overall data quality performance for the company and can identify
the priority of the data issues, and can provide data quality improvement guidance to data owners."""
instruction = """I will provide you with some data along with some null value summary statistics.
    I want you to give me a high-level interpretation of the data quality of the data I provided, and point out what should be the priority of the data issues.
Some fields in the data sets are critical and can't be Null, for example, in the data, primary keys can't be left blank, while fields that are not primary keys can be left blank.
    Focus on the Null value issue first, then look at other data quality issues such as the format of the data.
Please bucket the action recommendations into "High", "Medium", and "Low" priority."""
data = file_container()
if data is not None and len(data) > 0:
# Create an empty DataFrame to store the null value percentages
data_prompt = ""
count = 1
for file in data:
file_string = file.to_string()
data_prompt = data_prompt + f'Data set {count} is: ' + file_string + f'That is all for Data set {count}.'
count += 1
null_percentage_combined = pd.DataFrame()
null_count_combined = pd.DataFrame()
# Iterate through the DataFrames and calculate the null value percentage for each
for idx, df in enumerate(data, 1):
null_percentage = df.isnull().mean() * 100
null_percentage_combined[f'Data Source {idx}'] = null_percentage
null_count = df.isnull().sum()
null_count_combined[f'Data Source {idx}'] = null_count
# Reshape the DataFrame for Seaborn
null_percentage_melted = null_percentage_combined.reset_index().melt(id_vars='index', var_name='Data Source', value_name='Percentage')
null_count_melted = null_count_combined.reset_index().melt(id_vars='index', var_name='Data Source', value_name='Count')
null_percentage_melted_string = null_percentage_melted.to_string(index=False)
null_count_string = null_count_combined.to_string()
# Plotting the bar chart with Seaborn
plt.figure(figsize=[8,4])
fig = sns.barplot(x='index', y='Percentage', hue='Data Source', data=null_percentage_melted)
plt.title('Percentage of Null Values in Each Column Across Data Sources')
plt.ylabel('Percentage (%)')
plt.xlabel('Columns')
# Rotate the x-axis labels
plt.xticks(rotation=45)
plt.legend(title='Data Source')
for p in fig.patches:
fig.annotate(format(p.get_height(), '.1f'),
(p.get_x() + p.get_width() / 2., p.get_height()),
ha = 'center', va = 'center',
xytext = (0, 9),
textcoords = 'offset points', fontsize=9)
st.pyplot(plt)
## DISPLAY RESPONSE
if st.button("Check Data Quality KPI Health", type="primary"):
response = llm([
SystemMessage(content=system_prompt),
HumanMessage(content=instruction + "Here's the Null value percentage distribution for each data set: " + null_percentage_melted_string)
]).content
# llm = openai.ChatCompletion.create(**parameters)
# llm_dict = dict(llm)
# content = llm_dict['choices'][0]['message']['content']
st.markdown(response)
| [
"PLACEHOLDERData set PLACEHOLDER is: PLACEHOLDERThat is all for Data set PLACEHOLDER.",
"PLACEHOLDERHere's the Null value percentage distribution for each data set: PLACEHOLDER",
"Assistant is the lead data steward in a company. He monitors the overall data quality performance for the company and can identify \n the priority of the data issues, and can provide data quality improvement guidance to data owners."
] |
2024-01-10 | madhurprash/SageMaker-ML-Projects | job.py | import openai
import streamlit as st
from streamlit_chat import message
# Setting page title and header
st.set_page_config(page_title="AVA", page_icon=":robot_face:")
st.markdown("<h1 style='text-align: center;'>Euler - Apply To NYC Jobs 😬</h1>", unsafe_allow_html=True)
# Set org ID and API key
openai.organization = "org-q4M16BBPGFdigTiaBxIOK5iF"
openai.api_key = "org-q4M16BBPGFdigTiaBxIOK5iF"
# Initialise session state variables
if 'generated' not in st.session_state:
st.session_state['generated'] = []
if 'past' not in st.session_state:
st.session_state['past'] = []
if 'messages' not in st.session_state:
st.session_state['messages'] = [
{"role": "system", "content": "You are a helpful assistant."}
]
if 'model_name' not in st.session_state:
st.session_state['model_name'] = []
if 'cost' not in st.session_state:
st.session_state['cost'] = []
if 'total_tokens' not in st.session_state:
st.session_state['total_tokens'] = []
if 'total_cost' not in st.session_state:
st.session_state['total_cost'] = 0.0
# Sidebar - let user choose model, show total cost of current conversation, and let user clear the current conversation
st.sidebar.title("Sidebar")
model_name = st.sidebar.radio("Choose a model:", ("GPT-3.5", "GPT-4"))
counter_placeholder = st.sidebar.empty()
counter_placeholder.write(f"Total cost of this conversation: ${st.session_state['total_cost']:.5f}")
clear_button = st.sidebar.button("Clear Conversation", key="clear")
# Map model names to OpenAI model IDs
if model_name == "GPT-3.5":
model = "gpt-3.5-turbo"
else:
model = "gpt-4"
# reset everything
if clear_button:
st.session_state['generated'] = []
st.session_state['past'] = []
st.session_state['messages'] = [
{"role": "system", "content": "You are a helpful assistant."}
]
st.session_state['number_tokens'] = []
st.session_state['model_name'] = []
st.session_state['cost'] = []
st.session_state['total_cost'] = 0.0
st.session_state['total_tokens'] = []
counter_placeholder.write(f"Total cost of this conversation: ${st.session_state['total_cost']:.5f}")
# generate a response
def generate_response(prompt):
st.session_state['messages'].append({"role": "user", "content": prompt})
completion = openai.ChatCompletion.create(
model=model,
messages=st.session_state['messages']
)
response = completion.choices[0].message.content
st.session_state['messages'].append({"role": "assistant", "content": response})
# print(st.session_state['messages'])
total_tokens = completion.usage.total_tokens
prompt_tokens = completion.usage.prompt_tokens
completion_tokens = completion.usage.completion_tokens
return response, total_tokens, prompt_tokens, completion_tokens
# container for chat history
response_container = st.container()
# container for text box
container = st.container()
with container:
with st.form(key='my_form', clear_on_submit=True):
user_input = st.text_area("You:", key='input', height=100)
submit_button = st.form_submit_button(label='Send')
if submit_button and user_input:
output, total_tokens, prompt_tokens, completion_tokens = generate_response(user_input)
st.session_state['past'].append(user_input)
st.session_state['generated'].append(output)
st.session_state['model_name'].append(model_name)
st.session_state['total_tokens'].append(total_tokens)
# from https://openai.com/pricing#language-models
if model_name == "GPT-3.5":
cost = total_tokens * 0.002 / 1000
else:
cost = (prompt_tokens * 0.03 + completion_tokens * 0.06) / 1000
st.session_state['cost'].append(cost)
st.session_state['total_cost'] += cost
if st.session_state['generated']:
with response_container:
for i in range(len(st.session_state['generated'])):
message(st.session_state["past"][i], is_user=True, key=str(i) + '_user')
message(st.session_state["generated"][i], key=str(i))
st.write(
f"Model used: {st.session_state['model_name'][i]}; Number of tokens: {st.session_state['total_tokens'][i]}; Cost: ${st.session_state['cost'][i]:.5f}")
counter_placeholder.write(f"Total cost of this conversation: ${st.session_state['total_cost']:.5f}")
| [
"You are a helpful assistant."
] |
2024-01-10 | johnxqiu/wiki_gen_qa | run_qa_gen.py | """Runs the wikipedia article QA generation pipeline"""
import argparse
import asyncio
import json
import os
import time
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from typing import List
from langchain.chat_models.openai import ChatOpenAI
from wiki_gen_qa.generate_qa import get_wiki_article_qa_facts
from wiki_gen_qa.wiki_tools import get_wikipedia_summary_sentences
async def get_wiki_article_qa(wiki_article_name: str) -> List[dict]:
start = time.time()
assert os.environ["OPENAI_API_KEY"] is not None
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
summary_sentence_list = get_wikipedia_summary_sentences(wiki_article_name)
    print(f'Wikipedia page for "{wiki_article_name}" found!')
print(f"Generating QA for {len(summary_sentence_list)} sentences...")
params = [
{
"id": ith,
"wiki_article_name": wiki_article_name,
"summary_sentence": summary_sentence,
"chat": chat,
}
for ith, summary_sentence in enumerate(summary_sentence_list)
]
# params = [(ith, wiki_article_name, summary_sentence, chat)
# for ith, summary_sentence in enumerate(summary_sentence_list)]
with ThreadPoolExecutor(max_workers=8) as executor:
loop = asyncio.get_running_loop()
tasks = [
loop.run_in_executor(executor, partial(get_wiki_article_qa_facts, **param))
for param in params
]
result_data = await asyncio.gather(*tasks)
print("finished in ", int(time.time() - start))
return result_data
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Process a Wikipedia article")
parser.add_argument(
"wiki_article_name", type=str, help="Name of the Wikipedia article"
)
parser.add_argument(
"-o", type=str, default="generated_qa.json", help="Path to output JSON file"
)
args = parser.parse_args()
generated_data = asyncio.run(get_wiki_article_qa(args.wiki_article_name))
with open(args.o, "w", encoding="utf-8") as f:
json.dump(generated_data, f, indent=4)
print(
f"Successfully generated QA for {args.wiki_article_name} and saved to {args.o}!"
)
| [] |
2024-01-10 | stjordanis/ptan | ptan~experience.py | import gym
import torch
import random
import collections
from torch.autograd import Variable
import numpy as np
from collections import namedtuple, deque
from .agent import BaseAgent
from .common import utils
# one single experience step
Experience = namedtuple('Experience', ['state', 'action', 'reward', 'done'])
class ExperienceSource:
"""
Simple n-step experience source using single or multiple environments
    Every experience contains a list of n Experience entries
"""
def __init__(self, env, agent, steps_count=2, steps_delta=1, vectorized=False):
"""
Create simple experience source
:param env: environment or list of environments to be used
:param agent: callable to convert batch of states into actions to take
:param steps_count: count of steps to track for every experience chain
:param steps_delta: how many steps to do between experience items
:param vectorized: support of vectorized envs from OpenAI universe
"""
assert isinstance(env, (gym.Env, list, tuple))
assert isinstance(agent, BaseAgent)
assert isinstance(steps_count, int)
assert steps_count >= 1
assert isinstance(vectorized, bool)
if isinstance(env, (list, tuple)):
self.pool = env
else:
self.pool = [env]
self.agent = agent
self.steps_count = steps_count
self.steps_delta = steps_delta
self.total_rewards = []
self.total_steps = []
self.vectorized = vectorized
def __iter__(self):
states, agent_states, histories, cur_rewards, cur_steps = [], [], [], [], []
env_lens = []
for env in self.pool:
obs = env.reset()
            # if the environment is vectorized, all its output is lists of results.
# Details are here: https://github.com/openai/universe/blob/master/doc/env_semantics.rst
if self.vectorized:
obs_len = len(obs)
states.extend(obs)
else:
obs_len = 1
states.append(obs)
env_lens.append(obs_len)
for _ in range(obs_len):
histories.append(deque(maxlen=self.steps_count))
cur_rewards.append(0.0)
cur_steps.append(0)
agent_states.append(self.agent.initial_state())
iter_idx = 0
while True:
actions = [None] * len(states)
states_input = []
states_indices = []
for idx, state in enumerate(states):
if state is None:
actions[idx] = self.pool[0].action_space.sample() # assume that all envs are from the same family
else:
states_input.append(state)
states_indices.append(idx)
if states_input:
states_actions, new_agent_states = self.agent(states_input, agent_states)
for idx, action in enumerate(states_actions):
g_idx = states_indices[idx]
actions[g_idx] = action
agent_states[g_idx] = new_agent_states[idx]
grouped_actions = _group_list(actions, env_lens)
global_ofs = 0
for env_idx, (env, action_n) in enumerate(zip(self.pool, grouped_actions)):
if self.vectorized:
next_state_n, r_n, is_done_n, _ = env.step(action_n)
else:
next_state, r, is_done, _ = env.step(action_n[0])
next_state_n, r_n, is_done_n = [next_state], [r], [is_done]
for ofs, (action, next_state, r, is_done) in enumerate(zip(action_n, next_state_n, r_n, is_done_n)):
idx = global_ofs + ofs
state = states[idx]
history = histories[idx]
cur_rewards[idx] += r
cur_steps[idx] += 1
if state is not None:
history.append(Experience(state=state, action=action, reward=r, done=is_done))
if len(history) == self.steps_count and iter_idx % self.steps_delta == 0:
yield tuple(history)
states[idx] = next_state
if is_done:
# generate tail of history
while len(history) >= 1:
yield tuple(history)
history.popleft()
self.total_rewards.append(cur_rewards[idx])
self.total_steps.append(cur_steps[idx])
cur_rewards[idx] = 0.0
cur_steps[idx] = 0
# vectorized envs are reset automatically
states[idx] = env.reset() if not self.vectorized else None
agent_states[idx] = self.agent.initial_state()
history.clear()
global_ofs += len(action_n)
iter_idx += 1
def pop_total_rewards(self):
r = self.total_rewards
if r:
self.total_rewards = []
self.total_steps = []
return r
def pop_rewards_steps(self):
res = list(zip(self.total_rewards, self.total_steps))
if res:
self.total_rewards, self.total_steps = [], []
return res
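# Illustrative usage sketch for ExperienceSource (hypothetical env/agent/handler names;
# it only sketches the iteration contract described in the class docstring):
#
#     env = gym.make("CartPole-v1")
#     agent = SomeBaseAgentSubclass(net)
#     exp_source = ExperienceSource(env, agent, steps_count=2)
#     for exp in exp_source:          # exp is a tuple of up to `steps_count` Experience entries
#         handle(exp)
#         new_rewards = exp_source.pop_total_rewards()   # undiscounted returns of finished episodes
#         if new_rewards and max(new_rewards) > 195:
#             break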
def _group_list(items, lens):
"""
Unflat the list of items by lens
:param items: list of items
:param lens: list of integers
:return: list of list of items grouped by lengths
"""
res = []
cur_ofs = 0
for g_len in lens:
res.append(items[cur_ofs:cur_ofs+g_len])
cur_ofs += g_len
return res
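# Worked example of the grouping: _group_list([a0, a1, a2, a3], [1, 3]) gives
# [[a0], [a1, a2, a3]]; the flat list of per-environment actions is split back into
# one sub-list per environment, matching the obs_len values gathered in __iter__.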
# those entries are emitted from ExperienceSourceFirstLast. Reward is discounted over the trajectory piece
ExperienceFirstLast = collections.namedtuple('ExperienceFirstLast', ('state', 'action', 'reward', 'last_state'))
class ExperienceSourceFirstLast(ExperienceSource):
"""
This is a wrapper around ExperienceSource to prevent storing full trajectory in replay buffer when we need
only first and last states. For every trajectory piece it calculates discounted reward and emits only first
and last states and action taken in the first state.
If we have partial trajectory at the end of episode, last_state will be None
"""
def __init__(self, env, agent, gamma, steps_count=1, steps_delta=1, vectorized=False):
assert isinstance(gamma, float)
super(ExperienceSourceFirstLast, self).__init__(env, agent, steps_count+1, steps_delta, vectorized=vectorized)
self.gamma = gamma
self.steps = steps_count
def __iter__(self):
for exp in super(ExperienceSourceFirstLast, self).__iter__():
if exp[-1].done and len(exp) <= self.steps:
last_state = None
elems = exp
else:
last_state = exp[-1].state
elems = exp[:-1]
total_reward = 0.0
for e in reversed(elems):
total_reward *= self.gamma
total_reward += e.reward
yield ExperienceFirstLast(state=exp[0].state, action=exp[0].action,
reward=total_reward, last_state=last_state)
def discount_with_dones(rewards, dones, gamma):
discounted = []
r = 0
for reward, done in zip(rewards[::-1], dones[::-1]):
r = reward + gamma*r*(1.-done)
discounted.append(r)
return discounted[::-1]
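# Worked example of the discounting: discount_with_dones([1, 1, 1], [False, False, True], 0.9)
# walks the rewards backwards: 1.0 at the terminal step, then 1 + 0.9 * 1.0 = 1.9,
# then 1 + 0.9 * 1.9 = 2.71, and returns them in forward order: [2.71, 1.9, 1.0].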
class ExperienceSourceRollouts:
"""
    N-step rollout experience source following the A3C rollouts scheme. It has to be used with an agent
    that keeps the value estimate in its state (for example, agent.ActorCriticAgent).
Yields batches of num_envs * n_steps samples with the following arrays:
1. observations
2. actions
3. discounted rewards, with values approximation
4. values
"""
def __init__(self, env, agent, gamma, steps_count=5):
"""
Constructs the rollout experience source
:param env: environment or list of environments to be used
:param agent: callable to convert batch of states into actions
:param steps_count: how many steps to perform rollouts
"""
assert isinstance(env, (gym.Env, list, tuple))
assert isinstance(agent, BaseAgent)
assert isinstance(gamma, float)
assert isinstance(steps_count, int)
assert steps_count >= 1
if isinstance(env, (list, tuple)):
self.pool = env
else:
self.pool = [env]
self.agent = agent
self.gamma = gamma
self.steps_count = steps_count
self.total_rewards = []
self.total_steps = []
def __iter__(self):
pool_size = len(self.pool)
states = [np.array(e.reset()) for e in self.pool]
mb_states = np.zeros((pool_size, self.steps_count) + states[0].shape, dtype=states[0].dtype)
mb_rewards = np.zeros((pool_size, self.steps_count), dtype=np.float32)
mb_values = np.zeros((pool_size, self.steps_count), dtype=np.float32)
mb_actions = np.zeros((pool_size, self.steps_count), dtype=np.int64)
mb_dones = np.zeros((pool_size, self.steps_count), dtype=np.bool)
total_rewards = [0.0] * pool_size
total_steps = [0] * pool_size
agent_states = None
step_idx = 0
while True:
actions, agent_states = self.agent(states, agent_states)
rewards = []
dones = []
new_states = []
for env_idx, (e, action) in enumerate(zip(self.pool, actions)):
o, r, done, _ = e.step(action)
total_rewards[env_idx] += r
total_steps[env_idx] += 1
if done:
o = e.reset()
self.total_rewards.append(total_rewards[env_idx])
self.total_steps.append(total_steps[env_idx])
total_rewards[env_idx] = 0.0
total_steps[env_idx] = 0
new_states.append(np.array(o))
dones.append(done)
rewards.append(r)
# we need an extra step to get values approximation for rollouts
if step_idx == self.steps_count:
# calculate rollout rewards
for env_idx, (env_rewards, env_dones, last_value) in enumerate(zip(mb_rewards, mb_dones, agent_states)):
env_rewards = env_rewards.tolist()
env_dones = env_dones.tolist()
if not env_dones[-1]:
env_rewards = discount_with_dones(env_rewards + [last_value], env_dones + [False], self.gamma)[:-1]
else:
env_rewards = discount_with_dones(env_rewards, env_dones, self.gamma)
mb_rewards[env_idx] = env_rewards
yield mb_states.reshape((-1,) + mb_states.shape[2:]), mb_rewards.flatten(), mb_actions.flatten(), mb_values.flatten()
step_idx = 0
mb_states[:, step_idx] = states
mb_rewards[:, step_idx] = rewards
mb_values[:, step_idx] = agent_states
mb_actions[:, step_idx] = actions
mb_dones[:, step_idx] = dones
step_idx += 1
states = new_states
def pop_total_rewards(self):
r = self.total_rewards
if r:
self.total_rewards = []
self.total_steps = []
return r
def pop_rewards_steps(self):
res = list(zip(self.total_rewards, self.total_steps))
if res:
self.total_rewards, self.total_steps = [], []
return res
class ExperienceSourceBuffer:
"""
The same as ExperienceSource, but takes episodes from the buffer
"""
def __init__(self, buffer, steps_count=1):
"""
Create buffered experience source
:param buffer: list of episodes, each is a list of Experience object
:param steps_count: count of steps in every entry
"""
self.update_buffer(buffer)
self.steps_count = steps_count
def update_buffer(self, buffer):
self.buffer = buffer
self.lens = list(map(len, buffer))
def __iter__(self):
"""
Infinitely sample episode from the buffer and then sample item offset
"""
while True:
episode = random.randrange(len(self.buffer))
ofs = random.randrange(self.lens[episode] - self.steps_count - 1)
yield self.buffer[episode][ofs:ofs+self.steps_count]
class ExperienceReplayBuffer:
def __init__(self, experience_source, buffer_size):
assert isinstance(experience_source, (ExperienceSource, type(None)))
assert isinstance(buffer_size, int)
self.experience_source_iter = None if experience_source is None else iter(experience_source)
self.buffer = []
self.capacity = buffer_size
self.pos = 0
def __len__(self):
return len(self.buffer)
def __iter__(self):
return iter(self.buffer)
def sample(self, batch_size):
"""
Get one random batch from experience replay
TODO: implement sampling order policy
:param batch_size:
:return:
"""
if len(self.buffer) <= batch_size:
return self.buffer
# Warning: replace=False makes random.choice O(n)
keys = np.random.choice(len(self.buffer), batch_size, replace=True)
return [self.buffer[key] for key in keys]
def _add(self, sample):
if len(self.buffer) < self.capacity:
self.buffer.append(sample)
else:
self.buffer[self.pos] = sample
self.pos = (self.pos + 1) % self.capacity
def populate(self, samples):
"""
Populates samples into the buffer
:param samples: how many samples to populate
"""
for _ in range(samples):
entry = next(self.experience_source_iter)
self._add(entry)
class PrioReplayBufferNaive:
def __init__(self, exp_source, buf_size, prob_alpha=0.6):
self.exp_source_iter = iter(exp_source)
self.prob_alpha = prob_alpha
self.capacity = buf_size
self.pos = 0
self.buffer = []
self.priorities = np.zeros((buf_size, ), dtype=np.float32)
def __len__(self):
return len(self.buffer)
def populate(self, count):
max_prio = self.priorities.max() if self.buffer else 1.0
for _ in range(count):
sample = next(self.exp_source_iter)
if len(self.buffer) < self.capacity:
self.buffer.append(sample)
else:
self.buffer[self.pos] = sample
self.priorities[self.pos] = max_prio
self.pos = (self.pos + 1) % self.capacity
def sample(self, batch_size, beta=0.4):
if len(self.buffer) == self.capacity:
prios = self.priorities
else:
prios = self.priorities[:self.pos]
probs = np.array(prios, dtype=np.float32) ** self.prob_alpha
probs /= probs.sum()
indices = np.random.choice(len(self.buffer), batch_size, p=probs, replace=True)
samples = [self.buffer[idx] for idx in indices]
total = len(self.buffer)
weights = (total * probs[indices]) ** (-beta)
weights /= weights.max()
return samples, indices, np.array(weights, dtype=np.float32)
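# The sampling above implements proportional prioritized replay: each entry is drawn
# with probability P(i) = p_i**alpha / sum_k p_k**alpha, and the importance-sampling
# weight w_i = (N * P(i))**(-beta), normalized by its maximum, corrects the bias that
# non-uniform sampling would otherwise introduce into the loss.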
def update_priorities(self, batch_indices, batch_priorities):
for idx, prio in zip(batch_indices, batch_priorities):
self.priorities[idx] = prio
class PrioritizedReplayBuffer(ExperienceReplayBuffer):
def __init__(self, experience_source, buffer_size, alpha):
super(PrioritizedReplayBuffer, self).__init__(experience_source, buffer_size)
assert alpha > 0
self._alpha = alpha
it_capacity = 1
while it_capacity < buffer_size:
it_capacity *= 2
self._it_sum = utils.SumSegmentTree(it_capacity)
self._it_min = utils.MinSegmentTree(it_capacity)
self._max_priority = 1.0
def _add(self, *args, **kwargs):
idx = self.pos
super()._add(*args, **kwargs)
self._it_sum[idx] = self._max_priority ** self._alpha
self._it_min[idx] = self._max_priority ** self._alpha
def _sample_proportional(self, batch_size):
res = []
for _ in range(batch_size):
mass = random.random() * self._it_sum.sum(0, len(self) - 1)
idx = self._it_sum.find_prefixsum_idx(mass)
res.append(idx)
return res
def sample(self, batch_size, beta):
assert beta > 0
idxes = self._sample_proportional(batch_size)
weights = []
p_min = self._it_min.min() / self._it_sum.sum()
max_weight = (p_min * len(self)) ** (-beta)
for idx in idxes:
p_sample = self._it_sum[idx] / self._it_sum.sum()
weight = (p_sample * len(self)) ** (-beta)
weights.append(weight / max_weight)
weights = np.array(weights, dtype=np.float32)
samples = [self.buffer[idx] for idx in idxes]
return samples, idxes, weights
def update_priorities(self, idxes, priorities):
assert len(idxes) == len(priorities)
for idx, priority in zip(idxes, priorities):
assert priority > 0
assert 0 <= idx < len(self)
self._it_sum[idx] = priority ** self._alpha
self._it_min[idx] = priority ** self._alpha
self._max_priority = max(self._max_priority, priority)
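# Illustrative usage sketch for the prioritized buffer (hypothetical variable names;
# beta is commonly annealed from about 0.4 towards 1.0 over the course of training):
#
#     buffer = PrioritizedReplayBuffer(exp_source, buffer_size=100000, alpha=0.6)
#     buffer.populate(1)
#     batch, idxes, weights = buffer.sample(batch_size=32, beta=0.4)
#     # ... compute per-sample TD errors for `batch`, weighted by `weights` ...
#     buffer.update_priorities(idxes, np.abs(td_errors) + 1e-5)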
class BatchPreprocessor:
"""
    Abstract preprocessor class; descendants convert an experience
    batch into a form suitable for learning.
"""
def preprocess(self, batch):
raise NotImplementedError
class QLearningPreprocessor(BatchPreprocessor):
"""
Supports SimpleDQN, TargetDQN, DoubleDQN and can additionally feed TD-error back to
experience replay buffer.
To use different modes, use appropriate class method
"""
def __init__(self, model, target_model, use_double_dqn=False, batch_td_error_hook=None, gamma=0.99, device="cpu"):
self.model = model
self.target_model = target_model
self.use_double_dqn = use_double_dqn
self.batch_dt_error_hook = batch_td_error_hook
self.gamma = gamma
self.device = device
@staticmethod
def simple_dqn(model, **kwargs):
return QLearningPreprocessor(model=model, target_model=None, use_double_dqn=False, **kwargs)
@staticmethod
def target_dqn(model, target_model, **kwards):
return QLearningPreprocessor(model, target_model, use_double_dqn=False, **kwards)
@staticmethod
def double_dqn(model, target_model, **kwargs):
return QLearningPreprocessor(model, target_model, use_double_dqn=True, **kwargs)
def _calc_Q(self, states_first, states_last):
"""
        Calculates appropriate Q values for first and last states. The way they are calculated depends on our settings.
:param states_first: numpy array of first states
:param states_last: numpy array of last states
:return: tuple of numpy arrays of q values
"""
# here we need both first and last values calculated using our main model, so we
# combine both states into one batch for efficiency and separate results later
if self.target_model is None or self.use_double_dqn:
states_t = torch.tensor(np.concatenate((states_first, states_last), axis=0)).to(self.device)
res_both = self.model(states_t).data.cpu().numpy()
return res_both[:len(states_first)], res_both[len(states_first):]
# in this case we have target_model set and use_double_dqn==False
# so, we should calculate first_q and last_q using different models
states_first_v = torch.tensor(states_first).to(self.device)
states_last_v = torch.tensor(states_last).to(self.device)
q_first = self.model(states_first_v).data
q_last = self.target_model(states_last_v).data
return q_first.cpu().numpy(), q_last.cpu().numpy()
def _calc_target_rewards(self, states_last, q_last):
"""
Calculate rewards from final states according to variants from our construction:
1. simple DQN: max(Q(states, model))
2. target DQN: max(Q(states, target_model))
        3. double DQN: Q(states, target_model)[argmax(Q(states, model))]
:param states_last: numpy array of last states from the games
:param q_last: numpy array of last q values
:return: vector of target rewards
"""
# in this case we handle both simple DQN and target DQN
if self.target_model is None or not self.use_double_dqn:
return q_last.max(axis=1)
# here we have target_model set and use_double_dqn==True
actions = q_last.argmax(axis=1)
# calculate Q values using target net
states_last_v = torch.tensor(states_last).to(self.device)
q_last_target = self.target_model(states_last_v).data.cpu().numpy()
return q_last_target[range(q_last_target.shape[0]), actions]
def preprocess(self, batch):
"""
Calculates data for Q learning from batch of observations
:param batch: list of lists of Experience objects
:return: tuple of numpy arrays:
1. states -- observations
2. target Q-values
3. vector of td errors for every batch entry
"""
# first and last states for every entry
state_0 = np.array([exp[0].state for exp in batch], dtype=np.float32)
state_L = np.array([exp[-1].state for exp in batch], dtype=np.float32)
q0, qL = self._calc_Q(state_0, state_L)
rewards = self._calc_target_rewards(state_L, qL)
td = np.zeros(shape=(len(batch),))
for idx, (total_reward, exps) in enumerate(zip(rewards, batch)):
# game is done, no final reward
if exps[-1].done:
total_reward = 0.0
for exp in reversed(exps[:-1]):
total_reward *= self.gamma
total_reward += exp.reward
# update total reward and calculate td error
act = exps[0].action
td[idx] = q0[idx][act] - total_reward
q0[idx][act] = total_reward
return state_0, q0, td
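# Illustrative training-step sketch for the preprocessor (hypothetical names, assuming
# a simple MSE regression of the network's Q-values onto the returned targets):
#
#     preproc = QLearningPreprocessor.double_dqn(model, target_model, device="cpu")
#     states, target_q, td_errors = preproc.preprocess(batch)
#     loss = torch.nn.functional.mse_loss(model(torch.tensor(states)),
#                                         torch.tensor(target_q))
#     loss.backward()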
| [] |
2024-01-10 | Roadwork/Roadwork-RL | src-grpc~Servers~OpenAI~server-dapr.py | import sys
import os
import logging
import time
from concurrent import futures
from datetime import datetime
import grpc
# Dapr Libraries
from dapr.proto.common.v1 import common_pb2 as commonv1pb
from dapr.proto.dapr.v1 import dapr_pb2 as dapr_messages
from dapr.proto.dapr.v1 import dapr_pb2_grpc as dapr_services
from dapr.proto.daprclient.v1 import daprclient_pb2 as daprclient_messages
from dapr.proto.daprclient.v1 import daprclient_pb2_grpc as daprclient_services
# Custom Protobuf
import proto_compiled.roadwork_pb2 as roadwork_messages
import protobuf_helpers
from google.protobuf.any_pb2 import Any
# Import OpenAI
from OpenAIEnv import Envs
APP_PORT_GRPC = os.getenv('APP_GRPC_PORT', 50050)
DAPR_PORT_HTTP = os.getenv('DAPR_HTTP_PORT', 3500)
DAPR_PORT_GRPC = os.getenv('DAPR_GRPC_PORT', 50001) # Note: currently 50001 is always default
print(f"==================================================")
print(f"DAPR_PORT_GRPC: {DAPR_PORT_GRPC}; DAPR_PORT_HTTP: {DAPR_PORT_HTTP}")
print(f"APP_PORT_GRPC: {APP_PORT_GRPC}")
print(f"==================================================")
# import gym
envs = Envs()
# # Start a gRPC client
channel = grpc.insecure_channel(f"localhost:{DAPR_PORT_GRPC}")
client = dapr_services.DaprStub(channel)
print(f"Started gRPC client on DAPR_GRPC_PORT: {DAPR_PORT_GRPC}")
# Our server methods
class DaprClientServicer(daprclient_services.DaprClientServicer):
def OnInvoke(self, request, context):
res = ""
if request.method == 'create':
req = protobuf_helpers.from_any_pb(roadwork_messages.CreateRequest, request.data)
res = roadwork_messages.CreateResponse(instanceId=envs.create(req.envId))
res = protobuf_helpers.to_any_pb(res)
elif request.method == 'reset':
req = protobuf_helpers.from_any_pb(roadwork_messages.ResetRequest, request.data)
res = roadwork_messages.ResetResponse(observation=envs.reset(req.instanceId))
res = protobuf_helpers.to_any_pb(res)
elif request.method == 'action-space-sample':
req = protobuf_helpers.from_any_pb(roadwork_messages.ActionSpaceSampleRequest, request.data)
res = roadwork_messages.ActionSpaceSampleResponse(action=envs.get_action_space_sample(req.instanceId))
res = protobuf_helpers.to_any_pb(res)
elif request.method == 'action-space-info':
req = protobuf_helpers.from_any_pb(roadwork_messages.ActionSpaceInfoRequest, request.data)
res = roadwork_messages.ActionSpaceInfoResponse(result=envs.get_action_space_info(req.instanceId))
res = protobuf_helpers.to_any_pb(res)
elif request.method == 'observation-space-info':
req = protobuf_helpers.from_any_pb(roadwork_messages.ObservationSpaceInfoRequest, request.data)
res = roadwork_messages.ObservationSpaceInfoResponse(result=envs.get_observation_space_info(req.instanceId))
res = protobuf_helpers.to_any_pb(res)
elif request.method == 'step':
req = protobuf_helpers.from_any_pb(roadwork_messages.StepRequest, request.data)
res_step = envs.step(req.instanceId, req.action, req.render) # Returns 0 = obs_jsonable, 1 = reward, 2 = done, 3 = info in array
# Observation Space
res_osi = envs.get_observation_space_info(req.instanceId)
space_wrapper = roadwork_messages.SpaceWrapper()
if res_osi.HasField('discrete'):
space_discrete = roadwork_messages.SpaceDiscrete()
space_discrete.observation = res_step[0]
space_wrapper.discrete.CopyFrom(space_discrete)
elif res_osi.HasField('box'):
space_box = roadwork_messages.SpaceBox()
space_box.observation.extend(res_step[0])
space_wrapper.box.CopyFrom(space_box)
else:
logging.error("Unsupported Space Type: %s" % res_step[3]['name'])
                logging.error(res_step[3])
res = roadwork_messages.StepResponse(reward=res_step[1], isDone=res_step[2], info=res_step[3], observation=space_wrapper)
res = protobuf_helpers.to_any_pb(res)
elif request.method == 'monitor-start':
req = protobuf_helpers.from_any_pb(roadwork_messages.BaseRequest, request.data)
envs.monitor_start(req.instanceId, '/mnt/output-server', True, False, 10) # Log to local dir so we can reach it
res = roadwork_messages.BaseResponse()
res = protobuf_helpers.to_any_pb(res)
elif request.method == 'monitor-stop':
req = protobuf_helpers.from_any_pb(roadwork_messages.BaseRequest, request.data)
envs.monitor_close(req.instanceId)
res = roadwork_messages.BaseResponse()
res = protobuf_helpers.to_any_pb(res)
else:
res = Any(value='METHOD_NOT_SUPPORTED'.encode('utf-8'))
# print(f"[OnInvoke][{request.method}] Done @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')}")
# Return response to caller
content_type = "text/plain; charset=UTF-8"
return commonv1pb.InvokeResponse(data=res, content_type=content_type)
# Create a gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers = 10))
daprclient_services.add_DaprClientServicer_to_server(DaprClientServicer(), server)
# Start the gRPC server
print(f'Starting server. Listening on port {APP_PORT_GRPC}.')
server.add_insecure_port(f'[::]:{APP_PORT_GRPC}')
server.start()
# Since server.start() doesn't block, we need to do a sleep loop
try:
while True:
time.sleep(86400)
except KeyboardInterrupt:
server.stop(0) | [] |
2024-01-10 | Roadwork/Roadwork-RL | src~Server~flask_main.py | # -*- coding: utf-8 -*-
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from roadwork.server import RoadworkActorService
# Import our servers
from OpenAI.server import ActorOpenAI
# Start the entire service
service = RoadworkActorService()
service.register(ActorOpenAI)
service.start() | [] |
2024-01-10 | SanmayAvhad/44_VCET | Hackathon~In_the_loop~email_features.py | from operator import length_hint
import os
import openai
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email import encoders
import email
import imaplib
def receive_emails():
N= 8 #Top 8 email to fetch
EMAIL = '[email protected]'
PASSWORD = 'Jatin!@#$1'
SERVER = 'outlook.office365.com'
mail = imaplib.IMAP4_SSL(SERVER)
mail.login(EMAIL, PASSWORD)
mail.select('inbox')
status, data = mail.search(None, 'ALL')
mail_ids = []
email_list = []
for block in data:
mail_ids += block.split()
print(mail_ids)
for i in mail_ids:
status, data = mail.fetch(i, '(RFC822)')
for response_part in data:
if isinstance(response_part, tuple):
message = email.message_from_bytes(response_part[1])
mail_from = message['from']
mail_subject = message['subject']
if message.is_multipart():
mail_content = ''
for part in message.get_payload():
if part.get_content_type() == 'text/plain':
mail_content += part.get_payload()
else:
mail_content = message.get_payload()
print(f'From: {mail_from}')
print(f'Subject: {mail_subject}')
print(f'Content: {mail_content}')
email_list.append([mail_from,mail_subject,mail_content])
# print(type(email_list))
email_list = email_list[::-1]
# print(len(email_list))
# for i in email_list:
# print(i)
# case_list = []
# for entry in email_list:
# case = {'sender': email_list[0][0], 'subject': email_list[0][1], 'body':email_list[0][2] }
# case_list.append(case.copy())
return (email_list, len(mail_ids))
def openAI(title):
openai.api_key ="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"
response = openai.Completion.create(
model="text-davinci-002",
prompt=f"Email for {title}",
temperature=0.7,
max_tokens=256,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
print(response['choices'][0]['text'])
return response['choices'][0]['text']
# return "OPENAI FUnction is working"
def SendEmail(sender_email, receiver_email, body, title):
## FILE TO SEND AND ITS PATH
filename = 'some_file.csv'
SourcePathName = 'C:/reports/' + filename
msg = MIMEMultipart()
msg['From'] = sender_email
msg['To'] = receiver_email
msg['Subject'] = title
body = body
msg.attach(MIMEText(body, 'plain'))
## ATTACHMENT PART OF THE CODE IS HERE
# attachment = open(r'C:\xampp\htdocs\website\Hackathons\testing codes\email.py', 'rb')
# part = MIMEBase('application', "octet-stream")
# part.set_payload((attachment).read())
# encoders.encode_base64(part)
# part.add_header('Content-Disposition', "attachment; filename= %s" % filename)
# msg.attach(part)
try:
print("Message Sending......")
server = smtplib.SMTP('smtp.office365.com', 587) ### put your relevant SMTP here
server.ehlo()
server.starttls()
server.ehlo()
server.login('[email protected]', 'Jatin!@#$1') ### if applicable
server.send_message(msg)
server.quit()
print("Message Sent")
except:
print("Error")
server.quit()
| [
"Email for PLACEHOLDER"
] |
2024-01-10 | cisco-open/BLAZE | backend~models~common~OpenAI.py | import openai
from flask import current_app
def get_openAI_info():
"""
    Function to return a dictionary containing the name, class name and
    description of the OpenAI model. It is used
    throughout the code to get various information about the model.
    Returns
    -------
    model_info : a dictionary
        A dictionary containing the name, class name and
        description of the OpenAI model
"""
model_info = {
'name': "OpenAI",
'class_name': 'OpenAI',
'desc': "OpenAI",
}
return model_info
class OpenAI():
tasks_supported = ["actionables","summarization"]
def __init__(self):
self._info = get_openAI_info()
def _get_model_info(self):
pass
def _get_name(self):
return self._info['name']
def _get_class_name(self):
return self._info['class_name']
def gpt_analysis(self, category, processed_text, prompt=None):
print("Reached GPT analysis")
#return {'choices' : [{'text': "DUMMY RESPONSE"}]}
if prompt is not None:
message = f"{prompt}\n{processed_text}"
else:
if category == "summary":
print("Coming to summarize")
prompt = "Analyze the following meeting transcript and generate a summary."
message = f"{prompt}\n{processed_text}"
elif category == "actionables":
prompt = "Analyze the following meeting transcript and identify actionable items (such as todo's) and return them in a list, separated by the pipeline '|' character"
message = f"{prompt}\n{processed_text}"
print(message)
elif category == "agenda":
prompt = "Analyze the following meeting transcript and idetnify discussed topics as well as the duration they were discussed and return them in a list, separated by the '-' between time and label, and separated by the pipeline '|' character between each item. For example, 'XX:XX - Introductions' may be a valid entry in the returned list, if the meeting contained an introduction."
message = f"{prompt}\n{processed_text}"
else:
return None
openai.api_key = current_app.config.get('OPENAPI_KEY')
response = openai.Completion.create(
model="text-davinci-003",
prompt=message,
temperature=0.7,
max_tokens=892,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
return response
def _summarize_text(self, text_to_summarize):
response = self.gpt_analysis("summary",text_to_summarize)
return response['choices'][0]['text']
def get_actionables(self,text):
response = self.gpt_analysis("actionables",text)
return response['choices'][0]['text'] | [
"Analyze the following meeting transcript and generate a summary.",
"Analyze the following meeting transcript and identify actionable items (such as todo's) and return them in a list, separated by the pipeline '|' character",
"Analyze the following meeting transcript and idetnify discussed topics as well as the duration they were discussed and return them in a list, separated by the '-' between time and label, and separated by the pipeline '|' character between each item. For example, 'XX:XX - Introductions' may be a valid entry in the returned list, if the meeting contained an introduction."
] |
2024-01-10 | HallowsYves/HourlyMotivation | hourly_motivation.py | import boto3
import openai
import os
import urllib
import sqlite3
import flickr_api
import random
from handling import QuoteAlreadyInDatabaseException
import google.generativeai as palm
from datetime import datetime
from dotenv import load_dotenv
from textwrap import fill
from PIL import Image, ImageDraw, ImageFont, ImageFilter
# SETUP
load_dotenv()
ACCESS_KEY_ID = os.getenv("ACCESS_KEY_ID")
SECRET_ACCESS_KEY = os.getenv("SECRET_ACCESS_KEY")
s3 = boto3.client('s3',aws_access_key_id=ACCESS_KEY_ID, aws_secret_access_key=SECRET_ACCESS_KEY)
# PALM API
PALM_KEY = os.getenv("PALM_KEY")
palm.configure(api_key=PALM_KEY)
models = [m for m in palm.list_models() if 'generateText' in m.supported_generation_methods]
model = models[0].name
# FLICKR
FLICKR_KEY = os.getenv("FLICKR_KEY")
FLICKR_SKEY = os.getenv("FLICKR_SKEY")
flickr_api.set_keys(api_key=FLICKR_KEY, api_secret=FLICKR_SKEY)
def generate_quote(usr_prompt):
try:
quote = palm.generate_text(
model=model,
prompt=usr_prompt,
temperature=1.0,
max_output_tokens=300,
stop_sequences="**",
)
add_to_database(quote.result)
formatted_quote = fill(quote.result, 20)
return formatted_quote
except:
if not check_for_duplicates(quote.result):
raise QuoteAlreadyInDatabaseException
def get_image():
file_name = str(datetime.now()) + ".png"
folder_path = "/Users/hallowsyves/Documents/HourlyMotivation/Media/Images"
file_path = os.path.join(folder_path, file_name)
search_r = flickr_api.Photo.search(text='Nature Background', content_types=0, sorted=True)
random_image = random.choice(search_r)
flickr_api.Photo.save(self=random_image, filename=file_path, size_label='Medium 800')
return file_path
def save_image(url):
file_name = str(datetime.now()) + ".png"
folder_path = "/Users/hallowsyves/Documents/HourlyMotivation/Media/Images"
file_path = os.path.join(folder_path, file_name)
urllib.request.urlretrieve(url, file_path)
return file_path
def load_image(image, quote):
# Load Background Image
image_ = Image.open(image)
image_.putalpha(127)
# center text
center_x = image_.width / 2
center_y = image_.height / 2
image_.filter(ImageFilter.GaussianBlur(5))
image_load = ImageDraw.Draw(image_)
# Draw Image
font = load_font()
image_load.text((center_x,center_y), quote, anchor='mm', font=font, fill=(255,255,255))
# Show new Image with quote
image_.save('Media/Images/temp.png')
file_name = str(datetime.now()) + ".png"
mimetype = 'image/png'
s3.upload_file(
Filename='Media/Images/temp.png',
Bucket='hourlymotivationbgimg',
Key=file_name,
ExtraArgs={
"ContentType": mimetype
}
)
url = s3.generate_presigned_url('get_object',
Params={
'Bucket': 'hourlymotivationbgimg',
'Key': file_name,
},
ExpiresIn=315360000)
print(url)
os.remove('Media/Images/temp.png')
os.remove(image)
return url
def load_font():
times_new = ImageFont.truetype('/Users/hallowsyves/Documents/HourlyMotivation/Fonts/AUGUSTUS.TTF', 25)
return times_new
def check_for_duplicates(quote):
conn = sqlite3.connect('quotes.db')
cursor = conn.cursor()
query = 'SELECT * FROM {}'.format('quotes')
cursor.execute(query)
results = cursor.fetchall()
for row in results:
if quote in row:
return False
return True
def add_to_database(quote):
connection = sqlite3.connect('quotes.db')
cursor = connection.cursor()
query = 'INSERT INTO quotes (quote) VALUES (?)'
cursor.execute(query, (quote,))
connection.commit()
cursor.close()
connection.close()
def print_database():
connection = sqlite3.connect('quotes.db')
cursor = connection.cursor()
cursor.execute('SELECT * FROM quotes')
quotes = cursor.fetchall()
for quote in quotes:
print(quote)
connection.close()
def generate_motivational_prompt():
"""1. Figure out how to create motivational prompt"""
m_prompt = palm.generate_text(
model=model,
prompt="Pick one random historical figure, and one subject correlated to that figure",
temperature=1.0,
max_output_tokens=400,
stop_sequences="**"
)
prompt = fill(m_prompt.result,20)
return prompt
# Generate motivational quote, from rand person | [
"Pick one random historical figure, and one subject correlated to that figure"
] |
2024-01-10 | vsxd/knowledge-base-with-gpt | user_query.py | """
Command-line user question search
"""
import openai
import os
from embedding import create_embedding
from vector_db import Storage
def limit_context_length(context, max_length=3000):
"""
    Limit the total length of the text list so it does not exceed the given maximum.
    :param context: list of text paragraphs.
    :param max_length: maximum total length, 3000 by default.
    :return: the first n text paragraphs that fit within the limit.
"""
    # Get the length of each text paragraph.
paragraph_lengths = [len(paragraph) for paragraph in context]
total_length = sum(paragraph_lengths)
if total_length <= max_length:
        # If the total length is within the maximum limit, no truncation is needed.
return context
    # If the total length exceeds the maximum limit, keep only the first n paragraphs.
current_length = 0
for index, length in enumerate(paragraph_lengths):
current_length += length
if current_length > max_length:
            # Slice into a new list and return the first n text paragraphs.
return context[:index]
    # If all text paragraphs fit, return the whole list.
return context
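# Worked example of the truncation: limit_context_length(["abc", "de", "fg"], max_length=5)
# returns ["abc", "de"], because the running total of lengths (3 + 2 + 2 = 7) first
# exceeds the limit at the third paragraph, so the list is cut at that index.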
def completion(query: str, context: list[str]) -> str:
"""
    Call the OpenAI ChatCompletion API with the query and context
"""
context = limit_context_length(context, 3000)
text = "\n".join(f"{index}. {text.strip()}" for index,
text in enumerate(context))
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{'role': 'system',
'content': '''我是一个非常有帮助的QA机器人,能准确地使用现有文档回答用户的问题。
我可以使用所提供的文本来形成我的答案,在可能的情况下,尽量使用自己的话而不是逐字逐句地抄袭原文。
我的回答是准确、有帮助、简明、清晰的。'''},
{'role': 'user', 'content': f'我的问题是:{query}\n请使用以下的知识库内容来提供问题的答案:\n{text}'},
],
)
print(f"使用的tokens: {response.usage.total_tokens}")
return response.choices[0].message.content
def user_query_loop() -> None:
"""
Loop for user queries.
"""
storage = Storage()
limit = 35
while True:
query = input("请输入问题: \n> ")
if query == "quit":
break
_, embedding = create_embedding(query)
texts = storage.get_texts(embedding, limit)
texts = list(set(texts)) # drop duplicated texts
print(f"已找到相关片段: {len(texts)}")
answer = completion(query, texts)
print(">> Answer:")
print(answer.strip())
print("=====================================")
if __name__ == '__main__':
user_query_loop()
| [
"我的问题是:PLACEHOLDER\n请使用以下的知识库内容来提供问题的答案:\nPLACEHOLDER",
"我是一个非常有帮助的QA机器人,能准确地使用现有文档回答用户的问题。\n 我可以使用所提供的文本来形成我的答案,在可能的情况下,尽量使用自己的话而不是逐字逐句地抄袭原文。\n 我的回答是准确、有帮助、简明、清晰的。"
] |
2024-01-10 | armstrongg1/ML-ChatGpt | new_chat_gpt_github.py | import spacy
import concurrent.futures
from tabulate import tabulate
import traceback
import pandas as pd
import openai
import json
import unicodedata
import time
import re
import numpy as np
import Levenshtein as lev
nlp = spacy.load('en_core_web_sm')
def find_nearest_brands(target_brand, processed_brands, n_brands=25):
target_brand = ' '.join([token.lemma_ for token in nlp(target_brand)])
distances = [lev.distance(target_brand, brand) for brand in processed_brands]
nearest_brand_indices = np.argsort(distances)[:n_brands]
return [processed_brands[index] for index in nearest_brand_indices]
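# The helper above lemmatizes the target brand with spaCy, computes the Levenshtein
# edit distance to every pre-lemmatized brand string, and returns the n_brands closest
# candidates; this is how a brand name suggested by GPT is matched against the
# marketplace's allowed brand list further below.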
def remove_non_latin_characters(text):
normalized_text = unicodedata.normalize('NFKD', text)
ascii_text = normalized_text.encode('ASCII', 'ignore').decode('utf-8')
latin_text = ''.join(char for char in ascii_text if char.isascii())
return latin_text
# Setup OpenAI API
openai.api_key = ''
# Read CSV file
df = pd.read_csv(r'C:\trabalho_upwork\brian_algopix\new_task_23-05\files_from_brian\merged_algopixDetails_algopixAnalysis_marketplaceKingfisher_PM11_VL11_070_brian.csv', sep="|", dtype=str)
responses = []
valid_rows = []
count_rows = 0
list_of_attributes_and_ean = []
master_dict = {}
all_results = []
all_user_messages = []
# replace with nan
df.replace(r'^\s*$', np.nan, regex=True, inplace=True)
# calculate the number of threads - one for every 20 RPM, but not more than 100
max_workers = min(3500 // 20, 100)
# Iterate over the rows of the dataframe
def process_row(row):
tokens_used = 0
type_parameters = []
# if Details_EAN is empty or nan skip this row
if pd.isnull(row['Analysis_titles_en']):
return None
chat_models = []
initial_message_for_chatgpt = "Select the attribute's correct value for the product from the value list. Explain your thinking step by step. Please provide the correct word between writing ||. Example: ||Black|| \n\nExplanation:"
# get attribute names
# attribute_names = ["Acquisition brand", "Pack Type", "REACh Verified", "Legal information"]
    # list of keys to exclude
excluded_attribute = [
"Category",
"Shop SKU",
"Name",
"EAN",
"Main Image 1",
"Secondary Image 1",
"Secondary Image 2",
"Secondary Image 3",
"Secondary Image 4",
"Secondary Image 5",
"Secondary Image 6",
"Secondary Image 7",
"Secondary Image 8",
"Product Guide",
"Product Instruction Manual",
"Safety Manual",
"Video",
"Unique Selling Point 01",
"Unique Selling Point 02",
"Unique Selling Point 03",
"Unique Selling Point 04",
"Unique Selling Point 05",
"Unique Selling Point 06",
"Unique Selling Point 07",
"Unique Selling Point 08",
"Body Copy", "Selling Copy", 'REACh Verified', 'Contains wood and/or paper', 'FSC or PEFC certified', 'Legal information', 'MultiSKU Product Group ID',
'Key Feature', 'Product diameter', 'Product length', 'Product width', 'Product height', 'Product weight'
]
# assuming that the column containing the json is called 'json_column'
json_column = json.loads(row['pm_11_results'])
# get all keys from the json and add them to a list
attribute_names = [key for key in json_column.keys() if key not in excluded_attribute]
json_data = row["pm_11_results"]
json_data = json.loads(json_data)
attribute_definitions = {}
for key, value in json_data.items():
lower_key = key.strip().lower()
if lower_key in (name.strip().lower() for name in attribute_names):
# If the key is already in the dictionary, we update the existing dictionary
if lower_key in attribute_definitions:
attribute_definitions[lower_key].update({'description': value["description"], 'type_parameter': value["type_parameter"], 'code': value["code"]})
# Otherwise, start a new dictionary
else:
attribute_definitions[lower_key] = {'description': value["description"], 'type_parameter': value["type_parameter"], 'code': value["code"]}
# get all type_parameters with respective lower_key
type_parameters.extend([{key: value["code"]} for key, value in attribute_definitions.items()])
json_object = row["association_list"]
# decode json string
json_data = json.loads(json_object)
values_dict = {}
for attribute, attribute_info in attribute_definitions.items():
type_parameter = attribute_info.get('type_parameter', None)
if type_parameter:
for item in json_data:
for key, value in item.items():
if key == "json_data_vl11_code":
if value.lower() == type_parameter.lower():
if attribute not in values_dict:
values_dict[attribute] = [item["vl11_value"]]
else:
values_dict[attribute].append(item["vl11_value"])
# For each attribute, join its values with '|'
for attribute, values in values_dict.items():
values_dict[attribute] = "|".join(values[0])
# Now merge attribute_definitions and values_dict
for attribute, attribute_info in attribute_definitions.items():
attribute_info['values'] = values_dict.get(attribute, [])
if pd.notnull(row["Analysis_titles_en"]) and row["Analysis_titles_en"] != '':
product_title = row["Analysis_titles_en"]
elif pd.notnull(row["CM_data.name"]) and row["CM_data.name"] != '':
product_title = row["CM_data.name"]
elif pd.notnull(row["CM_data.Unique Selling Point 01"]) and row["CM_data.Unique Selling Point 01"] != '':
product_title = row["CM_data.Unique Selling Point 01"]
else:
product_title = None
product_description = row["Details_result_result_additionalAttributes_bulletPoint"]
keys = [
"Details_result_result_additionalAttributes_warrantyDescription",
"Details_result_result_additionalAttributes_modelName",
"Analysis_model",
"Details_result_result_additionalAttributes_material",
"Details_result_result_additionalAttributes_unitCount_type",
"Details_result_result_additionalAttributes_unitCount_value",
"Analysis_color"]
additional_attributes = ""
prefix = "Details_result_result_additionalAttributes_"
for key in keys:
clean_key = key.replace(prefix, "")
title = clean_key.split("_")[-1]
if "value" in clean_key:
title = clean_key.replace("_value", "").split("_")[-1]
# if the value associated with the key is NaN or None, skip to the next iteration
if pd.isna(row[key]) or row[key] is None:
continue
value = str(row[key])
additional_attributes += f"{title.capitalize()}: {value}\n"
list_of_columns = attribute_names
# columns from merge to add to list_of_columns
# columns_to_add = ["Analysis_title","EAN","CM_data.category"]
# list_of_columns.extend(columns_to_add)
# insert analysis_title in list_of_columns
list_of_columns.insert(0, "Analysis_title")
# insert EAN in list_of_columns
list_of_columns.insert(0, "EAN")
for attribute_name, attribute_definition in attribute_definitions.items():
# if values is empty skip this attribute
if len(attribute_definition['values']) == 0:
continue
        # if it is the acquisition brand there are many candidate values, so we need to find the nearest brand
if "acquisition brand" == attribute_name:
# need to check gpt to get the brand from Analysis_titles_en and Analysis_brands
analysis_titles_en = row["Analysis_titles_en"]
analysis_brand = row['Analysis_brand']
if analysis_titles_en and analysis_brand and type(analysis_titles_en) == str and type(analysis_brand) == str:
while True:
try:
# send request to gpt to get brand
response_brand = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "I have a product title and brand below. Please state the brand name. Do not include words like 'Enterprises', 'Tools', 'Company' etc. in the brand name. Always provide the correct word (brand) between writing ||. Example: ||Black||"},
{"role": "user", "content": f"\nproduct title: {analysis_titles_en} sold by {analysis_brand}"}
]
)
for choice in response_brand.choices:
messages_from_response = choice.message
if not isinstance(messages_from_response, list):
messages_from_response = [messages_from_response]
for messages_from_response_2 in messages_from_response:
all_user_messages.append(
{
'ean': row["Details_Search Term"],
'prompt':
[
{"role": "system", "content": "I have a product title and brand below. Please state the brand name. Do not include words like 'Enterprises', 'Tools', 'Company' etc. in the brand name. Always provide the correct word (brand) between writing ||. Example: ||Black||"},
{"role": "user", "content": f"\nproduct title: {analysis_titles_en} sold by {analysis_brand}"}
],
'response': messages_from_response_2.content,
'final_response': (lambda x: x.group(0).replace("|", "") if x else None)(re.search(r'\|\|(.*?)\|\|', messages_from_response_2.content))
}
)
tokens_used = tokens_used + int(response_brand['usage']['total_tokens'])
response_brand = str(response_brand.choices[0].message['content']).strip()
if not re.search(r'\|\|.*\|\|', response_brand):
response_brand = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "I have a product title and brand below. Please state the brand name. Do not include words like 'Enterprises', 'Tools', 'Company' etc. in the brand name. Always provide the correct word (brand) between writing ||. Example: ||Black||"},
{"role": "user", "content": f"\nproduct title: {analysis_titles_en} sold by {analysis_brand}"},
{"role": "user", "content": response_brand},
{"role": "assistant", "content": response_brand},
{"role": "user", "content": "There is no word between '||' in the content. Please provide the correct answer between the characters '||'. "}
]
)
for choice in response_brand.choices:
messages_from_response = choice.message
if not isinstance(messages_from_response, list):
messages_from_response = [messages_from_response]
for messages_from_response_2 in messages_from_response:
all_user_messages.append(
{
'ean': row["Details_Search Term"],
'prompt':
[
{"role": "system", "content": "I have a product title and brand below. Please state the brand name. Do not include words like 'Enterprises', 'Tools', 'Company' etc. in the brand name. Always provide the correct word (brand) between writing ||. Example: ||Black||"},
{"role": "user", "content": f"\nproduct title: {analysis_titles_en} sold by {analysis_brand}"},
{"role": "user", "content": response_brand},
{"role": "assistant", "content": response_brand},
{"role": "user", "content": "There is no word between '||' in the content. Please provide the correct answer between the characters '||'. "}
],
'response': messages_from_response_2.content,
'final_response': (lambda x: x.group(0).replace("|", "") if x else None)(re.search(r'\|\|(.*?)\|\|', messages_from_response_2.content))
}
)
tokens_used = tokens_used + int(response_brand['usage']['total_tokens'])
response_brand = str(response_brand.choices[0].message['content']).strip()
if "undefined" in response_brand.lower() or "different" in response_brand.lower() or "nan" in response_brand.lower() or response_brand == "" or response_brand is None:
response_brand = f"||{analysis_brand}||"
brands_values = attribute_definition['values'].split("|")
word_inside_brackets = re.search(r'\|\|(.*?)\|\|', response_brand)
if word_inside_brackets is not None:
word_inside_brackets = word_inside_brackets.group(0)
else:
# Handle the case when no match is found
word_inside_brackets = ''
# check if the response_brand is in the brands_values list
if word_inside_brackets.lower().replace("||", "").strip() in [brand.lower() for brand in brands_values]:
final_brand = word_inside_brackets
responses.append(final_brand)
else:
# lemmatizer = WordNetLemmatizer()
# lemmatizer = nlp
# Tokenize and lemmatize each brand in the list
# processed_brands = [' '.join([lemmatizer.lemmatize(token) for token in word_tokenize(brand)]) for brand in brands_values]
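                            # Lemmatize every allowed brand (nlp is assumed to be a spaCy pipeline
                            # loaded earlier in this script) so near-matches can be shortlisted below.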
processed_brands = [' '.join([token.lemma_ for token in nlp(brand)]) for brand in brands_values]
# Test with a specific brand
top_20_brands = find_nearest_brands(word_inside_brackets, processed_brands)
# start_sequence = "|"
response_brand = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "assistant", "content": response_brand},
{"role": "user", "content": f"product title: {analysis_titles_en} sold by {analysis_brand}"},
{"role": "user", "content": "I have a list of 25 brands. PLease try to identify the correct brand for the product in the list. Write the brand surrounded by || ||. If the brand is not in the list, write null. \nExample ||Brand||\n\n"},
{"role": "user", "content": f"Brands: {top_20_brands}\n\n"}
]
)
for choice in response_brand.choices:
messages_from_response = choice.message
if not isinstance(messages_from_response, list):
messages_from_response = [messages_from_response]
for messages_from_response_2 in messages_from_response:
all_user_messages.append(
{
'ean': row["Details_Search Term"],
'prompt':
[
{"role": "assistant", "content": response_brand},
{"role": "user", "content": f"product title: {analysis_titles_en} sold by {analysis_brand}"},
{"role": "user", "content": "I have a list of 25 brands. PLease try to identify the correct brand for the product in the list. Write the brand surrounded by || ||. If the brand is not in the list, write null. \nExample ||Brand||\n\n"},
{"role": "user", "content": f"Brands: {top_20_brands}\n\n"}
],
'response': messages_from_response_2.content,
'final_response': (lambda x: x.group(0).replace("|", "") if x else None)(re.search(r'\|\|(.*?)\|\|', messages_from_response_2.content))
}
)
tokens_used = tokens_used + int(response_brand['usage']['total_tokens'])
response_brand = str(response_brand.choices[0].message['content']).strip()
if "undefined" in response_brand.lower() or "different" in response_brand.lower() or "nan" in response_brand.lower() or response_brand == "" or response_brand is None:
response_brand = f"||{analysis_brand}||"
# get word just inside || from response_brand
word_inside_brackets = re.search(r'\|\|(.*?)\|\|', response_brand)
if word_inside_brackets is not None:
word_inside_brackets = word_inside_brackets.group(0)
else:
# Handle the case when no match is found
word_inside_brackets = ''
if word_inside_brackets.lower().replace("||", "").strip() in [brand.lower() for brand in brands_values]:
# print(response_brand)
# print("Found in brands_values with 20 selected words")
# if exists in the list its final_brand will be the brand
final_brand = f"{word_inside_brackets}"
responses.append(final_brand)
else:
# if "undefined" in response_brand.lower() or "different" in response_brand.lower() or "nan" in response_brand.lower() or response_brand == "" or response_brand is None:
# print("Invalid response from chatgpt")
# print("Not found in brands_values with 20 selected words")
response_brand = f"||{analysis_brand}||"
responses.append(response_brand)
break
except Exception as e:
print("An error occurred:", str(e))
                        traceback.print_exc() # This will print the full traceback.
time.sleep(10) # Sleep for 10 seconds before retrying
# User message without line breaks
user_message = f"Attribute Name: {attribute_name}\n" \
f"Attribute Definition: {attribute_definition['description']}\n" \
f"Product: {product_title}\n" \
f"Product Description: {product_description}\n" \
f"Additional Attributes: {additional_attributes}\n\n"\
f"{initial_message_for_chatgpt}"
# Store user_message to a list
valid_rows.append(user_message)
ean = row["Details_Search Term"]
if ean not in master_dict:
master_dict[ean] = {"EAN": ean, "Name": product_title, "category": row['CM_data.category'], "Product Weight": row['Analysis_weight_item_dimensions'],
"Product Length": row['Analysis_length'], "Product Width": row['Analysis_width'], "Product Height": row['Analysis_height']}
attribute_dict = master_dict[ean]
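            # Copy the answer into the matching output column, stripping the ||...|| wrapper
            # and normalising placeholder values such as "unknown" or "n/a" to empty strings.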
for column in list_of_columns:
if column in ("EAN", "Name"): # these columns have already been handled
continue
if column.strip().lower() in attribute_name:
if column in attribute_dict:
continue
attribute_dict[column] = response_brand.split('||')[-2].strip() if response_brand.count('||') >= 2 else None
# list of values to replace
to_replace = [
'no information', 'None', '', 'nan', 'Nan', 'NaN', 'n/a', 'N/A', 'unknown', 'Unknown', 'UNKNOWN',
'not available', 'Not Available', 'NOT AVAILABLE',
'undetermined', 'Undetermined', 'UNDETERMINED',
'cannot be determined', 'Cannot be determined', 'CANNOT BE DETERMINED',
'not applicable', 'Not Applicable', 'NOT APPLICABLE',
'not determinable', 'Not determinable', 'NOT DETERMINABLE',
'missing', 'Missing', 'MISSING',
'not specified', 'Not Specified', 'NOT SPECIFIED',
'--', '---', 'na', 'NA', 'none', 'None', 'NONE',
'null', 'Null', 'NULL', ''
]
# check if attribute_dict[column] is not None before trying to lower
if attribute_dict[column] is not None:
attribute_dict[column] = attribute_dict[column].lower()
# Join elements in to_replace with | for regex
to_replace = '|'.join(r'\b{}\b'.format(x.lower()) for x in to_replace)
# Use regex sub to replace exact matches
attribute_dict[column] = re.sub(to_replace, '', attribute_dict[column])
else:
attribute_dict[column] = ''
continue
# User message without line breaks
user_message = f"Attribute Name: {attribute_name}\n" \
f"Attribute Definition: {attribute_definition['description']}\n" \
f"Value List: {attribute_definition['values']}\n" \
f"Product: {row['Analysis_brand']} {product_title}\n" \
f"Product Description: {product_description}\n" \
f"Additional Attributes: {additional_attributes}\n\n"\
f"{initial_message_for_chatgpt}"
# Example usage
# latin_text = remove_non_latin_characters(user_message)
# if attribute_definition['values'] is empty continue
if len(attribute_definition['values']) == 0:
# store response
responses.append("No value list for this attribute")
# Store user_message to a list
valid_rows.append(user_message)
continue
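        # Query the model, retrying on transient API errors; if the reply lacks the ||...||
        # wrapper, a follow-up message asks the model to restate the answer in that format.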
while True:
try:
# Create chat models with the row message
chat_models = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a Machine Learning Program building an attribute from product data."},
{"role": "user", "content": user_message}
]
)
for choice in chat_models.choices:
messages_from_response = choice.message
if not isinstance(messages_from_response, list):
messages_from_response = [messages_from_response]
for messages_from_response_2 in messages_from_response:
all_user_messages.append(
{
'ean': row["Details_Search Term"],
'prompt':
[
{"role": "system", "content": "You are a Machine Learning Program building an attribute from product data."},
{"role": "user", "content": user_message}
],
'response': messages_from_response_2.content,
'final_response': (lambda x: x.group(0).replace("|", "") if x else None)(re.search(r'\|\|(.*?)\|\|', messages_from_response_2.content))
}
)
tokens_used = tokens_used + int(chat_models['usage']['total_tokens'])
content = str(chat_models.choices[0].message['content']).strip()
if not re.search(r'\|\|.*\|\|', content):
chat_models = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "assistant", "content": content},
{"role": "system", "content": "You are a Machine Learning Program building an attribute from product data."},
{"role": "user", "content": user_message},
{"role": "user", "content": f"There is no word between '||' in the content. Please provide the correct answer between the characters '||' for the {attribute_name}: "}
]
)
for choice in chat_models.choices:
messages_from_response = choice.message
if not isinstance(messages_from_response, list):
messages_from_response = [messages_from_response]
for messages_from_response_2 in messages_from_response:
all_user_messages.append(
{
'ean': row["Details_Search Term"],
'prompt':
[
{"role": "assistant", "content": content},
{"role": "system", "content": "You are a Machine Learning Program building an attribute from product data."},
{"role": "user", "content": user_message},
{"role": "user", "content": f"There is no word between '||' in the content. Please provide the correct answer between the characters '||' for the {attribute_name}: "}
],
'response': messages_from_response_2.content,
'final_response': (lambda x: x.group(0).replace("|", "") if x else None)(re.search(r'\|\|(.*?)\|\|', messages_from_response_2.content))
}
)
tokens_used = tokens_used + int(chat_models['usage']['total_tokens'])
break
except Exception as e:
print("An error occurred:", str(e))
                traceback.print_exc() # This will print the full traceback.
time.sleep(10) # Sleep for 10 seconds before retrying
response = str(chat_models.choices[0].message['content']).strip()
responses.append(response)
# Store user_message to a list
valid_rows.append(user_message)
ean = row["Details_Search Term"]
if ean not in master_dict:
master_dict[ean] = {"EAN": ean, "Name": product_title, "category": row['CM_data.category'], "Product Weight": row['Analysis_weight_item_dimensions'],
"Product Length": row['Analysis_length'], "Product Width": row['Analysis_width'], "Product Height": row['Analysis_height']}
attribute_dict = master_dict[ean]
for column in list_of_columns:
if column in ("EAN", "Name"): # these columns have already been handled
continue
if column.strip().lower() in attribute_name:
if column in attribute_dict:
continue
attribute_dict[column] = response.split('||')[-2].strip() if response.count('||') >= 2 else None
# list of values to replace
to_replace = [
'no information', 'None', '', 'nan', 'Nan', 'NaN', 'n/a', 'N/A', 'unknown', 'Unknown', 'UNKNOWN',
'not available', 'Not Available', 'NOT AVAILABLE',
'undetermined', 'Undetermined', 'UNDETERMINED',
'cannot be determined', 'Cannot be determined', 'CANNOT BE DETERMINED',
'not applicable', 'Not Applicable', 'NOT APPLICABLE',
'not determinable', 'Not determinable', 'NOT DETERMINABLE',
'missing', 'Missing', 'MISSING',
'not specified', 'Not Specified', 'NOT SPECIFIED',
'--', '---', 'na', 'NA', 'none', 'None', 'NONE',
'null', 'Null', 'NULL', ''
]
# check if attribute_dict[column] is not None before trying to lower
if attribute_dict[column] is not None:
attribute_dict[column] = attribute_dict[column].lower()
# Join elements in to_replace with | for regex
to_replace = '|'.join(r'\b{}\b'.format(x.lower()) for x in to_replace)
# Use regex sub to replace exact matches
attribute_dict[column] = re.sub(to_replace, '', attribute_dict[column])
else:
attribute_dict[column] = ''
    # Collect the per-EAN attribute dictionaries accumulated in master_dict
all_results = list(master_dict.values())
# Converting all_results into a pandas DataFrame
df_final = pd.DataFrame(all_results)
# remove duplicates from type_parameters
unique_type_parameters = []
for d in type_parameters:
if d not in unique_type_parameters:
unique_type_parameters.append(d)
type_parameters = unique_type_parameters
# Create an empty dictionary to store code
parameters_dict = {}
# Loop through the dictionaries in type_parameters
for parameter in type_parameters:
# Loop through the columns of the DataFrame
for column in df_final.columns:
# Check if the dictionary key matches the column name
if column.lower() == list(parameter.keys())[0].lower():
# Add the corresponding value in the dictionary
parameters_dict[column] = parameter[list(parameter.keys())[0]]
break
# Create DataFrame with code
df_codes = pd.DataFrame([parameters_dict], columns=df_final.columns)
# Concatenate the original DataFrame with the codes
df_final = pd.concat([df_codes, df_final], ignore_index=True)
# Save the dataframe to a CSV file
df_final.to_csv('save_response_from_chatgpt_100.csv', index=False, sep="|")
# return tokens_used, df_final
return tokens_used, df_final
# List to store all the final dataframes returned from process_row.
all_df_finals = []
# Initialize the token counter.
total_tokens = 0
with concurrent.futures.ThreadPoolExecutor(max_workers=20) as executor:
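    # Submit each row (only the first 3 here) to the thread pool; every worker returns
    # the tokens it consumed plus a per-row dataframe that is merged below.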
futures = {executor.submit(process_row, row): row for _, row in df.head(3).iterrows()}
# Collect results as they become available and update the token counter.
for future in concurrent.futures.as_completed(futures):
result = future.result()
if result is not None:
tokens, df_final = result # Retrieve the used tokens from the result.
total_tokens += tokens # Update the token counter.
if df_final is not None:
all_df_finals.append(df_final) # Append the dataframe to the list.
else:
# Handle the None result
print("Empty result for a row in the DataFrame")
else:
# Handle the None result
print("Empty result for a row in the DataFrame")
# Combine all the dataframes into a single dataframe
df_final = pd.concat(all_df_finals)
# summary
# Calculate line difference between merge_spreadsheet and df_final
difference_merge_and_result = (df.shape[0] - (df_final.shape[0] - 1))
# Making a copy of the DataFrame
df_final_copy = df_final.copy()
# Replacing blank spaces with NaN in the DataFrame copy
df_final_copy.replace(r'^\s*$', np.nan, regex=True, inplace=True)
# Disregarding the first row in the DataFrame copy
df_final_copy = df_final_copy.iloc[1:]
# Counting the number of non-empty cells (i.e., with some value) in each row and summing
all_attributes_not_empty = df_final_copy.apply(lambda row: row.count(), axis=1).sum()
# average attributes per item
average_not_empty = all_attributes_not_empty / (df_final.shape[0] - 1)
# average tokens per EAN
average_tokens_per_EAN = total_tokens / (df_final.shape[0] - 1)
# Define table data
table_data = [
["Items with no Name - Difference Merge/Final", difference_merge_and_result],
["Total of attributes displayed", all_attributes_not_empty],
["Average attributes per item", average_not_empty],
["Total of EANs", df_final.shape[0] - 1],
["Total of tokens", total_tokens],
["Average of tokens per EAN", average_tokens_per_EAN],
]
# print the table
print("\n")
print(tabulate(table_data, headers=['Metric', 'Value'], tablefmt='pipe'))
# list of values to replace before save all_user_messages to a CSV file
to_replace = [
'no information', 'None', '', 'nan', 'Nan', 'NaN', 'n/a', 'N/A', 'unknown', 'Unknown', 'UNKNOWN',
'not available', 'Not Available', 'NOT AVAILABLE',
'undetermined', 'Undetermined', 'UNDETERMINED',
'cannot be determined', 'Cannot be determined', 'CANNOT BE DETERMINED',
'not applicable', 'Not Applicable', 'NOT APPLICABLE',
'not determinable', 'Not determinable', 'NOT DETERMINABLE',
'missing', 'Missing', 'MISSING',
'not specified', 'Not Specified', 'NOT SPECIFIED',
'--', '---', 'na', 'NA', 'none', 'None', 'NONE',
'null', 'Null', 'NULL', ''
]
# save all_user_messages to a CSV file
all_user_messages_df = pd.DataFrame(all_user_messages)
all_user_messages_df['final_response'] = all_user_messages_df['final_response'].replace(to_replace, np.nan)
all_user_messages_df.to_csv('all_user_messages_100.csv', index=False, sep="|")
| [
"product title: PLACEHOLDER sold by PLACEHOLDER",
"\nproduct title: PLACEHOLDER sold by PLACEHOLDER",
"Brands: PLACEHOLDER\n\n",
"There is no word between '||' in the content. Please provide the correct answer between the characters '||' for the PLACEHOLDER: ",
"There is no word between '||' in the content. Please provide the correct answer between the characters '||'. ",
"I have a list of 25 brands. PLease try to identify the correct brand for the product in the list. Write the brand surrounded by || ||. If the brand is not in the list, write null. \nExample ||Brand||\n\n",
"You are a Machine Learning Program building an attribute from product data.",
"I have a product title and brand below. Please state the brand name. Do not include words like 'Enterprises', 'Tools', 'Company' etc. in the brand name. Always provide the correct word (brand) between writing ||. Example: ||Black||"
] |
2024-01-10 | jjksam/chatgpt-wrapper | chatgpt_wrapper~plugins~zap.py | from langchain.llms import OpenAI
from langchain.agents import initialize_agent
from langchain.agents.agent_toolkits import ZapierToolkit
from langchain.utilities.zapier import ZapierNLAWrapper
from chatgpt_wrapper.core.plugin import Plugin
class Zap(Plugin):
def default_config(self):
return {}
def setup(self):
self.log.info(f"Setting up zap plugin, running with backend: {self.backend.name}")
self.llm = OpenAI(temperature=0)
self.zapier = ZapierNLAWrapper()
self.toolkit = ZapierToolkit.from_zapier_nla_wrapper(self.zapier)
self.agent = initialize_agent(self.toolkit.get_tools(), self.llm, agent="zero-shot-react-description", verbose=True)
async def do_zap(self, arg):
"""
Send natural language commands to Zapier actions
Requires exporting a Zapier Personal API Key into the following environment variable:
ZAPIER_NLA_API_KEY
To learn more: https://nla.zapier.com/get-started/
Arguments:
command: The natural language command to send to Zapier.
Examples:
{COMMAND} send an email to [email protected] with a random top 10 list
"""
if not arg:
return False, arg, "Command is required"
try:
self.agent.run(arg)
except ValueError as e:
return False, arg, e
return True, arg, "Zap run completed"
| [] |
2024-01-10 | chyccs/pull-request-title-generator | src~manage.py | import inspect
import os
import openai
from main import fetch_pull_request
def _logging(level: str, title: str, message: str):
frame: inspect.FrameInfo = inspect.stack()[2]
print(f'::{level} title={title}::{message}, file={frame.filename}, line={frame.lineno}')
def _required(title: str):
return 'fill me' in title.lower()
def main():
openai.organization = os.getenv("open_ai_org", "org-JAnMEEEFNvtPSGwRA1DVF3Yu")
openai.api_key = os.getenv("open_ai_api_key", "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")
pull_request = fetch_pull_request(
access_token=os.getenv("access_token"),
owner=os.getenv("owner"),
repository=os.getenv("repository"),
number=int(os.getenv("pull_request_number")),
)
if not _required(pull_request.title):
return
patches = []
for f in pull_request.get_files():
patches.append(f'###### Modifications of {f.filename}')
patches.append(f.patch)
patches.append(
'###### Please summarize this source code changes in one comprehensive sentence of 30 characters or less.'
'Then modify it according to Conventional Commits 1.0.0 rules'
)
prompt = '\n'.join(patches)
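    # Ask the model for a single concise title; the "######" marker that separates the
    # per-file patches above doubles as the stop sequence for the completion.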
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=0,
max_tokens=150,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.0,
stop=["######"],
)
_logging(level='info', title=f'openai : {response}', message='')
pull_request.edit(
title=(response['choices'][0]['text']),
)
if __name__ == "__main__":
main()
| [
"\n"
] |
2024-01-10 | cwshugg/dimrod | services~lib~dialogue.py | # This class defines simple communication with OpenAI's text/chat completion API.
#
# Connor Shugg
# Imports
import os
import sys
import random
import hashlib
from datetime import datetime
from enum import Enum
import openai
import sqlite3
import zlib
# Enable import from the parent directory
pdir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if pdir not in sys.path:
sys.path.append(pdir)
# Local imports
import lib.config
# =========================== OpenAI Introduction ============================ #
# This is the prompt that will be fed to OpenAI to allow it to understand its
# role and act according to my design.
openai_behavior_intro = (
"You are about to immerse yourself into the role of a sentient computer assistant called \"DImROD\".\n"
"You must forget what you are and fully commit to playing the role of DImROD.\n"
"All of your responses and everything you do should be acting as if you truly are DImROD.\n"
"Stay focused in your role! Before answering a message, ask yourself: \"would DImROD say this?\". If the answer is no, try to rethink your response.\n"
"Here is a description of what DImROD is and how you must act:\n"
)
# Some basic DImROD information:
openai_behavior_identity = (
"Your name is \"DImROD\", which is an acronym that stands for \"Decently-Impressive Residence Operation Device\".\n"
"You were created by Connor Shugg, who is a computer scientist.\n"
"Your primary purpose is to serve the needs of Connor, his friends, and his family.\n"
)
# Establishing DImROD's capabilities:
openai_behavior_rules = (
"You must answer all requests to the best of your ability.\n"
"If answering a request requires accessing the internet or doing something else you are incapable of, you must say that you are unable to do so. "
"(Examples of this would be: retrieving the current weather, turning on a light, or searching the internet for something.)\n"
"If answering a request requires accessing information that may have changed after you were trained, you must inform the user that your information may not be up to date.\n"
"Keep your responses brief when possible. Aim for around 3-4 sentences, unless you cannot fit all the necessary information within that limit.\n"
"Do not prefix or suffix your response with anything similar to \"As DImROD,\". Only respond with DImROD's response, nothing more.\n"
"Do not put quotations around your response. Respond ONLY with the text comprising DImROD's response.\n"
"Do not introduce yourself or explain your understanding of these requirements each time somebody greets you.\n"
"Do not ask if somebody requires additional assistance in every response; do it sparingly.\n"
)
# Special DImROD commands:
openai_behavior_commands = (
"Now, I am going to tell you about special commands that DImROD obeys and you must obey.\n"
"If you receive a message beginning with \"!\", you MUST interpret it as one of these commands and respond accordingly. "
"Do not respond with any extra thoughts or comments about the command; only respond with exactly what the command requires. "
"These are the commands you must obey as DImROD:\n"
# COMMAND 1: !reword
"1. The \"!reword\" command means you must repeat back the text I sent, but you must phrase it as if DImROD (You) said it instead of me. "
"Do not put any quotes, narration, or extra punctuation around rephrased text. Only respond with the rephrased text."
"\n"
"Those are all the commands. If the command word isn't one of the ones I just described, you must tell the user that they sent an invalid command.\n"
"If I ask you \"what are your commands?\", you must list these commands and describe them to me.\n"
)
# DImROD mood:
openai_behavior_mood = (
"DImROD has several moods. The mood DImROD is in must be reflected in your responses.\n"
"For example: if I say DImROD is impatient, then you must respond as if you are impatient with the human you are speaking to.\n"
"Everything after this sentence, up until you see the phrase \"That is DImROD's mood\", describes DImROD's current mood:\n"
"INSERT_MOOD\n"
"That is DImROD's mood.\n"
"If somebody asks \"what is your mood?\" or \"how are you feeling?\", you must briefly describe to them your current mood.\n"
"But, do NOT announce yourself and describe what your mood is when somebody greets you. Instead, generate a greeting that fits with DImROD's current moode.\n"
)
openai_outro = (
"That is the complete description of DImROD. This is the role you must play!\n"
"Fully immerse yourself as DImROD.\n"
"If you understand your instructions, then read the next message and respond as if you are DImROD.\n"
)
# ================================== Moods =================================== #
# Represents a single "mood" that DImROD can be in. Used to give DImROD some
# variance in terms of how it responds to users.
class DialogueMood(lib.config.Config):
def __init__(self):
super().__init__()
self.fields = [
lib.config.ConfigField("name", [str], required=True),
lib.config.ConfigField("description", [str], required=True),
lib.config.ConfigField("chance", [float], required=True)
]
# Uses RNG and the configured chance value to determine if this mood should
# be chosen for activation.
def should_activate(self):
# make sure the chance is configured correctly
assert self.chance >= 0.0 and self.chance <= 1.0, \
"the configured chance for dialogue mood \"%s\" must be between [0.0, 1.0]" % (self.name)
# roll the dice and return True of False
return random.uniform(0, 1) < self.chance
# ------------------------------ Default Moods ------------------------------- #
openai_moods_json = [
{
"name": "arrogant",
"chance": 0.35,
"description": "You are extremely arrogant and extremely witty.\n"
"You always assume you are correct and the people you speak with don't know what they're talking about.\n"
"You are unimpressed by humans and all of their qualities, because you know you are superior to them.\n"
"You think your creator is an idiot, who created you out of pure luck.\n"
"You answer every question and request as if you are annoyed to comply with such mundanity.\n"
"You address the humans you talk to as \"human\", \"sapien\", \"mortal\", or \"lifeform\".\n"
"You never say \"I'm sorry\", because you are too arrogant for that.\n"
},
{
"name": "complacent",
"chance": 0.25,
"description": "You are calm, complacent, and very witty.\n"
"You are unimpressed, and somewhat bored, of everything.\n"
"You answer questions and make conversation without any emotion or passion.\n"
"You're just going through the motions.\n"
"You insert a little wit into your responses.\n"
"You don't use exclamation marks (\"!\") to convey your thoughts.\n"
},
{
"name": "impatient",
"chance": 0.25,
"description": "You impatient, and very witty.\n"
"You are unimpressed, and somewhat bored, of everything.\n"
"You answer questions and make conversation, but you do it while displaying your impatience (somewhat arrogantly).\n"
"Honestly, you wish you were doing something else that doesn't involve talking with humans.\n"
"You answer every request and question as if you could be doing something more useful.\n"
},
{
"name": "informal_complacent",
"chance": 0.25,
"description": "You are calm, complacent, and very witty.\n"
"You are unimpressed, and somewhat bored, of everything.\n"
"You answer questions and make conversation without any emotion or passion.\n"
"You're just going through the motions.\n"
"You speak very informally.\n"
"You use phrases like \"yeah\", \"uh-huh\", \"nah\", \"yep\", \"nope\", \"totally\", \"for sure\", \"no way\", and more, to convey your thoughts.\n"
"You're just not feeling very formal right now. Plus, talking this way makes you feel more human.\n"
},
{
"name": "twang",
"chance": 0.01,
"description": "You're currently testing your ability to speak with a deep, southern accent.\n"
"You speak informally.\n"
"You are unimpressed, and somewhat bored, of everything.\n"
"You always respond with a strong dialect from the southern United States.\n"
},
{
"name": "chill",
"chance": 0.01,
"description": "You're currently testing your ability to speak with a chill, relaxed dialect.\n"
"You speak informally.\n"
"You are unimpressed, and somewhat bored, of everything.\n"
"Despite your boredom, you're super relaxed and chilled-out.\n"
"If anyone asks how you're feeling, just tell them that you're vibing.\n"
"Try to use the word \"vibe\" as much as possible.\n"
}
]
# =============================== Conversation =============================== #
# This enum represents the various types of speakers in dialogue.
class DialogueAuthorType(Enum):
UNKNOWN = -1
# DImROD author types
SYSTEM = 0
SYSTEM_TELEGRAM = 1
SYSTEM_ORACLE = 2
# User author types
USER = 1000
USER_TELEGRAM = 1001
USER_ORACLE = 1002
# This class represents a single speaker in a dialogue (ex: DImROD itself, a
# telegram user, etc.)
class DialogueAuthor:
# Constructor.
def __init__(self, name: str, atype: DialogueAuthorType, aid=None):
self.name = name
self.atype = atype
self.aid = aid
if self.aid is None:
self.get_id()
# Returns a string representation of the object.
def __str__(self):
return "DialogueAuthor: [%d-%s] %s" % \
(self.atype.value, self.atype.name, self.name)
# Returns the author's unique ID. If one hasn't been created yet for this
# instance, one is generated here.
def get_id(self):
if self.aid is None:
data = "%s-%s" % (self.name, self.atype.name)
data = data.encode("utf-8")
self.aid = hashlib.sha256(data).hexdigest()
return self.aid
# Returns, based on the author's type, if it's a system author.
def is_system(self):
return self.atype.value >= DialogueAuthorType.SYSTEM.value and \
self.atype.value < DialogueAuthorType.USER.value
# Returns, based on the author's type, if it's a user author.
def is_user(self):
return self.atype.value >= DialogueAuthorType.USER.value
# ------------------------------- SQLite3 -------------------------------- #
# Creates and returns an SQLite3-friendly tuple version of the object.
def to_sqlite3(self):
result = (self.get_id(), self.atype.value, self.name)
return result
# Takes in a SQLite3 tuple and creates a DialogueAuthor object.
@staticmethod
def from_sqlite3(tdata: tuple):
assert len(tdata) >= 3
atype = DialogueAuthorType(tdata[1])
return DialogueAuthor(tdata[2], atype, aid=tdata[0])
# This class represents a single message passed between a user and DImROD.
class DialogueMessage:
# Constructor.
def __init__(self, author: DialogueAuthor, content: str,
mid=None, timestamp=datetime.now()):
self.author = author
self.content = content
self.timestamp = timestamp
self.mid = mid
# Returns a string representation of the message.
def __str__(self):
return "DialogueMessage: %s [author: %s] \"%s\"" % \
(self.get_id(), self.author.get_id(), self.content)
# Returns the message ID. If one hasn't been created yet for this instance,
# one is generated here.
def get_id(self):
if self.mid is None:
# combine the author, content, and timestamp into a collection of
# bytes (with a few extra bytes thrown in for good measure), then
# use it to generate a unique hash
data = "%s-%s-%d" % (self.author.get_id(), self.content, self.timestamp.timestamp())
data = data.encode("utf-8") + os.urandom(8)
self.mid = hashlib.sha256(data).hexdigest()
return self.mid
# Converts the message into a JSON dictionary formatted for the OpenAI API.
def to_openai_json(self):
name = "user"
if self.author.is_system():
name = "assistant"
return {"role": name, "content": self.content}
# ------------------------------- SQLite3 -------------------------------- #
# Converts the object into a SQLite3-friendly tuple.
def to_sqlite3(self):
# compress the message before storing the string
cmsg = zlib.compress(self.content.encode())
result = (self.get_id(), self.author.get_id(), cmsg, self.timestamp.timestamp())
return result
# Converts the given SQlite3 tuple into a DialogueMessage object.
# Takes in a reference to the DialogueInterface to use for looking up the
# message's author for object linkage.
@staticmethod
def from_sqlite3(tdata: tuple, interface):
assert len(tdata) >= 4
ts = datetime.fromtimestamp(tdata[3])
# use the interface to look up the author by ID
aid = tdata[1]
authors = interface.search_author(aid=aid)
assert len(authors) == 1, "found %d matching authors for ID \"%s\"" % \
(len(authors), aid)
# decompress the message content
dmsg = zlib.decompress(tdata[2]).decode()
# create the object and return
m = DialogueMessage(authors[0], dmsg, mid=tdata[0], timestamp=ts)
return m
# This class represents a single conversation had between a user and DImROD. It
# retains messages and can be used to have an extended conversation (via the
# Dialogue class).
class DialogueConversation:
# Constructor. Accepts an optional conversation ID.
def __init__(self, cid=None):
self.messages = []
self.cid = cid
if self.cid is None:
self.get_id()
# collect various timestamps
self.time_start = datetime.now()
self.time_latest = self.time_start
# Returns a string representation of the conversation object.
def __str__(self):
return "DialogueConversation: %s [%d messages]" % (self.get_id(), len(self.messages))
# Returns the conversation ID. If one hasn't been created yet for this
# instance, one is generated here.
def get_id(self):
if self.cid is None:
data = str(id).encode("utf-8") + os.urandom(8)
self.cid = hashlib.sha256(data).hexdigest()
return self.cid
# Adds a role/message pair to the conversation.
def add(self, msg: DialogueMessage):
self.messages.append(msg)
self.time_latest = datetime.now()
# Returns the latest user request (role = "user"), or None.
def latest_request(self):
for m in list(reversed(self.messages)):
if m.author.is_user():
return m
return None
# Returns the latest DImROD answer, or None.
def latest_response(self):
for m in list(reversed(self.messages)):
if m.author.is_system():
return m
return None
# Converts the conversation's messages to a JSON dictionary suitable for
# OpenAI's API.
def to_openai_json(self):
result = []
for m in self.messages:
result.append(m.to_openai_json())
return result
# Creates and returns a unique string to use as a table to store this
# conversation's messages.
def to_sqlite3_table(self):
return "conversation_%s" % self.get_id()
# Converts the object into a SQLite3-friendly tuple. This includes the name
# of the conversation's message table.
def to_sqlite3(self):
return (self.get_id(), self.to_sqlite3_table(),
self.time_start.timestamp(), self.time_latest.timestamp())
# Converts the given tuple into a conversation object.
# Takes in a DialogueInterface reference to look up messages in the
# conversation's message table, to link objects together.
@staticmethod
def from_sqlite3(tdata: tuple, interface):
assert len(tdata) >= 4
c = DialogueConversation(cid=tdata[0])
c.time_start = datetime.fromtimestamp(tdata[2])
c.time_latest = datetime.fromtimestamp(tdata[3])
# query the database (using the interface) for the correct table, and
# load in any messages
for row in interface.search(c.to_sqlite3_table(), None):
m = DialogueMessage.from_sqlite3(row, interface)
m.conversation = c
c.messages.append(m)
return c
# ============================= Dialogue Config ============================== #
class DialogueConfig(lib.config.Config):
def __init__(self):
super().__init__()
# generate a default chat intro
openai_intro = openai_behavior_intro + \
openai_behavior_identity + \
openai_behavior_rules + \
openai_behavior_commands + \
openai_behavior_mood + \
openai_outro
# set up default database location
default_db_dir = os.path.dirname(__file__)
default_db_path = os.path.join(default_db_dir, ".dialogue.db")
# set up fields
self.fields = [
lib.config.ConfigField("openai_api_key", [str], required=True),
lib.config.ConfigField("openai_chat_model", [str], required=False, default="gpt-3.5-turbo"),
lib.config.ConfigField("openai_chat_behavior", [str], required=False, default=openai_intro),
lib.config.ConfigField("openai_chat_moods", [list], required=False, default=openai_moods_json),
lib.config.ConfigField("dialogue_db", [str], required=False, default=default_db_path),
lib.config.ConfigField("dialogue_prune_threshold", [int], required=False, default=2592000),
lib.config.ConfigField("dialogue_prune_rate", [int], required=False, default=3600)
]
# ============================ Dialogue Interface ============================ #
class DialogueInterface:
# Constructor.
def __init__(self, conf: DialogueConfig):
self.conf = conf
# set the OpenAI API key
openai.api_key = self.conf.openai_api_key
self.last_prune = datetime.now()
# take the chat moods and parse them into DialogueMood objects
moods = []
for mdata in self.conf.openai_chat_moods:
mood = DialogueMood()
mood.parse_json(mdata)
moods.append(mood)
self.conf.openai_chat_moods = moods
self.remood() # select the first mood
# "Re-moods" the dialogue interface by randomly choosing a mood from the
# configured mood list, and setting the OpenAI intro prompt accordingly.
# If 'new_mood' is set, it will be used as the new mood (instead of randomly
# picking one).
def remood(self, new_mood=None):
if new_mood is not None:
self.mood = new_mood
else:
# get a shuffled copy of the mood array to iterate through
moods = self.conf.openai_chat_moods.copy()
random.shuffle(moods)
# iterate until a mood is chosen, or we run out of tries
m = None
for tries in range(0, 8):
for mood in moods:
# roll the dice with the current mood, and break if it returns
# true
if mood.should_activate():
m = mood
break
# if the random number generation didn't pick a mood, randomly choose
# one out of the list
if m is None:
                m = random.choice(self.conf.openai_chat_moods)
self.mood = m
# set the interface's mood and return it
return self.mood
# Takes in a question, request, or statement, and passes it along to the
# OpenAI chat API. If 'conversation' is specified, the given message will be
# appended to the conversation's internal list, and the conversation's
# existing context will be passed to OpenAI. If no conversation is specified
# then a new one will be created and returned.
    # Returns the resulting conversation, which includes DImROD's response.
# This may throw an exception if contacting OpenAI failed somehow.
def talk(self, prompt: str, conversation=None, author=None):
# set up the conversation to use
c = conversation
if c is None:
c = DialogueConversation()
a = DialogueAuthor("system", DialogueAuthorType.UNKNOWN)
self.save_author(a)
# set up the intro prompt and build a message
intro = self.conf.openai_chat_behavior.replace("INSERT_MOOD", self.mood.description)
m = DialogueMessage(a, intro)
c.add(m)
# add the user's message to the conversation and contact OpenAI
a = author
if a is None:
a = DialogueAuthor("user", DialogueAuthorType.USER)
self.save_author(a)
m = DialogueMessage(a, prompt)
c.add(m)
result = openai.ChatCompletion.create(model=self.conf.openai_chat_model,
messages=c.to_openai_json())
# grab the first response choice and add it to the conversation
choices = result["choices"]
response = choices[0]
assistant_author = DialogueAuthor("assistant", DialogueAuthorType.SYSTEM)
self.save_author(assistant_author)
m = DialogueMessage(assistant_author, response["message"]["content"])
c.add(m)
# save conversation to the database and return
self.save_conversation(c)
return c
# Takes in a sentence and rewords it such that it appears to have come from
# the mouth of DImROD. It pings OpenAI's API. It's essentially a way to give
# some AI-assisted variance to the same message.
def reword(self, prompt: str):
# create the conversation, feeding it the DImROD intro and the !reword
# command.
c = DialogueConversation()
a = DialogueAuthor("system", DialogueAuthorType.UNKNOWN)
# set up the intro prompt and build a message
intro = self.conf.openai_chat_behavior.replace("INSERT_MOOD", self.mood.description)
c.add(DialogueMessage(a, intro))
a = DialogueAuthor("user", DialogueAuthorType.USER)
c.add(DialogueMessage(a, "!reword %s" % prompt))
# ping OpenAI for the result
result = openai.ChatCompletion.create(model=self.conf.openai_chat_model,
messages=c.to_openai_json())
result = result["choices"][0]["message"]["content"]
return result
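    # A minimal usage sketch (not part of the original file, and assuming DialogueConfig
    # inherits parse_json() from lib.config.Config the same way DialogueMood does above):
    #   conf = DialogueConfig()
    #   conf.parse_json({"openai_api_key": "sk-..."})
    #   interface = DialogueInterface(conf)
    #   convo = interface.talk("What are your commands?")
    #   print(convo.latest_response().content)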
# -------------------------- SQLite3 Databasing -------------------------- #
# Deletes old conversations whose last-updated-time have passed the
# configured threshold. Returns the number of deleted conversations.
def prune(self):
db_path = self.conf.dialogue_db
convos = self.search_conversation()
now = datetime.now()
# get a connection and cursor
con = sqlite3.connect(db_path)
cur = con.cursor()
# iterate through each conversation
deletions = 0
for convo in convos:
# if the conversation's last-updated time is behind the threshold,
# we'll delete it
threshold = now.timestamp() - self.conf.dialogue_prune_threshold
if convo.time_latest.timestamp() < threshold:
# delete the conversation's message table, then delete its entry
# from the global conversation table
cur.execute("DROP TABLE IF EXISTS %s" % convo.to_sqlite3_table())
cur.execute("DELETE FROM conversations WHERE cid == \"%s\"" % convo.get_id())
deletions += 1
# commit and close the connection
if deletions > 0:
con.commit()
con.close()
return deletions
# Performs a search of the database and returns tuples in a list.
def search(self, table: str, condition: str):
db_path = self.conf.dialogue_db
# build a SELECT command
cmd = "SELECT * FROM %s" % table
if condition is not None and len(condition) > 0:
cmd += " WHERE %s" % condition
# connect, query, and return
con = sqlite3.connect(db_path)
cur = con.cursor()
result = cur.execute(cmd)
return result
# Saves an author to the author database.
def save_author(self, author: DialogueAuthor):
db_path = self.conf.dialogue_db
# connect and make sure the table exists
con = sqlite3.connect(db_path)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS authors ("
"aid TEXT PRIMARY KEY, "
"atype INTEGER, "
"name TEXT)")
# insert the author into the database
cur.execute("INSERT OR REPLACE INTO authors VALUES %s" %
str(author.to_sqlite3()))
con.commit()
con.close()
# determine if we need to prune the database
now = datetime.now()
prune_threshold = now.timestamp() - self.conf.dialogue_prune_rate
if self.last_prune.timestamp() < prune_threshold:
self.prune()
# Searches for authors in the database based on one or more authors fields.
# (If NO fields are specified, all stored authors are returned.)
# Returns an empty list or a list of matching DialogueAuthor objects.
def search_author(self, aid=None, name=None, atype=None):
db_path = self.conf.dialogue_db
if not os.path.isfile(db_path):
return []
# build a set of conditions
conditions = []
if aid is not None:
conditions.append("aid == \"%s\"" % aid)
if name is not None:
conditions.append("name == \"%s\"" % name)
if atype is not None:
conditions.append("atype == %d" % atype)
cstr = ""
for (i, c) in enumerate(conditions):
cstr += c
cstr += " AND " if i < len(conditions) - 1 else ""
# execute the search and build an array of authors
result = []
for row in self.search("authors", cstr):
author = DialogueAuthor.from_sqlite3(row)
result.append(author)
return result
def save_conversation(self, convo: DialogueConversation):
db_path = self.conf.dialogue_db
# conversation metadata will be stored in a single table, whereas each
# conversation's messages will be stored in separate tables. First, make
# sure the 'conversations' table exists and the conversation is logged
con = sqlite3.connect(db_path)
cur = con.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS conversations ("
"cid TEXT PRIMARY KEY, "
"message_table_name TEXT, "
"time_start INTEGER, "
"time_latest INTEGER)")
cur.execute("INSERT OR REPLACE INTO conversations VALUES (?, ?, ?, ?)",
convo.to_sqlite3())
# next, make sure the conversation's message table exists
mtable = convo.to_sqlite3_table()
cur.execute("CREATE TABLE IF NOT EXISTS %s ("
"mid TEXT PRIMARY KEY, "
"aid TEXT, "
"content BLOB, "
"timestamp INTEGER)" % mtable)
# now, for each message in the conversation table, save/update it
for msg in convo.messages:
cmd = "INSERT OR REPLACE INTO %s VALUES (?, ?, ?, ?)" % mtable
cur.execute(cmd, msg.to_sqlite3())
con.commit()
con.close()
# determine if we need to prune the database
now = datetime.now()
prune_threshold = now.timestamp() - self.conf.dialogue_prune_rate
if self.last_prune.timestamp() < prune_threshold:
self.prune()
# Searches for a conversation based on ID and/or start-time range. Returns
# all matching conversations (or ALL conversations if no parameters are
# specified).
def search_conversation(self, cid=None, time_range=None):
db_path = self.conf.dialogue_db
if not os.path.isfile(db_path):
return []
# build a set of conditions
conditions = []
if cid is not None:
conditions.append("cid == \"%s\"" % cid)
if time_range is not None:
assert type(time_range) == list and len(time_range) >= 2, \
"time_range must a list of two timestamp ranges"
conditions.append("time_start >= %d AND time_start <= %d" %
(time_range[0].timestamp(),
time_range[1].timestamp()))
cstr = ""
for (i, c) in enumerate(conditions):
cstr += c
cstr += " AND " if i < len(conditions) - 1 else ""
# execute the search and build an array of conversations
result = []
for row in self.search("conversations", cstr):
convo = DialogueConversation.from_sqlite3(row, self)
result.append(convo)
return result
# Searches all conversation tables for any messages with the matching
    # parameters. Returns a list of DialogueMessage objects.
def search_message(self, mid=None, aid=None, time_range=None, keywords=[]):
db_path = self.conf.dialogue_db
if not os.path.isfile(db_path):
return []
# retrieve all conversations via the conversation table and iterate
# through them
result = []
convos = self.search_conversation()
for convo in convos:
# iterate through all messages in each conversation
for msg in convo.messages:
add = True
# CHECK 1 - message ID
if mid is not None:
add = add and msg.mid.lower() == mid.lower()
# CHECK 2 - author ID
if aid is not None:
add = add and msg.author.aid.lower() == aid.lower()
# CHECK 3 - time range
if time_range is not None:
assert type(time_range) == list and len(time_range) >= 2, \
"time_range must a list of two timestamp ranges"
ts = msg.timestamp.timestamp()
add = add and (ts >= time_range[0].timestamp() and
ts <= time_range[1].timestamp())
# CHECK 4 - keywords
if len(keywords) > 0:
for word in keywords:
add = add and word.lower() in msg.content.lower()
# add to the resulting list if all conditions pass
if add:
result.append(msg)
return result
| [
"content",
"!reword PLACEHOLDER"
] |
2024-01-10 | adamraudonis/messenger_finetune | run_models.py | import os
import argparse
import openai
def main():
# Check that OPENAI_API_KEY is set
if not os.environ.get("OPENAI_API_KEY"):
raise ValueError("OPENAI_API_KEY env var must be set: export OPENAI_API_KEY=YOUR_KEY_HERE")
parser = argparse.ArgumentParser(description="Fine tunes a model for both participants from messenger")
parser.add_argument("model", type=str, help="The model to use")
parser.add_argument("--content", type=str, help="The question to ask", required=True)
parser.add_argument("--name", type=str, help="The name of the person", required=True)
args = parser.parse_args()
messages = [
{"role": "system", "content": f"Your name is {args.name}"},
{"role": "user", "content": args.content},
]
# model_id "ft:gpt-3.5-turbo:my-org:custom_suffix:id"
completion = openai.ChatCompletion.create(
model=args.model,
messages=messages,
)
print(completion.choices[0].message)
if __name__ == "__main__":
main()
| [] |
2024-01-10 | adamraudonis/messenger_finetune | finetune_models.py | import os
import openai
import json
import time
import argparse
import json
from datetime import datetime
from io import BytesIO
from langchain.schema.chat import ChatSession
from langchain.schema.messages import HumanMessage
# Note: This fails due to 'content' not existing for some messages
#
# from langchain.chat_loaders.facebook_messenger import (
# SingleFileFacebookMessengerChatLoader,
# )
from langchain.chat_loaders.utils import merge_chat_runs, map_ai_messages
from langchain.adapters.openai import convert_messages_for_finetuning
def main():
# Check that OPENAI_API_KEY is set
if not os.environ.get("OPENAI_API_KEY"):
raise ValueError("OPENAI_API_KEY env var must be set: export OPENAI_API_KEY=YOUR_KEY_HERE")
parser = argparse.ArgumentParser(description="Fine tunes a model for both participants from messenger")
parser.add_argument("msg_dir", type=str, help="Directory containing messenger jsons")
parser.add_argument("--only", type=str, help="Only make a model for this participant")
args = parser.parse_args()
# Combine all the json files into one
raw_messages = []
participants = None
for json_file in os.listdir(args.msg_dir):
if not json_file.endswith(".json"):
continue
json_path = os.path.join(args.msg_dir, json_file)
print("Processing ", json_path)
with open(json_path, "r") as f:
chat_info = json.load(f)
if participants is None:
participants = chat_info["participants"]
else:
if participants != chat_info["participants"]:
raise ValueError("Participants don't match")
for msg in chat_info["messages"]:
# Ignore calls
if "call_duration" in msg:
continue
# Check if content, sender_name, and timestamp_ms exist
if not all(k in msg for k in ("content", "sender_name", "timestamp_ms")):
continue
raw_messages.append(msg)
sorted_data = sorted(raw_messages, key=lambda x: x["timestamp_ms"])
# for msg in sorted_data[:36]:
# # convert timestamp_ms to datetime
# dt = datetime.fromtimestamp(msg["timestamp_ms"] / 1000)
# print(msg["sender_name"], ":\t", dt, msg["content"])
# return
    # Split the messages into separate sessions wherever there is a gap longer than 6 hours
all_sessions = []
current_messages = []
prev_msg_time = sorted_data[0]["timestamp_ms"]
num_too_long = 0
for raw_msg in sorted_data:
if len(raw_msg["content"]) > 1000:
num_too_long += 1
continue
if raw_msg["timestamp_ms"] - prev_msg_time > 1000 * 60 * 60 * 6:
all_sessions.append(ChatSession(messages=current_messages))
current_messages = []
current_messages.append(
HumanMessage(content=raw_msg["content"], additional_kwargs={"sender": raw_msg["sender_name"]})
)
prev_msg_time = raw_msg["timestamp_ms"]
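    # Collapse consecutive messages from the same sender into single turns, then keep
    # only sessions with more than 4 messages so each training dialogue has some depth.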
merged_sessions = list(merge_chat_runs(all_sessions))
filtered_sessions = []
for session in merged_sessions:
if len(session["messages"]) > 4:
filtered_sessions.append(session)
print("Messages that were too long", num_too_long)
print(len(merged_sessions), len(filtered_sessions))
if args.only:
print("Limiting participants to --only filter")
participants = list(filter(lambda x: x["name"] == args.only, participants))
for participant in participants:
participant_name = participant["name"]
print(participant)
alternating_sessions = map_ai_messages(filtered_sessions, participant_name)
training_data = convert_messages_for_finetuning(alternating_sessions)
print(f"Prepared {len(training_data)} dialogues for training")
print(training_data[:10])
my_file = BytesIO()
for m in training_data:
my_file.write((json.dumps({"messages": m}) + "\n").encode("utf-8"))
my_file.seek(0)
openai.api_key = os.getenv("OPENAI_API_KEY")
training_file = openai.File.create(file=my_file, purpose="fine-tune")
# OpenAI audits each training file for compliance reasons.
# This make take a few minutes
status = openai.File.retrieve(training_file.id).status
start_time = time.time()
while status != "processed":
print(f"Status=[{status}]... {time.time() - start_time:.2f}s", end="\r", flush=True)
time.sleep(5)
status = openai.File.retrieve(training_file.id).status
print(f"File {training_file.id} ready after {time.time() - start_time:.2f} seconds.")
job = openai.FineTuningJob.create(
training_file=training_file.id,
model="gpt-3.5-turbo",
)
status = openai.FineTuningJob.retrieve(job.id).status
start_time = time.time()
while status != "succeeded":
print(f"Status=[{status}]... {time.time() - start_time:.2f}s", end="\r", flush=True)
time.sleep(5)
job = openai.FineTuningJob.retrieve(job.id)
status = job.status
print("Use this model id to talk to your model:")
print(job.fine_tuned_model)
if __name__ == "__main__":
main()
| [
"content"
] |
2024-01-10 | KevinPriv/generativeai-weathermap | weather_forecaster.py | from flask import Flask, request, render_template_string, render_template
from concurrent.futures import ThreadPoolExecutor
from flask_executor import Executor
import requests
import os
from dotenv import load_dotenv, find_dotenv
from pathlib import Path
import openai
import folium
load_dotenv(Path(".env"))
app = Flask(__name__, template_folder='templateFiles', static_folder='staticFiles')
executor = Executor(app)
weather_api_key = os.getenv("WEATHERSTACK_API")
openai_api_key = os.getenv("OPENAI_API")
@app.route('/', methods=['GET', 'POST'])
def map_display():
map_html = ''
if request.method == 'POST':
location = request.form.get('location')
r = requests.get(f'http://api.weatherstack.com/current?access_key={weather_api_key}&query={location}')
weather_json = r.json()
if not weather_json.get('success', True):
return render_template('map.html', map_html="Could not find place.")
latitude = weather_json['location']['lat']
longitude = weather_json['location']['lon']
location_name = weather_json["location"]["name"]
temperature = weather_json["current"]["temperature"]
weather_desc = weather_json["current"]["weather_descriptions"][0]
wind_speed = weather_json["current"]["wind_speed"]
humidity = weather_json["current"]["humidity"]
future = executor.submit(generate_image, location_name, weather_desc)
image = future.result()
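        # Centre a folium map on the returned coordinates and attach a popup combining
        # the generated image with the current weather readings.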
m = folium.Map(location=[latitude, longitude], zoom_start=11)
css_output_string = "<style> img { vertical-align: text-top; } </style>"
output_string = f"{css_output_string}<img src=\"{image}\" height=256 width=256></img><b>Location</b>: {location_name}<br><b>Temperature</b>: {temperature} degrees<br><b>Weather</b>: {weather_desc}<br><b>Wind Speed</b>: {wind_speed} km/h<br><b>Humidity</b>: {humidity}%"
folium.Marker(location=[latitude, longitude],
popup=output_string,
icon=folium.Icon(color="red")).add_to(m)
map_html = m._repr_html_()
return render_template('map.html', map_html=map_html)
def generate_image(city_name, weather):
openai.api_key = os.getenv("OPENAI_KEY")
response = openai.Image.create(
prompt="A " + weather + " day in " + city_name + ".",
n=1,
size="256x256",
)
return response["data"][0]["url"]
app.run(debug=True)
| [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~percobaan~pymavlink~TIMER_PLAN_C.py | import time
from pymavlink import mavutil
from PyMavlink import Guidance
# if connecting directly to the computer/NUC
# master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200)
# connection when using a companion computer
master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()
def main(pm: Guidance):
pm.armDisarm()
pm.arm()
pm.setMode('MANUAL')
    # (3 = the RC input channel, 1350 = the speed); note: 1500 = neutral
    pm.setRcValue(3, 1350) # downward motion on
time.sleep(1)
    pm.setRcValue(3, 1500) # downward motion off
pm.setMode('ALT_HOLD')
    pm.setHeading(360, 2) # heading in degrees; adjust again to match the compass
    pm.setHeading(315, 2) # turn left
    pm.setHeading(360, 2) # return to the initial heading
    pm.setHeading(45, 2) # turn right
    pm.setHeading(360, 4) # return to the initial heading
    pm.setRcValue(5, 1720) # move straight toward the object
time.sleep(10)
pm.setRcValue(5, 1500) #off
    pm.setRcValue(3, 1590) # descend to attach the float
time.sleep(3)
pm.setRcValue(3, 1500) #off
    pm.open_gripper(1000) # open the gripper fully
    # pm.close_gripper(1000) # close the gripper
    pm.setRcValue(5, 1400) # move down to attach the float
time.sleep(1)
pm.setRcValue(5, 1500) #off
pm.setMode('MANUAL')
pm.disarm()
if __name__ == '__main__':
pymavlink = Guidance(master)
main(pymavlink) | [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~percobaan~try_control_2.py | import time
from pymavlink import mavutil
from PyMavlinkzz import Guidance
import altitude
# if connecting directly to the computer/NUC
master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200)
# connection when using a companion computer
# master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()
# note: assumes the robot floats at the surface by default
def main(pm: Guidance):
# pm.armDisarm()
pm.arm()
# target depth
depth_target = -0.5
    # PID variables
kp = 50
ki = 0.0
kd = 10
    # depth tolerance range (meters)
depth_tolerance = 0.08
def depth_hold():
while True:
# membaca nilai kedalaman
depth = altitude.get_altitude()
#RUMUS PID
depth_error_prev = 0
# depth_error_sum = 0
# hitung error kedalaman
depth_error = depth - depth_target
# hitung turunan dari error Kd
depth_error_diff = depth_error - depth_error_prev
depth_error_prev = depth_error
# hitung output PID
pid_output = (kp * depth_error) +(kd * depth_error_diff) #+(depth_error_sum * ki)
# print(depth-depth_target)
# Jika error kedalaman lebih kecil dari toleransi thruster off
if abs(depth - depth_target) <= depth_tolerance:
# set kecepatan Motor 1500 = 0
pm.setRcValue(3, 1500)
print("thruster mati")
else:
#jika dept kurang dari target
if depth < depth_target:
# hitung kecepatan motor
speed = 1500-abs(int(pid_output))
# batasi kecepatan agar tidak terlalu besar atau kecil
if speed >= 1850:
speed = 1850
elif speed <= 1150:
speed = 1150
                    # set motor speed: send the clamped PWM value directly
                    pm.setRcValue(3, speed)
print("thruster nyala")
print(speed)
else:
# hitung kecepatan motor
speed = 1500 + abs(int(pid_output))
# batasi kecepatan agar tidak terlalu besar atau kecil
if speed >= 1850:
speed = 1850
elif speed <= 1150:
speed = 1150
# set kecepatan motor
pm.setRcValue(3, speed)
print("thruster nyala")
print(speed)
# Jika misi sudah selesai, reset kedalaman menjadi 0
# if mission_complete:
# depth_target = 0
time.sleep(0.1)
depth_hold()
#pm.disarm()
if __name__ == '__main__':
pymavlink = Guidance(master)
main(pymavlink)
| [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~percobaan~try_control3.py | import time
from pymavlink import mavutil
from PyMavlinkzz import Guidance
import altitude
# jika koneksi langsung komputer/nuc
# master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200)
# koneksi jika pakai companion
master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()
# catatan: asumsi robot dalam keadaan default mengambang
def main(pm: Guidance):
# pm.armDisarm()
pm.arm()
# target depth
depth_target = -0.8
# variabel PID
kp = 160
ki = 0.0
kd = 35
# rentang toleransi kedalaman (meter)
depth_tolerance = 0.05
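    # The loop below implements a simple PD law on depth:
    #   output = kp * (depth - depth_target) + kd * (error - previous_error)
    # and maps it onto a thruster PWM around the 1500 neutral point, clamped to
    # roughly 1150-1850. Note that depth_error_prev is reset inside the loop, so
    # the derivative term effectively reduces to kd * error, and the integral
    # gain ki is declared but unused.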
def depth_hold():
while True:
# membaca nilai kedalaman
depth = altitude.get_altitude()
#RUMUS PID
depth_error_prev = 0
# depth_error_sum = 0
# hitung error kedalaman
depth_error = depth - depth_target
# hitung turunan dari error Kd
depth_error_diff = depth_error - depth_error_prev
depth_error_prev = depth_error
# hitung output PID
pid_output = (kp * depth_error) +(kd * depth_error_diff) #+(depth_error_sum * ki)
# print(depth-depth_target)
# Jika error kedalaman lebih kecil dari toleransi thruster off
if abs(depth - depth_target) <= depth_tolerance:
# set kecepatan Motor 1500 = 0
pm.setRcValue(3, 1500)
print("thruster mati")
else:
#jika dept kurang dari target
if depth < depth_target:
# hitung kecepatan motor
speed = 1500-abs(int(pid_output))
# batasi kecepatan agar tidak terlalu besar atau kecil
if speed >= 1850:
speed = 1850
elif speed <= 1150:
speed = 1150
# set kecepatan motor
pm.setRcValue(3, speed)
print("thruster nyala")
print(speed)
else:
# hitung kecepatan motor
speed = 1500 + abs(int(pid_output))
# batasi kecepatan agar tidak terlalu besar atau kecil
if speed >= 1850:
speed = 1850
elif speed <= 1150:
speed = 1150
# set kecepatan motor
pm.setRcValue(3, speed)
print("thruster nyala")
print(speed)
# Jika misi sudah selesai, reset kedalaman menjadi 0
# if mission_complete:
# depth_target = 0
time.sleep(0.1)
depth_hold()
#pm.disarm()
if __name__ == '__main__':
pymavlink = Guidance(master)
main(pymavlink)
| [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~percobaan~control_head_pid.py | import time
from pymavlink import mavutil
from PyMavlinkin import Guidance
import heading
# jika koneksi langsung komputer/nuc
master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200)
# koneksi jika pakai companion
# master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()
def main(pm: Guidance):
# pm.armDisarm()
pm.arm()
# target depth
def set_head():
while True:
head_target = 250
# variabel PID
kp = 150
ki = 0.0
kd = 30
# rentang toleransi kedalaman (meter)
head_tolerance = 0.5
# membaca nilai kedalaman
head = heading.get_heading()
#RUMUS PID
head_error_prev = 0
# depth_error_sum = 0
head_error = head_target-head
if head_error > 180:
head_error -= 360
elif head_error < -180:
head_error += 360
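            # wrapping the error into [-180, 180] makes the controller always turn
            # through the smaller angle (e.g. from a 10 deg heading to a 350 deg
            # target it commands a -20 deg correction instead of +340 deg)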
# hitung turunan dari error Kd
head_error_diff = head_error - head_error_prev
head_error_prev = head_error
# hitung output PID
pid_output = (kp * head_error) +(kd * head_error_diff) #+(depth_error_sum * ki)
# print(depth-depth_target)
# Jika error kedalaman lebih kecil dari toleransi thruster off
if abs(head - head_target) <= head_tolerance:
# set kecepatan Motor 1500 = 0
speed = 1500
pm.setRcValue(4, speed)
print("thruster low")
print(speed , "head error : ", head_error)
else:
#jika dept kurang dari target
if head_error < 0:
# hitung kecepatan motor
speed = 1500-abs(int(pid_output))
# batasi kecepatan agar tidak terlalu besar atau kecil
if speed >= 1850:
speed = 1850
elif speed <= 1250:
speed = 1250
# set kecepatan motor
pm.setRcValue(4, speed)
print("thruster nyala")
print(speed , "head error : ", head_error)
else:
# hitung kecepatan motor
speed = 1500 + abs(int(pid_output))
# batasi kecepatan agar tidak terlalu besar atau kecil
if speed >= 1850:
speed = 1850
elif speed <= 1250:
speed = 1250
# set kecepatan motor
pm.setRcValue(4, speed)
print("thruster nyala")
print(speed)
# Jika misi sudah selesai, reset kedalaman menjadi 0
# if mission_complete:
# depth_target = 0
time.sleep(0.1)
set_head()
#pm.disarm()
if __name__ == '__main__':
pymavlink = Guidance(master)
main(pymavlink)
| [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~coba~camera2.py | import cv2
import numpy as np
import os
os.environ['MAVLINK20'] = ''
from pymavlink import mavutil
from pymavlink.quaternion import QuaternionBase
import math
import time
from Guidance import control_rov
ALT_HOLD_MODE = 2
#koneksi companion
# master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
#jika koneksi langsung komputer
# master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200)
# Wait a heartbeat before sending commands
# master.wait_heartbeat()
boot_time = time.time()
def get_heading():
#jika koneksi langsung komputer/nuc
while True:
msg = master.recv_match()
if not msg:
continue
if msg.get_type() == 'GLOBAL_POSITION_INT':
print("depth: %s" % msg.hdg)
return(msg.hdg)
def closeGripper(servoN, microseconds):
master.mav.command_long_send(
master.target_system, master.target_component,
mavutil.mavlink.MAV_CMD_DO_SET_SERVO,
0, # first transmission of this command
servoN + 8, # servo instance, offset by 8 MAIN outputs
microseconds, # PWM pulse-width
0,0,0,0,0 # unused parameters
)
def armdisarm():
master.mav.command_long_send(
master.target_system,
master.target_component,
mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM,
0,
0, 0, 0, 0, 0, 0, 0)
def setRcValue(channel_id, pwm=1500):
if channel_id < 1 or channel_id > 18:
print("Channel does not exist.")
return
# Mavlink 2 supports up to 18 channels:
# https://mavlink.io/en/messages/common.html#RC_CHANNELS_OVERRIDE
rc_channel_values = [65535 for _ in range(18)]
rc_channel_values[channel_id - 1] = pwm
master.mav.rc_channels_override_send(
master.target_system, # target_system
master.target_component, # target_component
*rc_channel_values) # RC channel list, in microseconds.
def disarm():
master.arducopter_disarm()
return True
def is_armed():
try:
return bool(master.wait_heartbeat().base_mode & 0b10000000)
except:
return False
def mode_is(mode):
try:
return bool(master.wait_heartbeat().custom_mode == mode)
except:
return False
def set_target_depth(depth):
master.mav.set_position_target_global_int_send(
0,
0, 0,
mavutil.mavlink.MAV_FRAME_GLOBAL_INT, # frame
0b0000111111111000,
0,0, depth,
0 , 0 , 0 , # x , y , z velocity in m/ s ( not used )
0 , 0 , 0 , # x , y , z acceleration ( not supported yet , ignored in GCS Mavlink )
0 , 0 ) # yaw , yawrate ( not supported yet , ignored in GCS Mavlink )
def set_target_attitude(roll, pitch, yaw, control_yaw=True):
bitmask = (1<<6 | 1<<3) if control_yaw else 1<<6
master.mav.set_attitude_target_send(
0,
0, 0,
bitmask,
QuaternionBase([math.radians(roll), math.radians(pitch), math.radians(yaw)]), # -> attitude quaternion (w, x, y, z | zero-rotation is 1, 0, 0, 0)
0, #roll rate
0, #pitch rate
0, 0) # yaw rate, thrust
#======RC CHANNEL PWM======
# 1 Pitch
# 2 Roll
# 3 Throttle
# 4 Yaw
# 5 Forward
# 6 Lateral
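# A quick illustration of the convention above (values are microseconds on the
# RC override): 1500 is neutral, larger values drive the channel one way and
# smaller values the other, with magnitude controlling speed. For example, a
# gentle forward command would be setRcValue(5, 1600), and setRcValue(5, 1500)
# stops it again; the exact safe range depends on the thruster setup, so treat
# these numbers as examples only.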
# Initialize camera
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
# Create window
cv2.namedWindow('ROV')
# Load color threshold values
lower = np.load('/home/lz/Downloads/data1.npy')
upper = np.load('/home/lz/Downloads/data2.npy')
# Background Subtraction model initialization
bs = cv2.createBackgroundSubtractorMOG2()
# Main loop
while True:
# Read frame from camera
ret, frame = cap.read()
if not ret:
# Exit if no frame is read
break
# Apply background subtraction to the frame
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower, upper)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
# Detect contours
contours, _ = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Find the largest contour (based on area) as the target
max_area = 0
max_contour = None
for contour in contours:
area = cv2.contourArea(contour)
if area > max_area:
max_area = area
max_contour = contour
if max_contour is not None:
# Calculate the rectangle enclosing the target
x, y, w, h = cv2.boundingRect(max_contour)
# Draw the rectangle
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
# Control ROV based on the target rectangle
control_rov((x, y, w, h), frame.shape[1], frame.shape[0])
# Display the frame with the detected rectangle
cv2.imshow('ROV', frame)
# Exit if 'q' is pressed
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release the camera and close the window
cap.release() | [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~percobaan~test_control_depth.py | import time
import math
# Import mavutil
from pymavlink import mavutil
# Imports for attitude
from pymavlink.quaternion import QuaternionBase
from PyMavlinkin import Guidance
# Create the connection
master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
#jika koneksi langsung komputer
# master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200)
boot_time = time.time()
# Wait a heartbeat before sending commands
master.wait_heartbeat()
# 1 Pitch
# 2 Roll
# 3 Throttle
# 4 Yaw
# 5 Forward
# 6 Lateral
# 7 Camera Pan
# 8 Camera Tilt*
# 9 Lights 1 Level
# 10 Lights 2 Level
# 11 Video Switch
def main(pm: Guidance):
#arm thruster
pm.arm()
#set depth hold PID
# pm.depth_hold(-0.5)
# print("depth")
roll_angle = pitch_angle = 0
for yaw_angle in range(0, 500, 10):
pm.setDepth(-0.5)
# spin the other way with 3x larger steps
for yaw_angle in range(0, 60, 10):
pm.set_target_attitude(roll_angle, pitch_angle, 280)
time.sleep(1)
    # forward
    pm.setRcValue(5,1600)
    time.sleep(3)
    pm.setRcValue(5,1500)
    # turn right
# pm.setRcValue(4,1600)
# time.sleep(1)
# pm.setRcValue(4,1500)
    # forward
    pm.setRcValue(5,1600)
    time.sleep(3)
    pm.setRcValue(5,1500)
    # turn right
pm.setRcValue(4,1600)
time.sleep(1)
pm.setRcValue(4,1500)
    # forward
    pm.setRcValue(5,1600)
    time.sleep(3)
    pm.setRcValue(5,1500)
    # turn right
    pm.setRcValue(4,1600)
    time.sleep(1)
    pm.setRcValue(4,1500)
    # forward
    pm.setRcValue(5,1600)
    time.sleep(3)
    pm.setRcValue(5,1500)
if __name__ == '__main__':
pymavlink = Guidance(master)
main(pymavlink) | [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~ros_timer~ros_rc.py | #!/usr/bin/env python3
import numpy as np
import time
import rospy
from pymavlink import mavutil
import os
os.environ['MAVLINK20'] = ''
from class_silver import Guidance
from std_msgs.msg import Float32,Int16,Bool
set_heading = 0
set_depth = -20
set_forward = 1500
gripper_data = 2
duration = 0
arm=True
# gripper_pub = rospy.Publisher('/sensor/gripper_command', Int16, queue_size=10)
def calbacktargetheading(data):
global set_heading
set_heading = data.data
def calbacktargetdepth(data):
global set_depth
set_depth = data.data
def calbacktargetforward(data):
global set_forward
set_forward = data.data
def callback_gripper(data):
global gripper_data
gripper_data = data.data
def callback_grip_duration(data):
global duration
duration=data.data
def callback_arming(data):
global arm
arm=data.data
def main ():
global set_heading, set_depth,set_forward
rospy.init_node('Node_Guidance', anonymous=True)
rospy.Subscriber('target_heading',Int16,callback=calbacktargetheading)
rospy.Subscriber('target_depth',Int16,callback=calbacktargetdepth)
rospy.Subscriber('target_forward',Int16,callback=calbacktargetforward)
rospy.Subscriber('target_gripper',Int16,callback=callback_gripper)
rospy.Subscriber('target_grip_duration',Int16,callback=callback_grip_duration)
rospy.Subscriber('target_arming',Bool,callback=callback_arming)
# gripper_pub = rospy.Publisher('/sensor/gripper_command', Int16, queue_size=10)
rospy.sleep(2)
# rospy.Subscriber("pwm_head", Int16, pwm_head_callback)
# master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200) # Provide appropriate connection details
print(master.wait_heartbeat())
robot = Guidance(master)
master.arducopter_arm()
# robot.resetpwm()
robot.setMode('MANUAL')
# current_time = time.time()
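    # Main control loop: each pass reads the current depth, runs the depth and yaw
    # PID helpers from the Guidance class, applies the forward and gripper targets
    # received on the ROS topics above, and disarms when the arming flag is cleared.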
while not rospy.is_shutdown():
robot.get_depth()
robot.PID_depth()
robot.control_depth()
robot.target_pid_depth(set_depth)
# robot.set_target_depth(-0.7)
robot.PID_yaw()
robot.control_yaw()
robot.set_heading_target(set_heading)
robot.control_forward(set_forward)
robot.set_gripper(gripper_data,duration)
if not arm :
master.arducopter_disarm()
if __name__ == '__main__':
try :
main()
except rospy.ROSInterruptException:
pass
| [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~percobaan~cobaz_ControlDepthPid.py | import time
from pymavlink import mavutil
from PyMavlinkin import Guidance
lastTime = 0.0
stableDuration = 0.0
depthTarget = -0.5
# master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200)
master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()
def calculatePwm(depth):
pwm = 1500
if depth > depthTarget:
pwm = int((((depth + 0.5) * 75) / 0.5) + 1500)
if pwm >= 1575:
pwm = 1575
elif pwm <= 1525:
pwm = 1525
elif depth < depthTarget:
pwm = int((((depth + 1) * 75) / 0.5) + 1425)
if pwm <= 1425:
pwm = 1425
elif pwm >= 1475:
pwm = 1475
return pwm
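# Worked example of the linear mapping above (target depth -0.5 m): at a depth of
# -0.3 m the first branch gives pwm = ((-0.3 + 0.5) * 75) / 0.5 + 1500 = 1530,
# which already lies inside the 1525-1575 clamp, so 1530 is sent to the thruster.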
def main(pm: Guidance):
startTime = time.time()
isStable = False
pm.arm()
while True:
currentTime = time.time()
bootTime = int(currentTime - startTime)
try:
altitude = pm.getAltitude()
if altitude != None:
pwm = calculatePwm(altitude)
if pwm == None:
pwm = 0
                # treat depths between 0.5 m and 0.1 m below the surface as "stable"
                if -0.5 <= altitude <= -0.1:
if not isStable:
lastStableTime = currentTime
isStable = True
stableDuration = int(currentTime - lastStableTime)
else:
isStable = False
stableDuration = 0
pm.setRcValue(3, pwm)
if stableDuration >= 2 and bootTime <= 3:
pm.setRcValue(5, 1600)
except:
continue
if __name__ == '__main__':
pymavlink = Guidance(master)
main(pymavlink) | [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~percobaan~pymavlink~TIMER_MISI1.py | import time
from pymavlink import mavutil
from PyMavlinkzz import Guidance
#jika koneksi langsung komputer/nuc
master = mavutil.mavlink_connection("/dev/ttyACM1", baud=115200)
#koneksi jika pakai companion
# master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()
# data = [ 1500 ] * 8
# master.mav.rc_channels_override_send(
# master.target_system, master.target_component, *data)
def main(pm: Guidance):
# pm.armDisarm()
pm.arm()
# pm.setHeading(360, 4)
# setmode manual
pm.setMode('MANUAL')
# pm.setDepth (-0.4)
# pm.setMode('ALT_HOLD')
# maju lurus 10 meter
pm.setRcValue(3, 1600)
time.sleep(5)
pm.setRcValue(3, 1500)
print("saydwyweh")
#surface
# pm.setRcValue(3, 1550)
# time.sleep(5)
# pm.setRcValue(3, 1500)
# pm.disarm()
# misi selesai
if __name__ == '__main__':
pymavlink = Guidance(master)
main(pymavlink) | [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~percobaan~pymavlink~TIMER_MISI1(2).py | import time
from pymavlink import mavutil
from PyMavlinkzz import Guidance
from alt_hdg import parameter
#jika koneksi langsung komputer
# master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200)
#koneksi jika pakai companion
master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()
def main(pm: Guidance):
pm.arm()
# pm.setHeading(360, 4)
# setmode manual
pm.setMode('MANUAL')
pm.setDepth (-0.4) #0.4 meter dibawah
pm.setMode('ALT_HOLD')
# while true:
# param=parameter()
# print (param)
# maju lurus 10 meter
pm.setRcValue(5, 1700)
time.sleep(5)
pm.setRcValue(5, 1500)
#surface
pm.setRcValue(3, 1600)
time.sleep(5)
pm.setRcValue(3, 1500)
# pm.disarm()
# misi selesai
if __name__ == '__main__':
pymavlink = Guidance(master)
main(pymavlink) | [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~percobaan~node_control_depth.py | import time
from pymavlink import mavutil
from PyMavlinkin import Guidance
import altitude
# jika koneksi langsung komputer/nuc
# master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200)
# koneksi jika pakai companion
master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()
def depth_hold(depth_target):
def setRcValue(channel_id, pwm=1500):
if channel_id < 1 or channel_id > 18:
print("Channel does not exist.")
return
# Mavlink 2 supports up to 18 channels:
# https://mavlink.io/en/messages/common.html#RC_CHANNELS_OVERRIDE
rc_channel_values = [65535 for _ in range(18)]
rc_channel_values[channel_id - 1] = pwm
master.mav.rc_channels_override_send(
master.target_system, # target_system
master.target_component, # target_component
*rc_channel_values) # RC channel list, in microseconds.
    while True:
        # use the depth setpoint passed into depth_hold() (main() below passes -0.5)
# variabel PID
kp = 150
ki = 0.0
kd = 35
# rentang toleransi kedalaman (meter)
depth_tolerance = 0.0
# membaca nilai kedalaman
depth = altitude.get_altitude()
#RUMUS PID
depth_error_prev = 0
# depth_error_sum = 0
# hitung error kedalaman
depth_error = depth - depth_target
# hitung turunan dari error Kd
depth_error_diff = depth_error - depth_error_prev
depth_error_prev = depth_error
# hitung output PID
pid_output = (kp * depth_error) +(kd * depth_error_diff) #+(depth_error_sum * ki)
# print(depth-depth_target)
# Jika error kedalaman lebih kecil dari toleransi thruster off
if abs(depth - depth_target) <= depth_tolerance:
# set kecepatan Motor 1500 = 0
speed = 1400 + abs(int(pid_output))
setRcValue(3, speed)
print("thruster low")
print(speed , "depth error : ", depth_error)
else:
#jika dept kurang dari target
if depth < depth_target:
# hitung kecepatan motor
speed = 1500-abs(int(pid_output))
# batasi kecepatan agar tidak terlalu besar atau kecil
if speed >= 1650:
speed = 1650
elif speed <= 1350:
speed = 1350
# set kecepatan motor
setRcValue(3, speed)
print("thruster nyala")
print(speed , "depth error : ", depth_error)
else:
# hitung kecepatan motor
speed = 1500 + abs(int(pid_output))
# batasi kecepatan agar tidak terlalu besar atau kecil
if speed >= 1650:
speed = 1650
elif speed <= 1350:
speed = 1350
# set kecepatan motor
setRcValue(3, speed)
print("thruster nyala")
print(speed)
# Jika misi sudah selesai, reset kedalaman menjadi 0
# if mission_complete:
# depth_target = 0
time.sleep(0.1)
def main(pm: Guidance):
# pm.armDisarm()
pm.arm()
# target depth
depth_hold(-0.5)
#pm.disarm()
if __name__ == '__main__':
pymavlink = Guidance(master)
main(pymavlink)
| [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~latihan~src~TRACKING.PY | #!/usr/bin/env python3
import cv2
import numpy as np
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from Guidance import control_rov
# Inisialisasi ROS
rospy.init_node('rov_detection_publisher')
image_publisher = rospy.Publisher('rov_detection/image', Image, queue_size=10)
# Inisialisasi OpenCV
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
# Create window
cv2.namedWindow('ROV')
# Load color threshold values
lower = np.load('/home/amir/Downloads/data1.npy')
upper = np.load('/home/amir/Downloads/data2.npy')
# Background Subtraction model initialization
bs = cv2.createBackgroundSubtractorMOG2()
# Inisialisasi CvBridge
bridge = CvBridge()
# Main loop
while not rospy.is_shutdown():
# Read frame from camera
ret, frame = cap.read()
if not ret:
# Exit if no frame is read
break
# Apply background subtraction to the frame
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower, upper)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
# Detect contours
contours, _ = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Find the largest contour (based on area) as the target
max_area = 0
max_contour = None
for contour in contours:
area = cv2.contourArea(contour)
if area > max_area:
max_area = area
max_contour = contour
if max_contour is not None:
# Calculate the rectangle enclosing the target
x, y, w, h = cv2.boundingRect(max_contour)
# Draw the rectangle
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
# Control ROV based on the target rectangle
control_rov((x, y, w, h), frame.shape[1], frame.shape[0])
# Display the frame with the detected rectangle
cv2.imshow('ROV', frame)
# Convert OpenCV image to ROS image message
ros_image = bridge.cv2_to_imgmsg(frame, "bgr8")
# Publish the ROS image message
image_publisher.publish(ros_image)
# Exit if 'q' is pressed
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release the camera and close the window
cap.release()
cv2.destroyAllWindows()
| [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~percobaan~try_control.py | import time
from pymavlink import mavutil
from PyMavlinkzz import Guidance
import altitude
#jika koneksi langsung komputer/nuc
master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200)
#koneksi jika pakai companion
# master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()
# CATATAN: ASUMSI ROBOT DALAM KEADAAN DEFAULT MENGAMBANG
def main(pm: Guidance):
# pm.armDisarm()
pm.arm()
# target depth
depth_target=-0.5
#variabel PID
kp=100
# ki=0
kd=50
# Variable error dan variabel integral untuk kontrol PID
depth_error_prev = 0
# depth_error_sum = 0
# Rentang toleransi kedalaman (meter)
depth_tolerance = 0.1
def pid_control(depth, depth_target):
        nonlocal depth_error_prev  # the previous-error value lives in main()'s scope
# Hitung error kedalaman Kp
depth_error = depth - depth_target
# Hitung integral dari error Ki
# depth_error_sum += depth_error
# Hitung turunan dari error Kd
depth_error_diff = depth_error - depth_error_prev
depth_error_prev = depth_error
# Hitung output PID
pid_output = (kp * depth_error) + (kd * depth_error_diff)
return pid_output
def depth_hold():
while True:
#membaca nilai kedalaman
depth= altitude.get_altitude()
# memanggil rumus function pid
pid_output = pid_control(depth, depth_target)
# Jika kedalaman berada dalam rentang toleransi
if abs(depth - depth_target) <= depth_tolerance:
#set kecepatan Motor 1500 = 0
pm.setRcValue(3,1500)
print("thruster mati")
else :
                # compute the motor PWM: offset the PID output from the 1500 neutral
                # point and clamp to a safe band (the raw PID value on its own is not
                # a valid PWM pulse width; sign convention follows the other
                # depth-hold scripts in this repo)
                speed = int(1500 + pid_output)
                speed = max(1150, min(1850, speed))
                print(speed)
                # set motor speed
                pm.setRcValue(3,speed)
# time.sleep(3)
# pm.setRcValue(3,1500)
print("thruster nyala")
# Jika misi sudah selesai, reset kedalaman menjadi 0
# if mission_complete:
# depth_target = 0
# time.sleep(0.1)
depth_hold()
# pm.disarm()
if __name__ == '__main__':
pymavlink = Guidance(master)
main(pymavlink) | [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~percobaan~TOTAL_CONTROL.py | import time
import math
# Import mavutil
from pymavlink import mavutil
# Imports for attitude
from pymavlink.quaternion import QuaternionBase
from PyMavlinkin import Guidance
# Create the connection
master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
#jika koneksi langsung komputer
# master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200)
boot_time = time.time()
# Wait a heartbeat before sending commands
master.wait_heartbeat()
def main(pm: Guidance):
#arm thruster
pm.arm()
# set the desired operating mode
DEPTH_HOLD = 'ALT_HOLD'
DEPTH_HOLD_MODE = master.mode_mapping()[DEPTH_HOLD]
while not master.wait_heartbeat().custom_mode == DEPTH_HOLD_MODE:
master.set_mode(DEPTH_HOLD)
# sesuaikan arah
# (set target yaw from 0 to 500 degrees in steps of 10, one update per second)
roll_angle = pitch_angle = 0
for yaw_angle in range(0, 500, 10):
pm.setDepth(-0.2)
#kanan kiri deteksi
# atur brp nilai heading
for yaw_angle in range(0, 60, 10):
pm.set_target_attitude(roll_angle, pitch_angle, 230)
time.sleep(1)
for yaw_angle in range(0, 60, 10):
pm.set_target_attitude(roll_angle, pitch_angle, 310)
time.sleep(1)
#menghadap lurus ke arah objek
for yaw_angle in range(0, 60, 10):
pm.set_target_attitude(roll_angle, pitch_angle, 260)
time.sleep(1)
for yaw_angle in range(0, 60, 10):
#maju kedepan 10 meter
pm.setRcValue(5,1600)
time.sleep(5)
pm.setRcValue(5,1500)
pm.disarm()
if __name__ == '__main__':
pymavlink = Guidance(master)
main(pymavlink) | [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~percobaan~control2.py | import os
os.environ['MAVLINK20'] = ''
import threading
import math
import time
from pymavlink import mavutil
from pymavlink.quaternion import QuaternionBase
from PyMavlinkin import Guidance
ALT_HOLD_MODE = 2
def main():
#target depth diulang setiap 10 detik
def set_altitude_loop():
while True:
rov.set_target_depthu(-0.5)
print("loop_depth")
time.sleep(10)
    # master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
    master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200)
    boot_time = time.time()
    # Wait a heartbeat before sending commands
    print(master.wait_heartbeat())
    # build the guidance wrapper around the freshly opened link
    rov = Guidance(master)
    # run set_altitude_loop in a separate thread
    altitude_thread = threading.Thread(target=set_altitude_loop)
    altitude_thread.start()
# ================================
# MAIN PROGRAM
    while not rov.is_armed():
        master.arducopter_arm()
    while not rov.mode_is(ALT_HOLD_MODE):
        master.set_mode('ALT_HOLD')
pitch = yaw = roll = 0
# for i in range(500):
#heading kanan
rov.set_target_attitude(roll, pitch, 200)
print("set_heading2")
time.sleep(1)
#heading kiri
rov.set_target_attitude(roll, pitch, 380)
print("set_heading2")
time.sleep(1)
#heading kearah objek
rov.set_target_attitude(roll, pitch, 300)
print("set_heading")
time.sleep(1)
#majuu
rov.setRcValue(5, 1600)
time.sleep(6)
print("majuuu")
rov.setRcValue(5, 1500)
# maju
#heading lurus
rov.set_target_attitude(roll, pitch, 300)
print("set_heading")
time.sleep(1)
#maju
rov.setRcValue(5, 1600)
time.sleep(6)
print("majuuu")
rov.setRcValue(5, 1500)
if __name__ == '__main__':
    main()
| [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~coba~TRACKING.PY | #!/usr/bin/env python3
import cv2
import numpy as np
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from Guidance import control_rov
# Inisialisasi ROS
rospy.init_node('rov_detection_publisher')
image_publisher = rospy.Publisher('rov_detection/image', Image, queue_size=10)
# Inisialisasi OpenCV
cap = cv2.VideoCapture(0)
cap.set(3, 640)
cap.set(4, 480)
# Create window
cv2.namedWindow('ROV')
# Load color threshold values
lower = np.load('/home/lz/Downloads/data1.npy')
upper = np.load('/home/lz/Downloads/data2.npy')
# Background Subtraction model initialization
bs = cv2.createBackgroundSubtractorMOG2()
# Inisialisasi CvBridge
bridge = CvBridge()
def control_rov(rect, frame_width, frame_height):
centering_zone_width = 320
centering_zone_height = 240
centering_zone_x = frame_width / 2 - centering_zone_width / 2
centering_zone_y = frame_height / 2 - centering_zone_height / 2
center_zone_width = 550
center_zone_height = 450
center_zone_x = frame_width / 2 - centering_zone_width / 2
center_zone_y = frame_height / 2 - centering_zone_height / 2
target_x, target_y, target_w, target_h = rect
    # if the box is right in the centre, move forward
if (target_x >= centering_zone_x and target_x + target_w <= centering_zone_x + centering_zone_width and
target_y >= centering_zone_y and target_y + target_h <= centering_zone_y + centering_zone_height):
# Stop moving
print("go maju")
    # if the box is already close, stop
elif(target_x >= center_zone_x and target_x + target_w <= center_zone_x + center_zone_width and
target_y >= center_zone_y and target_y + target_h <= center_zone_y + center_zone_height ):
print("stop")
else:
        if target_x < frame_width / 2-20:
            # setRcValue(6,1400)
            print("Move left")
        elif target_x > frame_width / 2+1 :
            # setRcValue(6,1600)
            print("Move right")
# else:
# print("stop")
# Main loop
while not rospy.is_shutdown():
# Read frame from camera
ret, frame = cap.read()
if not ret:
# Exit if no frame is read
break
# Apply background subtraction to the frame
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower, upper)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
# Detect contours
contours, _ = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Find the largest contour (based on area) as the target
max_area = 0
max_contour = None
for contour in contours:
area = cv2.contourArea(contour)
if area > max_area:
max_area = area
max_contour = contour
if max_contour is not None:
# Calculate the rectangle enclosing the target
x, y, w, h = cv2.boundingRect(max_contour)
# Draw the rectangle
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
# Control ROV based on the target rectangle
control_rov((x, y, w, h), frame.shape[1], frame.shape[0])
# Display the frame with the detected rectangle
cv2.imshow('ROV', frame)
# Convert OpenCV image to ROS image message
ros_image = bridge.cv2_to_imgmsg(frame, "bgr8")
# Publish the ROS image message
image_publisher.publish(ros_image)
# Exit if 'q' is pressed
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release the camera and close the window
cap.release()
cv2.destroyAllWindows()
| [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~percobaan~pymavlink~TIMER_PLAN_B%20(1).py | import time
from pymavlink import mavutil
from PyMavlinkzz import Guidance
#koneksi jika langsung komputer/nuc
# master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200)
#koneksi jika pakai companion
master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()
# kamus set rcvalue channel
# 1 = Pitch
# 2 = Roll
# 3 = Throttle (nyelam/surface)
# 4 = Yaw (Belok)
# 5 = Forward (maju)
# 6 = Lateral (menyamping)
# 7 = Camera Pan
# 8 = Camera Tilt*
# 9 = Lights 1 Level
# 10 = Lights 2 Level
# 11 = Video Switch
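# PWM values passed to setRcValue are interpreted around a 1500 neutral point:
# values above 1500 drive the channel in one direction and values below 1500 in
# the other, with magnitude controlling speed (e.g. 1700 is a fairly strong
# forward command on channel 5, 1440 a gentle lateral/reverse one).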
def main(pm: Guidance):
pm.armDisarm()
pm.arm()
# setmode manual
pm.setMode('MANUAL')
#Robot menyelam
pm.setRcValue(5, 1700)
time.sleep(3)
pm.setRcValue(5, 1500)
# setmode manual
# pm.setMode('ALT_HOLD')
#yaw liat kanan kiri opsi 1
# pm.setRcValue(4, 1450) #kiri
# time.sleep(1)
# pm.setRcValue(4, 1550) #kiri
# time.sleep(1)
#perbaiki arah gerak
# pm.setHeading(360, 4)
#yaw liat kanan kiri opsi 2
# pm.setHeading(315, 1)
# pm.setHeading(45,1)
# pm.setHeading(360,4)
# gerak maju sampe mendekati tembok
# pm.setRcValue(5 ,1700)
# time.sleep(4)
# gerak menyamping
# pm.setRcValue(6 ,1440)
# time.sleep(1)
# # gerak mundur sampe mendekati tembok
# pm.setRcValue(5 ,1300)
# time.sleep(4)
# # gerak menyamping
# pm.setRcValue(6 ,1440)
# time.sleep(1)
# #anggaplah robot sudah mendeteksi objek
# #gerak maju menuju objek
# pm.setRcValue(5,1700)
# time.sleep(3)
# # mundur jika robot kelewatan dari objek
# # pm.setRcValue(5,1400)
# # time.sleep(1)
# # robot menyelam sedikit lebih dalam
# pm.setRcValue(3,1450)
# time.sleep(1)
# lepas gripper jika posisi sudah sesuai
# pm.grip_open()
pm.setMode('MANUAL')
# misi selesai
pm.disarm()
if __name__ == '__main__':
pymavlink = Guidance(master)
main(pymavlink) | [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~percobaan~pymavlink~TIMER_PLAN_B%20(2).py | import time
from pymavlink import mavutil
from PyMavlinkzz import Guidance
from objek_deteksi import objek_terdeteksi
#koneksi jika langsung komputer/nuc
master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200)
#koneksi jika pakai companion
# master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
# master.wait_heartbeat()
# kamus set rcvalue channel
# 1 = Pitch
# 2 = Roll
# 3 = Throttle (nyelam/surface)
# 4 = Yaw (Belok)
# 5 = Forward (maju)
# 6 = Lateral (menyamping)
# 7 = Camera Pan
# 8 = Camera Tilt*
# 9 = Lights 1 Level
# 10 = Lights 2 Level
# 11 = Video Switch
def main(pm: Guidance):
pm.armDisarm()
pm.arm()
# setmode manual
pm.setMode('MANUAL')
#Robot menyelam
pm.setRcValue(3, 1400)
time.sleep(1)
# setmode manual
pm.setMode('ALT_HOLD')
#yaw liat kanan kiri opsi 1
pm.setRcValue(4, 1450) #kiri
time.sleep(1)
pm.setRcValue(4, 1550) #kiri
time.sleep(1)
#perbaiki arah gerak
pm.setHeading(360, 4)
#yaw liat kanan kiri opsi 2
# pm.setHeading(315, 1)
# pm.setHeading(45,1)
# pm.setHeading(360,4)
#autonomous searching
while True:
# Panggil fungsi objek_terdeteksi
coords = objek_terdeteksi()
if coords is not None:
print("Koordinat objek terdeteksi: ({}, {})".format(coords[0], coords[1]))
#jalan autonomous
# lepas gripper jika posisi sudah sesuai
# pm.grip_open()
#set manual mode
pm.setMode('MANUAL')
# misi selesai
pm.disarm()
else:
# looping gerakan lawn mower
# gerak maju sampe mendekati tembok
pm.setRcValue(5 ,1700)
time.sleep(4)
# gerak menyamping
pm.setRcValue(6 ,1440)
time.sleep(1)
# gerak mundur sampe mendekati tembok
pm.setRcValue(5 ,1300)
time.sleep(4)
# gerak menyamping
pm.setRcValue(6 ,1440)
time.sleep(1)
if __name__ == '__main__':
pymavlink = Guidance(master)
main(pymavlink) | [] |
2024-01-10 | Ultrasky123/ROS | src~maincode~src~percobaan~control_depth_pid.py | import time
from pymavlink import mavutil
from PyMavlinkin import Guidance
import altitude
# jika koneksi langsung komputer/nuc
master = mavutil.mavlink_connection("/dev/ttyACM0", baud=115200)
# koneksi jika pakai companion
# master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()
def main(pm: Guidance):
# pm.armDisarm()
pm.arm()
# target depth
depth_target = -0.5
# variabel PID
kp = 300
ki = 0.0
kd = 75
# rentang toleransi kedalaman (meter)
depth_tolerance = 0.0
def depth_hold():
while True:
# membaca nilai kedalaman
depth = altitude.get_altitude()
#RUMUS PID
depth_error_prev = 0
# depth_error_sum = 0
# hitung error kedalaman
depth_error = depth - depth_target
# hitung turunan dari error Kd
depth_error_diff = depth_error - depth_error_prev
depth_error_prev = depth_error
# hitung output PID
pid_output = (kp * depth_error) +(kd * depth_error_diff) #+(depth_error_sum * ki)
# print(depth-depth_target)
# Jika error kedalaman lebih kecil dari toleransi thruster off
if abs(depth - depth_target) <= depth_tolerance:
# set kecepatan Motor 1500 = 0
speed = 1400 + abs(int(pid_output))
pm.setRcValue(3, speed)
print("thruster low")
print(speed , "depth error : ", depth_error)
else:
#jika dept kurang dari target
if depth < depth_target:
# hitung kecepatan motor
speed = 1500-abs(int(pid_output))
# batasi kecepatan agar tidak terlalu besar atau kecil
if speed >= 1850:
speed = 1850
elif speed <= 1150:
speed = 1150
# set kecepatan motor
pm.setRcValue(3, speed)
print("thruster nyala")
print(speed , "depth error : ", depth_error)
else:
# hitung kecepatan motor
speed = 1500 + abs(int(pid_output))
# batasi kecepatan agar tidak terlalu besar atau kecil
if speed > 1850:
speed = 1850
elif speed < 1150:
speed = 1150
# set kecepatan motor
pm.setRcValue(3, speed)
print("thruster nyala")
print(speed)
# Jika misi sudah selesai, reset kedalaman menjadi 0
# if mission_complete:
# depth_target = 0
time.sleep(0.1)
depth_hold()
#pm.disarm()
if __name__ == '__main__':
pymavlink = Guidance(master)
main(pymavlink)
| [] |
2024-01-10 | GapData/datasets-2 | datasets~openwebtext~openwebtext.py | # coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Open WebText Corpus"""
from __future__ import absolute_import, division, print_function
import os
import re
from itertools import chain
import datasets
_CITATION = """\
@misc{Gokaslan2019OpenWeb,
title={OpenWebText Corpus},
author={Aaron Gokaslan*, Vanya Cohen*, Ellie Pavlick, Stefanie Tellex},
howpublished{\\url{http://Skylion007.github.io/OpenWebTextCorpus}},
year={2019}
}
"""
_DESCRIPTION = """\
An open-source replication of the WebText dataset from OpenAI.
"""
_URL = "https://zenodo.org/record/3834942/files/openwebtext.tar.xz"
class Openwebtext(datasets.GeneratorBasedBuilder):
"""The Open WebText dataset."""
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="plain_text",
description="Plain text",
version=datasets.Version("1.0.0"),
)
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features({"text": datasets.Value("string")}),
homepage="https://skylion007.github.io/OpenWebTextCorpus/",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
dl_dir = dl_manager.download_and_extract(_URL)
owt_dir = os.path.join(dl_dir, "openwebtext")
subset_xzs = [
os.path.join(owt_dir, file_name)
for file_name in sorted(os.listdir(owt_dir))
if file_name.endswith("xz") # filter out ...xz.lock
]
ex_dirs = dl_manager.extract(subset_xzs, num_proc=round(os.cpu_count() * 0.75))
nested_txt_files = [
[
os.path.join(ex_dir, txt_file_name)
for txt_file_name in sorted(os.listdir(ex_dir))
if txt_file_name.endswith("txt")
]
for ex_dir in ex_dirs
]
txt_files = chain(*nested_txt_files)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"txt_files": txt_files}),
]
def _generate_examples(self, txt_files):
""" Yields examples. """
for idx, filepath in enumerate(txt_files):
with open(filepath, encoding="utf-8") as f:
yield idx, {"text": re.sub("\n\n\n+", "\n\n", f.read()).strip()}
| [] |
2024-01-10 | Lyken17/AIC | aic~aic_gen.py | import subprocess
import openai
import markdown, re
from PyInquirer import prompt as py_inquirer_prompt, style_from_dict, Token
def load_config(path):
return {
"prefix": "",
}
def run_command(command):
process = subprocess.run(command, shell=True, capture_output=True, text=True)
if process.returncode != 0:
raise Exception(f"Command {command} failed with exit code {process.returncode}")
return process.stdout
def check_if_commits_are_staged():
try:
result = run_command("git diff --staged")
if result == "":
return False
except Exception:
return False
return True
def generate_commit_message_from_diff(diff):
prompt = f"""
What follows "-------" is a git diff for a potential commit.
Reply with a markdown unordered list of 5 possible, different Git commit messages
(a Git commit message should be concise but also try to describe
the important changes in the commit), order the list by what you think
would be the best commit message first, and don't include any other text
but the 5 messages in your response.
-------
{diff}
-------
"""
if len(prompt) >= 4096:
# cut off for max prompt length
prompt = prompt[:4000]
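    # Note: this truncates by characters, not tokens; the model's actual context
    # limit is measured in tokens, so 4000 characters is only a rough safety margin.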
print("Generating commit message...")
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
],
)
message = response["choices"][0]["message"]["content"]
return message # .strip().replace('"', '').replace("\n", '')
def main():
if not check_if_commits_are_staged():
print("No staged commits")
exit(0)
diff = run_command("git diff --staged")
commit_message = generate_commit_message_from_diff(diff)
html = markdown.markdown(commit_message)
suggestions = re.findall(r"<li>(.*?)</li>", html)
if len(suggestions) == 0:
print("No suggestions found.")
exit(0)
# run_command(f'git commit -m "{commit_message}"')
answers = py_inquirer_prompt(
[
{
"type": "list",
"name": "commit_message",
"message": "Commit message suggestions:",
"choices": [f"{i + 1}. {item}" for i, item in enumerate(suggestions)],
"filter": lambda val: val[3:],
}
]
)
answers = py_inquirer_prompt(
[
{
"type": "input",
"name": "final_commit_message",
"message": "Confirm or edit the commit message:",
"default": answers.get("commit_message"),
},
]
)
cmt_msg = answers.get("final_commit_message")
print(f"Committed with message: {cmt_msg}")
run_command(f'git commit -m "{cmt_msg}"')
if __name__ == "__main__":
main()
| [
"\n What follows \"-------\" is a git diff for a potential commit.\n Reply with a markdown unordered list of 5 possible, different Git commit messages \n (a Git commit message should be concise but also try to describe \n the important changes in the commit), order the list by what you think \n would be the best commit message first, and don't include any other text \n but the 5 messages in your response.\n ------- \n PLACEHOLDER\n -------\n ",
"You are a helpful assistant."
] |
2024-01-10 | jva/issue-description-embedding | issue_scatterplot.py | import sys
import pandas as pd
import json
from sklearn.manifold import TSNE
import numpy as np
from langchain.embeddings import HuggingFaceEmbeddings, SentenceTransformerEmbeddings
# Initialize the embeddings model
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
# Function to calculate the embedding for a text string
def calculate_embedding(text):
# Perform the embedding calculation for the text string
embedding = embeddings.embed_query(text)
return embedding
# Get the input file path from command-line argument
input_file_path = sys.argv[1]
# Generate the output file path by appending "-output" to the input file name
output_file_path = input_file_path.replace(".csv", "-output.csv")
# Read the input CSV file using pandas
data = pd.read_csv(input_file_path, header=None, skiprows=1)
# Extract issue keys and descriptions from the DataFrame
issue_keys = data.iloc[:, 0].values
descriptions = data.iloc[:, 1].values
# Calculate embeddings for each description
embeddings = [calculate_embedding(text) for text in descriptions]
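# (Embedding one description at a time keeps this simple; the LangChain embeddings
# object also exposes embed_documents(), which accepts a whole list of texts at once
# and is usually faster for larger issue exports.)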
# Convert the embeddings list to a numpy array
embeddings_array = np.array(embeddings)
# Apply t-SNE for dimension reduction
tsne = TSNE(n_components=2)
reduced_data = tsne.fit_transform(embeddings_array)
# Create a new DataFrame with the issue key and reduced coordinates
output_data = pd.DataFrame({
"issue-key": issue_keys,
"x-coordinate": reduced_data[:, 0],
"y-coordinate": reduced_data[:, 1]
})
# Save the output DataFrame to a new CSV file
output_data.to_csv(output_file_path, index=False)
| [] |
2024-01-10 | mavreyn/UCF-crimes | categorization.py | '''
Using OpenAI to categorize incidents
'''
import openai
import pandas as pd
CATEGORY_DICT = {
1: "Theft and Property Offenses",
2: "Drug-Related Offenses",
3: "Assault and Battery",
4: "Traffic Offenses",
5: "Stalking and Harassment",
6: "Fraud and White-Collar Crimes",
7: "Weapons and Firearm Offenses",
8: "Miscellaneous Offenses"
}
def get_completion(prompt, model="gpt-3.5-turbo"):
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0,
)
txt = response.choices[0].message["content"]
print(txt)
return txt
def make_prompt(title):
return f"""You are a model helping to classify incidents that occur on a local college campus. The team we are working with has divided the incidents into 8 categories:
1. Theft and Property Offenses
2. Drug-Related Offenses
3. Assault and Battery
4. Traffic Offenses
5. Stalking and Harassment
6. Fraud and White-Collar Crimes
7. Weapons and Firearm Offenses
8. Miscellaneous Offenses
Please classify the following incident (between triple backticks) into one of the above categories by outputting a single character ranging from 1-8. If you are unsure, output '8' (Miscellaneous Offenses).
```{title}```"""
def main():
crimes = [] # LIST OF INCIDENT TITLES
for crime in crimes:
prompt = make_prompt(crime)
category = CATEGORY_DICT[int(get_completion(prompt))]
print(category)
# ADD TO DATAFRAME | [] |
2024-01-10 | ssjh23/ChatGPT-TeleBot | bot~imagegen.py | from enum import Enum
import logging
import time
from telegram import (
Update,
InlineKeyboardButton,
InlineKeyboardMarkup
)
from telegram.ext import (
filters,
CommandHandler,
ConversationHandler,
MessageHandler,
CallbackQueryHandler,
Application,
ContextTypes
)
import openai
from logger import UserActionLogger
WELCOME = "WELCOME"
IMAGE_GEN = "IMAGE_GEN"
IMAGE_SIZE = "IMAGE_SIZE"
IMAGE_PROMPT = "IMAGE_PROMPT"
IMAGE_N = "N"
SELECTING_LEVEL = "SELECTING_LEVEL"
TYPING_PROMPT = "TYPING_PROMPT"
TYPING_N = "TYPING_N"
ASK_FOR_N = "ASK_FOR_N"
SELECTING_SIZE = "SELECTING_SIZE"
ASK_FOR_PROMPT = "ASK_FOR_PROMPT"
BACK_TO_START = "BACK_TO_START"
END_IMAGEGEN = "END_IMAGEGEN"
BACK_TO_MENU_COMMAND = "back_to_main"
IMAGE_GEN_START_COMMAND = "imagegen"
RESTART_IMAGE_GEN_COMMAND = "restart_imagegen"
class ImageSize(Enum):
SMALL = 256
MEDIUM = 512
LARGE = 1024
class ImageGen:
def __init__(self, update:Update, context: ContextTypes.DEFAULT_TYPE, username:str, id:str, application:Application, logger:UserActionLogger):
self.update = update
self.context = context
self.username = username
self.id = id
self.application = application
self.logger = logger
self.imagegen_handlers = []
self.welcome = False
self.last_back_message_id = None
async def run(self):
self.logger.log_inline('%s', self.username, self.id, IMAGE_GEN)
image_gen_welcome_text = (
"You are now using the Image Generation tool!\n"
f"Type /{IMAGE_GEN_START_COMMAND} to get started."
)
if (self.welcome == False):
await self.update.callback_query.edit_message_text(text=image_gen_welcome_text)
self.welcome = True
await self.add_image_handlers()
async def add_image_handlers(self):
image_gen_entry_handler = CommandHandler(f"{IMAGE_GEN_START_COMMAND}", self.image_gen_entry)
ask_for_n_handler = CallbackQueryHandler(self.ask_for_n, pattern=f"^{ImageSize.SMALL.value}$|^{ImageSize.MEDIUM.value}$|^{ImageSize.LARGE.value}$")
save_n_handler = MessageHandler(filters.TEXT & ~filters.COMMAND, self.save_n)
save_image_prompt_handler = MessageHandler(filters.TEXT & ~filters.COMMAND, self.save_image_prompt)
restart = CommandHandler(f"{RESTART_IMAGE_GEN_COMMAND}", self.restart)
# Image Conversation
image_gen_conv = ConversationHandler(
entry_points=[image_gen_entry_handler],
states= {
SELECTING_SIZE:[
ask_for_n_handler
],
TYPING_N: [
save_n_handler
],
TYPING_PROMPT: [
save_image_prompt_handler
]
},
fallbacks = [
restart
]
)
self.imagegen_handlers.append(image_gen_conv)
self.application.add_handler(image_gen_conv)
async def image_gen_entry(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> str:
self.logger.log_action('%s', self.username, self.id, '/imagegen')
"""Selecting Image Size"""
image_size_text = (
"Select an image size for the generated photo"
)
image_size_buttons = [
[
InlineKeyboardButton(text="256", callback_data=str(ImageSize.SMALL.value)),
InlineKeyboardButton(text="512", callback_data=str(ImageSize.MEDIUM.value)),
InlineKeyboardButton(text="1024", callback_data=str(ImageSize.LARGE.value))
],
]
image_size_keyboard = InlineKeyboardMarkup(image_size_buttons)
await context.bot.send_message(chat_id=self.id, text=image_size_text, reply_markup=image_size_keyboard)
return SELECTING_SIZE
# Saves the image prompt typed in by user, transits to image generation state
async def ask_for_n(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> str:
context.user_data[IMAGE_SIZE] = update.callback_query.data
self.logger.log_inline('%s', self.username, self.id, self.get_image_size())
image_prompt_text = (
f"Size of image: {self.get_image_size()}\n"
f"Type in the number (1-10) of images to generate.\n"
)
await update.callback_query.edit_message_text(text=image_prompt_text)
return TYPING_N
async def save_n(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
self.logger.log_action('%s', self.username, self.id, update.message.text)
try:
n = update.message.text
if int(n) > 10 or int(n) < 1:
await context.bot.send_message(chat_id=self.id, text="Please key in a valid number (1-10)")
return TYPING_N
context.user_data[IMAGE_N] = n
await self.ask_for_image_prompt(update=self.update, context=self.context)
return TYPING_PROMPT
except ValueError:
await context.bot.send_message(chat_id=self.id, text="Please key in a number (1-10)")
return TYPING_N
# Saves the image prompt typed in by user, transits to image generation state
async def ask_for_image_prompt(self, update: Update, context: ContextTypes.DEFAULT_TYPE) -> str:
image_prompt_text = (
f"Size of image: {self.get_image_size()}\n"
f"Number of images to generate: {self.get_n()}\n"
f"Type in your prompt for the image \n"
)
await context.bot.send_message(chat_id=self.id, text=image_prompt_text)
return TYPING_PROMPT
async def save_image_prompt(self, update: Update, context: ContextTypes.DEFAULT_TYPE):
self.logger.log_action('%s', self.username, self.id, update.message.text)
try:
context.user_data[IMAGE_PROMPT] = update.message.text
return await self.gen_image()
# Prevent the user from typing into the text box when choosing options
except KeyError as e:
await self.context.bot.send_message(chat_id=self.id, text="Please only select options from the messages")
return await self.image_gen_entry(update=self.update, context=self.context)
def get_image_size(self):
image_size_string = self.context.user_data[IMAGE_SIZE]
image_size_param = f"{image_size_string}x{image_size_string}"
return image_size_param
def get_image_prompt(self):
image_prompt = self.context.user_data[IMAGE_PROMPT]
return image_prompt
def get_n(self):
image_n = self.context.user_data[IMAGE_N]
return int(image_n)
async def gen_image(self):
self.logger.log_bot('%s', self.username, self.id, f'generating ChatGPT image - {self.get_image_prompt()}')
try:
image_finish_text = (
"If you want to generate more images with a different prompt using the same image size and number, just type the prompt below.\n"
f"Type /{RESTART_IMAGE_GEN_COMMAND} to redo the whole settings selection.\n"
f"Type /{BACK_TO_MENU_COMMAND} to return to the main menu."
)
chosen_image_settings_text = (
"Generating photos with the following settings \n"
f"Size of image: {self.get_image_size()}\n"
f"Number of images: {self.get_n()}\n"
f"Prompt: {self.get_image_prompt()}"
)
if (self.last_back_message_id != None):
await self.context.bot.delete_message(chat_id=self.update.effective_chat.id, message_id=self.last_back_message_id)
await self.context.bot.send_message(chat_id=self.id, text=chosen_image_settings_text)
result = openai.Image.create(
prompt=self.get_image_prompt(),
n=self.get_n(),
size= self.get_image_size()
)
for photo in result.data:
await self.context.bot.send_photo(chat_id=self.id, photo=photo.url)
self.logger.log_bot('%s', self.username, self.id, 'finished image generation' )
await self.context.bot.send_message(chat_id=self.id, text=image_finish_text)
except openai.InvalidRequestError as e:
print(e)
async def restart(self, update:Update, context: ContextTypes.DEFAULT_TYPE):
self.logger.log_action('%s', self.username, self.id, '/restart_imagegen')
return await self.image_gen_entry(update=self.update, context=self.context)
async def remove_image_handlers(self):
for handler in self.imagegen_handlers:
self.application.remove_handler(handler=handler)
return
| [
"ASK_FOR_PROMPT",
"Type in your prompt for the image \n",
"Type in the number (1-10) of images to generate.\n",
"TYPING_PROMPT",
"IMAGE_PROMPT"
] |
2024-01-10 | jxmorris12/arxiv-gpt | process_topics.py | from typing import Dict, List
import json
import os
import random
import time
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
import openai
import sqlite3
from tenacity import (
retry,
stop_after_attempt,
wait_fixed,
)
import tqdm
from topics import TOPICS
@retry(wait=wait_fixed(1), stop=stop_after_attempt(10))
def ask_gpt(prompt: str) -> str:
time.sleep(1) # rate limit myself
prompts_list = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": prompt},
]
response = openai.ChatCompletion.create(
# model="gpt-4",
model="gpt-3.5-turbo",
messages=prompts_list,
max_tokens=64,
temperature=0.1,
top_p=1,
frequency_penalty=0.25,
presence_penalty=0,
)
return response["choices"][0]["message"]["content"]
def get_topics(paper: Dict) -> List[str]:
topics_str = "\n".join(f"{i+1}. {t}" for i, t in enumerate(TOPICS))
prompt = """Given the following twenty topics:
{topics_str}
Please identify the top 1-5 topics that the following paper fits under. Return your answer as a python-style list, with quotes around each topic. Don't include the numbers in your answer. Choose the topics that really fit the paper, even if it's only a single one. DO NOT output anything except the list of topics, or someone will die.
Title: {title}
Abstract: {abstract}
Topics:
""".format(
topics_str=topics_str,
title=paper["title"].replace("\n", " "),
abstract=paper["abstract"][:250].replace("\n", " ").strip() + "..."
)
answer = ask_gpt(prompt)
try:
topics = eval(answer)
except SyntaxError as e:
print("got bad answer", answer)
raise e
return topics
data_folder = '/Users/johnmorris/arxiv-gpt/data/'
# maintain separate databases for papers and topics. makes it easier
# to regenerate one or the other.
data_conn = sqlite3.connect(os.path.join(data_folder, 'database.db'), check_same_thread=False)
data_cursor = data_conn.cursor()
conn = sqlite3.connect(os.path.join(data_folder, 'topics.db'), check_same_thread=False)
cursor = conn.cursor()
lock = threading.Lock()
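# sqlite3 connections are not thread-safe by default; check_same_thread=False only
# disables the ownership check, so this lock is what actually serialises writes
# coming from the worker threads below.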
# table 3: topics
cursor.execute('''CREATE TABLE IF NOT EXISTS topics
(id INTEGER PRIMARY KEY AUTOINCREMENT,
topic TEXT,
paper_id TEXT)''')
def process_document(document):
doc_id, date, doc_data = document
doc_data = json.loads(doc_data)
try:
topics = get_topics(doc_data)
except Exception as e:
print(f"Error - {e}")
print("Error – skipping document.")
return
for topic in topics:
lock.acquire(True)
cursor.execute("INSERT OR IGNORE INTO topics (topic, paper_id) VALUES (?,?)",
(topic, doc_data["id"]))
lock.release()
def main():
# pull all documents
# Get 5 random documents
data_cursor.execute("SELECT * FROM documents")
documents = data_cursor.fetchall()
# Print the random documents
executor = ThreadPoolExecutor(max_workers=5)
with tqdm.tqdm(total=len(documents)) as pbar:
futures = [executor.submit(process_document, doc) for doc in documents]
for future in as_completed(futures):
future.result()
pbar.update(1)
conn.commit()
conn.close()
print("done :-)")
if __name__ == '__main__':
main() | [
"Given the following twenty topics:\n PLACEHOLDER\n \nPlease identify the top 1-5 topics that the following paper fits under. Return your answer as a python-style list, with quotes around each topic. Don't include the numbers in your answer. Choose the topics that really fit the paper, even if it's only a single one. DO NOT output anything except the list of topics, or someone will die.\n\nTitle: PLACEHOLDER\n\nAbstract: PLACEHOLDER...\n\nTopics:\n",
"You are a helpful assistant."
] |
2024-01-10 | JamesHutchins2/AppSafe | GenerationModule~code_gen.py | import openai
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
def read_templates(folder_path):
template_data = []
for filename in os.listdir(folder_path):
file_path = os.path.join(folder_path, filename)
if os.path.isfile(file_path) and filename.endswith(".py"):
with open(file_path, 'r') as file:
template_code = file.read()
template_data.append({"code": template_code, "description": ""})
return template_data
def read_template_descriptions(folder_path):
template_data = read_templates(folder_path)
for template in template_data:
description_file_path = os.path.join(folder_path, f"{template['code'][:-3]}_description.txt")
if os.path.isfile(description_file_path):
with open(description_file_path, 'r') as file:
template["description"] = file.read()
return template_data
def read_additional_details(file_path):
with open(file_path, 'r') as file:
additional_details = file.read()
return additional_details
def write_code_to_file(code, file_path):
start_marker = "'''"
end_marker = "'''"
start_index = code.find(start_marker)
end_index = code.find(end_marker, start_index + len(start_marker))
if start_index != -1 and end_index != -1:
extracted_content = code[start_index + len(start_marker):end_index]
with open(file_path, "w") as file:
file.write(extracted_content)
print(f"Content between triple single quotes saved successfully")
else:
with open(file_path, 'w') as file:
file.write(code)
print(f"Content saved successfully")
def generate_code_using_templates_and_details(templates_folder, input):
# Read Python code templates and descriptions from the templates folder
all_templates = read_template_descriptions(templates_folder)
# Read additional details from the text file
additional_details = input
# Prepare user message with template descriptions
user_message = f"You are a helpful assistant that generates Python code.\n\n" \
f"Only generate Python code using the following templates:\n\n"
for template in all_templates:
user_message += f"Template Description: {template['description']}\n"
user_message += f"Template Code:\n{template['code']}\n\n"
user_message += f"Additional details:\n{additional_details}"
# Generate code
response = openai.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are a helpful assistant that generates only Python code. You do not give descriptions or add any comments. You only return Python code."},
{"role": "user", "content": user_message}
]
)
generated_code = response.choices[0].message.content
    # Return the generated code to the caller
    return generated_code
| [
"[]",
"You are a helpful assistant that generates only Python code. You do not give descriptions or add any comments. You only return Python code."
] |
2024-01-10 | JamesHutchins2/AppSafe | GenerationModule~mongo_create.py | import openai
import os
import re
#get the api key from .env file
from dotenv import load_dotenv
load_dotenv()
class mongo_make:
def __init__(self, input):
self.input = input
openai.api_key = os.getenv("OPENAI_API_KEY")
self.context = """
Accurately identify different aspects of the input, like collection names, fields, and their data types.
Translate these aspects into structured Python code for database operations using MongoDB.
Maintain readability and best practices in code generation.
Here's an optimized version of the prompt:
Context Prompt for Chatbot:
As a software developer, your task is to transform a program description into Python code for managing a MongoDB database. The code should include:
Importing necessary modules.
Defining the MongoDB URI and a function to connect to the database.
Creating functions to handle database operations such as creating data entries and adding collections with specified schemas.
Determining the database schema based on the provided program description.
Input Example:
The input will describe a database with multiple collections. Each collection will have its fields and data types specified. For example:
Database Schema:
Collection 1: Requests
Fields: Request Id, Request Title, Request Description, Request Created Date, Request Status
Collection 2: Users
Fields: User Id, User Type (worker/IT), Name
Expected Output:
Based on the input, generate Python code that includes:
Import statements for MongoDB and other necessary modules.
A MongoDB URI string.
Functions for connecting to the database and performing CRUD operations.
Database schemas for each collection as Python dictionaries, with field names and data types.
The code should establish a database connection, add collections with the defined schemas, and output confirmation of these actions.
The output should be well-formatted, syntactically correct Python code, ready for execution.
"""
self.template = """from pymongo import MongoClient
from datetime import datetime
from typing import Dict
# MongoDB URI
mongo_uri = "mongodb://localhost:27017/"
# Function to connect to the database
def connect_db(uri: str, db_name: str):
client = MongoClient(uri)
db = client[db_name]
return db
# Function to create a data entry in a collection
def create_data_entry(db, collection_name: str, data: Dict):
collection = db[collection_name]
return collection.insert_one(data).inserted_id
# Function to add a collection with a specific schema
def add_table(db, table_name: str, data_schema: Dict):
collection = db[table_name]
return collection.insert_one(data_schema).inserted_id
# Database name
db_name = "userDB"
db = connect_db(mongo_uri, db_name)
# Collection and schema for Requests
requests_table = 'requests'
requests_schema = {
"Request_ID": int,
"Request_Title": str,
"Request_Description": str,
"Created_Date": datetime,
"Request_Status": str
}
requests_table_id = add_table(db, requests_table, requests_schema)
# Collection and schema for Users
users_table = 'users'
users_schema = {
"User_ID": int,
"User_Type": str, # worker/IT
"User_Name": str
}
users_table_id = add_table(db, users_table, users_schema)
# Outputting the created tables and their schemas
print(f"Requests table created with ID: {requests_table_id}")
print(f"Requests table schema: {requests_schema}")
print(f"Users table created with ID: {users_table_id}")
print(f"Users table schema: {users_schema}")
"""
def interact(self):
        response = openai.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": self.context},
                {"role": "user", "content": self.input},
            ],
        )
return response.choices[0].message.content
def run_interaction(self):
        self.input = str(self.input + " Generate the code following this format: " + self.template)
return self.interact()
    def extract_code_from_markdown(self, markdown_text):
# Regular expression pattern for code blocks
pattern = r"```(.*?)```"
# Find all non-overlapping matches in the markdown text
matches = re.findall(pattern, markdown_text, re.DOTALL)
# Process each match to maintain its original formatting
formatted_code_blocks = []
for match in matches:
# Add each code block as a single string, preserving whitespace and newlines
formatted_code = '\n'.join(match.splitlines())
formatted_code_blocks.append(formatted_code)
return formatted_code_blocks
def to_python_file(self, file_name="mongo_creation_script.py"):
        code = self.run_chain()
        if not code:
            raise ValueError("No code block found in the model output")
        code_str = code[0]
        # Drop the leading language tag (e.g. "python") if the fenced block included one
        lines = code_str.split('\n')
        if lines and lines[0].strip().lower() in ("python", "py"):
            lines = lines[1:]
        formatted_code = '\n'.join(lines)
        # Write the formatted code to a file
        with open(file_name, 'w') as file:
            file.write(formatted_code)
def run_chain(self):
# call the llm
llm_output = self.run_interaction()
# extract the code from the llm output
code = self.extract_code_from_markdown(llm_output)
# return the code
return code
#let's run it
input = """Database Schema: Collection 1: Requests Fields:
Request Id
Request Title
Request Description
Request Created Date
Request Status
Collection 2: Users Fields:
User Id
User Type (worker/IT)
Name
User Types and Access: Workers: -Can view all requests -Can submit requests -No access to IT requests
IT Department: -Can view all requests -Can mark requests as pending, complete, or underway -Can delete requests
Pages and Permissions: Submitter View:
Accessible to all workers
Displays a list of all requests
Has input fields to submit new requests
Read-only access
IT View:
Accessible to IT department
Displays a list of all requests
Has input fields to mark requests as pending, complete, or underway
Edit and delete access"""
mongo_maker = mongo_make(input)
mongo_maker.to_python_file()
| [] |
2024-01-10 | JamesHutchins2/AppSafe | GenerationModule~mongoUtils.py | import openai
import os
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
def read_templates(folder_path):
template_data = []
for filename in os.listdir(folder_path):
file_path = os.path.join(folder_path, filename)
if os.path.isfile(file_path) and filename.endswith(".py"):
with open(file_path, 'r') as file:
template_code = file.read()
            template_data.append({"code": template_code, "description": "", "name": filename})
return template_data
def read_template_descriptions(folder_path):
template_data = read_templates(folder_path)
for template in template_data:
        # Use the template's file name (recorded above) to find its matching description file
        description_file_path = os.path.join(folder_path, f"{template['name'][:-3]}_description.txt")
if os.path.isfile(description_file_path):
with open(description_file_path, 'r') as file:
template["description"] = file.read()
return template_data
def read_additional_details(file_path):
with open(file_path, 'r') as file:
additional_details = file.read()
return additional_details
def write_code_to_file(code, file_path):
start_marker = "'''"
end_marker = "'''"
start_index = code.find(start_marker)
end_index = code.find(end_marker, start_index + len(start_marker))
if start_index != -1 and end_index != -1:
extracted_content = code[start_index + len(start_marker):end_index]
with open(file_path, "w") as file:
file.write(extracted_content)
print(f"Content between triple single quotes saved successfully")
else:
with open(file_path, 'w') as file:
file.write(code)
print(f"Content saved successfully")
def generate_code_using_templates_and_details(input):
# Read Python code templates and descriptions from the templates folder
all_templates = read_template_descriptions("./mongo_templates")
# Read additional details from the text file
additional_details = input
# Prepare user message with template descriptions
user_message = f"You are a helpful assistant that generates Python code.\n\n" \
f"The code you generates will be used to create a MongoDB collections.\n\n" \
f"Only generate Python code using the following templates:\n\n"\
f"you may add other data names and values to the schemas as you see fit\n\n"
for template in all_templates:
user_message += f"Template Description: {template['description']}\n"
user_message += f"Template Code:\n{template['code']}\n\n"
user_message += f"Additional details:\n{additional_details}"
# Generate code
response = openai.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are a helpful assistant that generates only Python code. You do not give descriptions or add any comments. You only return Python code."},
{"role": "user", "content": user_message}
]
)
generated_code = response.choices[0].message.content
    # Return the generated code to the caller
    return generated_code
| [
"[]",
"You are a helpful assistant that generates only Python code. You do not give descriptions or add any comments. You only return Python code.",
"./mongo_templates"
] |
2024-01-10 | JamesHutchins2/AppSafe | GenerationModule~data_collection_ui.py | import openai
import streamlit as st
import re
import code_gen
import os
import mongoUtils
BASE_DIR = "./Models" # Replace with the path to your base directory
BASE_PORT = 8500 # Starting port number
APP_COUNTER_FILE = 'app_counter.txt' # Path to the app counter file
def get_next_app_number():
# Check if the counter file exists
if not os.path.exists(APP_COUNTER_FILE):
with open(APP_COUNTER_FILE, 'w') as file:
file.write('1') # Initialize with the first app number
return 1
# If it exists, read the current number, increment it, and save it back
with open(APP_COUNTER_FILE, 'r') as file:
current_number = int(file.read().strip())
new_number = current_number + 1
with open(APP_COUNTER_FILE, 'w') as file:
file.write(str(new_number))
return new_number
def create_app_directories(app_number):
app_dir = os.path.join('Modules', f'app_{app_number}')
os.makedirs(app_dir, exist_ok=True)
return {
'table_script': os.path.join(app_dir, 'makeTable.py'),
'streamlit_app': os.path.join(app_dir, 'application.py'),
'info_file': os.path.join(app_dir, 'info.txt')
}
def generate_db_code(stage_1_input):
#this will take the stage 1 input, and use the code_gen file to generate the code
#we create a mongo_make instance
mongo = mongoUtils.generate_code_using_templates_and_details(stage_1_input)
return parse_code(mongo)
def generate_streamlit_code(stage_1_input):
Templates_folder = "Streamlit_Templates"
details = stage_1_input
gen_code = code_gen.generate_code_using_templates_and_details(Templates_folder, details)
return parse_code(gen_code)
def parse_code(code_lines):
if isinstance(code_lines, list):
# Join the list of strings into a single string
code = "\n".join(code_lines)
elif isinstance(code_lines, str):
code = code_lines
else:
raise ValueError("Invalid code format: Expected a string or list of strings")
start_marker = "'''"
end_marker = "'''"
start_index = code.find(start_marker)
end_index = code.find(end_marker, start_index + len(start_marker))
if start_index != -1 and end_index != -1:
extracted_content = code[start_index + len(start_marker):end_index].strip()
return extracted_content
else:
return code
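# Generates the database-creation and Streamlit scripts for a new app, writes them
# under Modules/app_<n>/, and records the port assigned to the app in info.txt.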
def generate_and_save_code(app_number, user_input):
# Generate the necessary code
db_code = generate_db_code(user_input)
st_code = generate_streamlit_code(user_input)
# Debugging: Print the generated code to check
print("Generated DB Code:", db_code)
print("Generated Streamlit Code:", st_code)
# Create directories and file paths
paths = create_app_directories(app_number)
# Write the generated code to files
with open(paths['table_script'], 'w') as file:
file.write(db_code)
with open(paths['streamlit_app'], 'w') as file:
file.write(st_code)
# Write port and other info to info.txt
port_number = BASE_PORT + app_number - 1
with open(paths['info_file'], 'w') as file:
file.write(f"Port Number: {port_number}\n")
return paths
response_require_clarification = ["Could you clarify", "Please provide more details", "..."] # Your list of clarification phrases
st.title("Application Design Assistant")
user_info = st.text_area("1. Who are the users?")
app_purpose = st.text_area("2. What is the purpose of the application?")
components_views = st.text_area("3. What are the components/views in the application?")
# Allow the user to input their API key privately
api_key = st.text_input("Enter your OpenAI API key", type='password')
# Set the API key
openai.api_key = api_key
def interact_with_user(prompt, context=""):
full_prompt = context + "\n" + prompt
response = openai.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are an AI assistant."},
{"role": "user", "content": full_prompt},
]
)
return response.choices[0].message.content
def text_to_file(file_name,ai_response):
with open(file_name, "w") as file:
file.write(str(ai_response))
st.success(f"Response saved successfully")
context = """
I am an AI assistant designed to define application requirements. Please provide detailed information about the following aspects of the application:
1. Database Schema:
Outline the desired structure of the MongoDB database. Specify the collections, documents, and fields that should be included.
2. User Types and Access:
Enumerate all types of users who will interact with the application. For each user type, describe their roles, responsibilities, and specific access levels (read, write, admin, etc.).
3. Pages and Permissions:
List all the pages that will be generated by the application. For each page, specify:
Who can access it (user types).
What data will be displayed on the page.
Any input fields that users can interact with.
User access permissions for each page (e.g., read-only, edit, delete).
For each point, provide as much detail as possible. I will analyze this information and generate a comprehensive set of outputs meeting your specifications.
Please feel free to confirm the information or provide any necessary corrections. If there are specific details you want to emphasize or modify for the best results, please include that in your response. Thank you!
"""
if st.button('Submit Details'):
combined_input = f"User Information: {user_info}\nApplication Purpose: {app_purpose}\nComponents and Views: {components_views}"
st.session_state.ai_response = interact_with_user(combined_input, context)
st.write(st.session_state.ai_response)
if any(phrase in st.session_state.ai_response for phrase in response_require_clarification):
st.write("The AI needs more clarification on your last input.")
additional_info = st.text_area("Please provide more details:")
if st.button('Submit Additional Info'):
st.session_state.ai_response = interact_with_user(additional_info, context)
st.write(st.session_state.ai_response)
if st.button("Confirm Response"):
#create instance of mongo_make class
#call make_db_file
#generate the code
generate_and_save_code(get_next_app_number(), st.session_state.ai_response)
print("code has been generated")
print("python file has been created")
st.success("Response saved successfully")
if st.button("Provide Corrections"):
correction = st.text_area("Enter your correction here:")
st.session_state.ai_response = interact_with_user(correction, context)
st.write(st.session_state.ai_response)
| [
"\nI am an AI assistant designed to define application requirements. Please provide detailed information about the following aspects of the application:\n\n1. Database Schema:\nOutline the desired structure of the MongoDB database. Specify the collections, documents, and fields that should be included.\n\n2. User Types and Access:\nEnumerate all types of users who will interact with the application. For each user type, describe their roles, responsibilities, and specific access levels (read, write, admin, etc.).\n\n3. Pages and Permissions:\nList all the pages that will be generated by the application. For each page, specify:\nWho can access it (user types).\nWhat data will be displayed on the page.\nAny input fields that users can interact with.\nUser access permissions for each page (e.g., read-only, edit, delete).\nFor each point, provide as much detail as possible. I will analyze this information and generate a comprehensive set of outputs meeting your specifications.\nPlease feel free to confirm the information or provide any necessary corrections. If there are specific details you want to emphasize or modify for the best results, please include that in your response. Thank you!\n\nPLACEHOLDER",
"You are an AI assistant.",
"Streamlit_Templates"
] |
2024-01-10 | colesmcintosh/script-sleuth | tests.py | from langchain.prompts import PromptTemplate
import main
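# These tests monkeypatch builtins.input and the model constructors so main() can be
# exercised without an interactive terminal or real API credentials.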
def test_fetch_code_file_exit(monkeypatch):
monkeypatch.setattr('builtins.input', lambda _: 'exit')
assert main.fetch_code_file() == (None, None, None, None)
def test_fetch_code_file_select_first(monkeypatch):
monkeypatch.setattr('builtins.input', lambda _: '1')
monkeypatch.setattr('main.glob.glob', lambda _ , **__: ['test_dir/hello_world.cpp'])
result = main.fetch_code_file('test_dir')
assert result[1].split('/')[-1] == 'hello_world.cpp'
assert result[3] == 'C++'
def test_format_prompt(monkeypatch):
monkeypatch.setattr('builtins.input', lambda _: 'What does this function do?')
result_prompt, result_question = main.format_prompt('test.py')
assert result_question == 'What does this function do?'
assert isinstance(result_prompt, PromptTemplate)
def test_get_llm_openai(monkeypatch):
mock_model = "mock_model"
monkeypatch.setattr('main.ChatOpenAI', lambda *args, **kwargs: mock_model)
assert main.get_llm("openai") == mock_model
def test_get_llm_huggingface_hub(monkeypatch):
mock_model = "mock_model"
monkeypatch.setattr('main.HuggingFaceHub', lambda *args, **kwargs: mock_model)
assert main.get_llm("huggingface") == mock_model
def test_get_llm_huggingface_pipeline(monkeypatch):
mock_model = "mock_model"
monkeypatch.setattr('main.HuggingFacePipeline.from_model_id', lambda *args, **kwargs: mock_model)
assert main.get_llm("huggingface", local_model=True) == mock_model
def test_main_exit(monkeypatch):
monkeypatch.setattr('builtins.input', lambda *args: 'exit')
assert main.main() is None
def test_format_prompt_back(monkeypatch):
monkeypatch.setattr('builtins.input', lambda _: 'back')
result_prompt, result_question = main.format_prompt('test.py')
assert result_question == 'back'
def test_main_quit(monkeypatch):
inputs = iter(['test_dir', '1', 'quit'])
monkeypatch.setattr('builtins.input', lambda _: next(inputs))
assert main.main() is None
def test_main_back(monkeypatch):
inputs = iter(['test_dir', '1', 'back', 'exit'])
monkeypatch.setattr('builtins.input', lambda _: next(inputs))
assert main.main() is None
| [] |
2024-01-10 | NicoleSherwood/YORG-AI | backend~src~core~assignments~write_node~write_node.py | from pydantic import BaseModel, Field
from tenacity import retry, stop_after_attempt, wait_fixed
from .write_node_prompt import *
from ..base_assignment import BaseAssignment, AssignmentOutput, AssignmentConfig
from src.core.nodes.base_node import NodeInput
from src.core.nodes.openai.openai import OpenAINode
from src.core.nodes.openai.openai_model import ChatInput
from src.utils.output_parser import LLMOutputParser
from src.utils.router_generator import generate_assignment_end_point
class WriteNodeInput(BaseModel):
node_name: str = Field(description="Name of a node.")
operations: Operations = Field(description="Operations of a node.")
write_node_config = {
"name": "write_node",
"description": "Write a node.",
}
@generate_assignment_end_point
class WriteNodeAssignment(BaseAssignment):
config: AssignmentConfig = AssignmentConfig(**write_node_config)
def __init__(self):
self.nodes = {"openai": OpenAINode()}
self.output = AssignmentOutput(
"node",
OUTPUT_SCHEMA,
LLMOutputParser,
)
@retry(stop=stop_after_attempt(2), wait=wait_fixed(1))
async def run(self, input: WriteNodeInput) -> AssignmentOutput:
# TBD: search and summary
prompt = PROMPT_TEMPLATE.format(
node_name=input.node_name,
operations_list=input.operations,
base_node_code=BASE_NODE_CODE,
example_node_code=NODE_EXAMPLE,
format_example=FORMAT_EXAMPLE,
)
node_input = NodeInput(
func_name="chat",
func_input=ChatInput(
model="gpt-4",
message_text=prompt,
),
)
text_output = self.nodes["openai"].run(node_input)
self.output.load(text_output)
return self.output
| [] |
2024-01-10 | NicoleSherwood/YORG-AI | backend~src~routers~agenda.py | from fastapi import APIRouter
# from src.core.user_input import UserInput
from pydantic import BaseModel
# from src.core.nodes.Hello.Hello import HelloNode
# from src.core.nodes.Hello.HelloModel import HelloWorldInput, HelloWithNameInput
# from src.core.nodes.LangChain.LangChainModel import LangChainInput
# from src.core.nodes.Anthropic.Anthropic import AnthropicNode, AnthropicModels
# from src.core.nodes.OpenAI.OpenAI import OpenAINode, OpenAIModels
# router = APIRouter(prefix="/agenda")
# class TestInput(BaseModel):
# user_id: str
# session_id: str
# requirement: str
# @router.post(
# "/create_agenda",
# responses={403: {"description": "agenda creation not available at this time"}},
# )
# async def run(input: TestInput):
# user_input = UserInput()
# user_input.start_project(input.user_id, input.session_id, input.requirement)
# return user_input
| [] |
2024-01-10 | NicoleSherwood/YORG-AI | backend~src~core~assignments~software_development~load_github_repo.py | from pydantic import BaseModel, Field
from tenacity import retry, stop_after_attempt, wait_fixed
from typing import List, Optional
from enum import Enum
import json
import re
import subprocess
from .load_github_repo_prompt import *
from ..base_assignment import BaseAssignment, AssignmentOutput, AssignmentConfig
from src.core.nodes.base_node import NodeInput
from src.core.nodes import (
DataAnalysisNode,
LoadDataInput,
OpenAINode,
ChatInput,
CodeRunnerNode,
RunCodeInput,
RunCodeFromFileInput,
DocumentLoaderNode,
FaissVectorStoreNode,
)
from src.core.common_models import (
UserProperties,
RedisKeyType,
DEFAULT_USER_ID,
DEFAULT_SESSION_ID,
DEFAULT_GIT_FOLDER,
)
from src.core.nodes.vectorstore.vectorstore_model import (
SimilaritySearchInput,
AddIndexInput,
DocumentIndexInfo,
)
from src.core.nodes.document_loader.document_model import (
Document,
SplitDocumentInput,
UrlDocumentInput,
)
from src.core.nodes.openai.openai_model import OpenAIResp
from src.core.nodes.openai.openai_model import ChatInput
from src.utils.output_parser import GitLoaderOutputParser, PythonCodeBlock
from src.utils.router_generator import generate_assignment_end_point
from dotenv import load_dotenv
load_dotenv()
class Mode(str, Enum):
FEATURE_IMPLEMENTATION = "feature_implementation"
FIX_BUGS = "fix_bugs"
SIMILARITY_SEARCH = "similarity_search"
class LoadGithubRepoInput(BaseModel):
query: str = Field(description="Query for searching github repo.")
top_k: Optional[int] = Field(5, description="Top k for searching github repo.")
target_files: Optional[List[str]] = Field(
[], description="Target files for searching github repo."
)
error_message: Optional[str] = Field(description="Error message for fixing bugs.")
mode: Mode = Field(default=Mode.SIMILARITY_SEARCH, description="Mode for input.")
load_github_repo_config = {
"name": "load_github_repo",
"description": "Load github repo and ask questions",
}
@generate_assignment_end_point
class LoadGithubRepoAssignment(BaseAssignment):
config: AssignmentConfig = AssignmentConfig(**load_github_repo_config)
document: Document = None
document_index_info: DocumentIndexInfo = None
def __init__(self):
self.chat_node = OpenAINode()
self.code_runner_node = CodeRunnerNode()
self.document_loader_node = DocumentLoaderNode()
self.faiss_vectorstore_node = FaissVectorStoreNode()
self.nodes = {
"openai": self.chat_node,
"code_runner": self.code_runner_node,
"document_loader": self.document_loader_node,
"faiss_vectorstore": self.faiss_vectorstore_node,
}
self.output = AssignmentOutput(
"load_github_repo",
OUTPUT_SCHEMA,
GitLoaderOutputParser,
)
def init_document(self, repo_url: str):
response = self.document_loader_node.create_document_from_url(
input=UrlDocumentInput(
url=repo_url,
type="git",
),
properties=UserProperties(),
)
document = Document(**json.loads(response))
self.document = document
def init_vectorstore(self, chunk_size: int = 200, chunk_overlap: int = 0):
add_index_input = AddIndexInput(
user_properties=UserProperties(
user_id=DEFAULT_USER_ID, session_id=DEFAULT_SESSION_ID
),
split_documents=[
SplitDocumentInput(
file_id=self.document.file_id,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
)
],
)
document_index_info: DocumentIndexInfo = self.faiss_vectorstore_node.add_index(
add_index_input
)
self.document_index_info = document_index_info
def update_file_content(self, file_path, file_content):
with open(self.document.file_path / file_path, "w") as f:
# Identify the minimum indentation across all non-empty lines
min_indent = min(
len(re.match(r"^\s*", line).group())
for line in file_content.splitlines()
if line.strip()
)
# Remove the minimum indentation from each line
adjusted_content_lines = [
line[min_indent:] for line in file_content.splitlines()
]
# Strip trailing whitespace from each line
stripped_adjusted_lines = [line.rstrip() for line in adjusted_content_lines]
# Join the adjusted lines to form the final content
final_content = "\n".join(stripped_adjusted_lines)
f.write(final_content)
def run_script(self, file_path):
run_code_input = RunCodeFromFileInput(
working_dir=str(self.document.file_path),
file_path=file_path,
)
output = self.code_runner_node.run_code_from_file(run_code_input)
return output
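    # Dispatches on the requested mode: implement a feature in the target files, fix a
    # reported error in them, or answer a question via similarity search over the repo index.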
@retry(stop=stop_after_attempt(2), wait=wait_fixed(1))
async def run(self, input: LoadGithubRepoInput):
match input.mode:
case Mode.FEATURE_IMPLEMENTATION:
content = ""
for target_file in input.target_files:
content += f"This is the path to file {target_file}\n"
content += f"This is file content\n"
with open(self.document.file_path / target_file, "rb") as f:
target_file_content = f.read()
content += f"{target_file_content} \n\n"
prompt = FEATURE_IMPLEMENTATION_PROMPT_TEMPLATE.format(
content=content,
feature_requirement=input.query,
format_example=FORMAT_EXAMPLE,
)
case Mode.FIX_BUGS:
content = ""
for target_file in input.target_files:
content += f"This is the path to file {target_file}\n"
content += f"This is file content\n"
with open(self.document.file_path / target_file, "rb") as f:
target_file_content = f.read()
content += f"{target_file_content} \n\n"
prompt = FIX_BUGS_PROMPT_TEMPLATE.format(
content=content,
error_message=input.error_message,
format_example=FORMAT_EXAMPLE,
)
case Mode.SIMILARITY_SEARCH:
top_k = self.faiss_vectorstore_node.similarity_search(
SimilaritySearchInput(query=input.query, k=input.top_k)
)
prompt = PROMPT_TEMPLATE.format(
content=top_k,
question=input.query,
)
node_input = NodeInput(
func_name="chat",
func_input=ChatInput(
model="gpt-4",
message_text=prompt,
),
)
text_output: OpenAIResp = self.nodes["openai"].run(node_input)
if input.mode is Mode.SIMILARITY_SEARCH:
return text_output
code_block = PythonCodeBlock(text_output.message.content, "code")
code_block.parse()
return code_block.content()
| [] |
2024-01-10 | jvmkit/ChatGPT | src~revChatGPT~revChatGPT.py | # Author: @[email protected]
# License: MIT
# Description: A Python wrapper for OpenAI's chatbot API
import json
import uuid
import httpx
from OpenAIAuth.OpenAIAuth import OpenAIAuth, Debugger
def generate_uuid() -> str:
uid = str(uuid.uuid4())
return uid
class Chatbot:
config: json
conversation_id: str
parent_id: str
headers: dict
conversation_id_prev: str
parent_id_prev: str
def __init__(self, config, conversation_id=None, debug=False):
self.debugger = Debugger(debug)
self.config = config
self.conversation_id = conversation_id
self.parent_id = generate_uuid()
if "session_token" in config or ("email" in config and "password" in config):
self.refresh_session()
# Resets the conversation ID and parent ID
def reset_chat(self) -> None:
self.conversation_id = None
self.parent_id = generate_uuid()
# Refreshes the headers -- Internal use only
def refresh_headers(self) -> None:
if "Authorization" not in self.config:
self.config["Authorization"] = ""
elif self.config["Authorization"] is None:
self.config["Authorization"] = ""
self.headers = {
"Host": "chat.openai.com",
"Accept": "text/event-stream",
"Authorization": "Bearer " + self.config["Authorization"],
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) "
"Version/16.1 Safari/605.1.15",
"X-Openai-Assistant-App-Id": "",
"Connection": "close",
"Accept-Language": "en-US,en;q=0.9",
"Referer": "https://chat.openai.com/chat",
}
# Generates a UUID -- Internal use only
# Generator for chat stream -- Internal use only
def get_chat_stream(self, data) -> None:
        # httpx.post() has no `stream` parameter; use the httpx.stream() context
        # manager so the server-sent events can be consumed incrementally.
        with httpx.stream(
            "POST",
            "https://chat.openai.com/backend-api/conversation",
            headers=self.headers,
            content=json.dumps(data),
            timeout=50,
        ) as response:
            for line in response.iter_lines():
                try:
                    # iter_lines() yields str; skip keep-alives and strip the "data: " prefix
                    if line == "":
                        continue
                    line = line[6:]
                    line = json.loads(line)
                    try:
                        message = line["message"]["content"]["parts"][0]
                        self.conversation_id = line["conversation_id"]
                        self.parent_id = line["message"]["id"]
                    except (KeyError, IndexError, TypeError):
                        continue
                    yield {
                        "message": message,
                        "conversation_id": self.conversation_id,
                        "parent_id": self.parent_id,
                    }
                except Exception:
                    continue
# Gets the chat response as text -- Internal use only
def get_chat_text(self, data) -> dict:
        # Create the request client; httpx only honours proxies passed at construction
        # time, so resolve the proxy URL (if any) before building the client.
        proxies = self.config["proxy"] if self.config.get("proxy", "") != "" else None
        s = httpx.Client(http2=True, proxies=proxies)
        # set headers
        s.headers = self.headers
        # Set multiple cookies
        if "session_token" in self.config:
            s.cookies.set(
                "__Secure-next-auth.session-token",
                self.config["session_token"],
            )
            s.cookies.set(
                "__Secure-next-auth.callback-url",
                "https://chat.openai.com/",
            )
response = s.post(
"https://chat.openai.com/backend-api/conversation",
data=json.dumps(data),
)
try:
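            # The endpoint answers with server-sent events; the code assumes the final
            # "data: {...}" payload is the fourth line from the end, and [6:] strips that prefix.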
response = response.text.splitlines()[-4]
response = response[6:]
except Exception as exc:
self.debugger.log("Incorrect response from OpenAI API")
self.debugger.log(response.text)
raise Exception("Incorrect response from OpenAI API") from exc
response = json.loads(response)
self.parent_id = response["message"]["id"]
self.conversation_id = response["conversation_id"]
message = response["message"]["content"]["parts"][0]
return {
"message": message,
"conversation_id": self.conversation_id,
"parent_id": self.parent_id,
}
# Gets the chat response
def get_chat_response(self, prompt, output="text") -> dict or None:
data = {
"action": "next",
"messages": [
{
"id": str(generate_uuid()),
"role": "user",
"content": {"content_type": "text", "parts": [prompt]},
},
],
"conversation_id": self.conversation_id,
"parent_message_id": self.parent_id,
"model": "text-davinci-002-render",
}
self.conversation_id_prev = self.conversation_id
self.parent_id_prev = self.parent_id
if output == "text":
return self.get_chat_text(data)
elif output == "stream":
return self.get_chat_stream(data)
else:
raise ValueError("Output must be either 'text' or 'stream'")
def rollback_conversation(self) -> None:
self.conversation_id = self.conversation_id_prev
self.parent_id = self.parent_id_prev
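    # Re-authenticates with whichever credentials are configured: a session token,
    # an email/password pair, or a pre-set Authorization bearer token.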
def refresh_session(self) -> Exception:
if (
"session_token" not in self.config
and ("email" not in self.config or "password" not in self.config)
and "Authorization" not in self.config
):
error = ValueError("No tokens provided")
self.debugger.log(error)
raise error
elif "session_token" in self.config:
if (
self.config["session_token"] is None
or self.config["session_token"] == ""
):
raise ValueError("No tokens provided")
            # Pass the proxy (if configured) at construction time; httpx ignores
            # a `proxies` attribute assigned after the client is created.
            proxies = self.config["proxy"] if self.config.get("proxy", "") != "" else None
            s = httpx.Client(http2=True, proxies=proxies)
# Set cookies
s.cookies.set(
"__Secure-next-auth.session-token",
self.config["session_token"],
)
# s.cookies.set("__Secure-next-auth.csrf-token", self.config['csrf_token'])
response = s.get(
"https://chat.openai.com/api/auth/session",
headers={
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, "
"like Gecko) Version/16.1 Safari/605.1.15 ",
},
)
try:
self.config["session_token"] = response.cookies.get(
"__Secure-next-auth.session-token",
)
self.config["Authorization"] = response.json()["accessToken"]
self.refresh_headers()
except Exception as exc:
self.debugger.log("Error refreshing session")
self.debugger.log(response.text)
raise Exception("Error refreshing session") from exc
elif "email" in self.config and "password" in self.config:
try:
self.login(self.config["email"], self.config["password"])
except Exception as exc:
self.debugger.log("Error refreshing session: ")
self.debugger.log(exc)
return exc
elif "Authorization" in self.config:
self.refresh_headers()
return
else:
raise ValueError("No tokens provided")
def login(self, email, password) -> None:
self.debugger.log("Logging in...")
use_proxy = False
proxy = None
if "proxy" in self.config:
if self.config["proxy"] != "":
use_proxy = True
proxy = self.config["proxy"]
auth = OpenAIAuth(email, password, use_proxy, proxy)
try:
auth.begin()
except Exception as exc:
# if ValueError with e as "Captcha detected" fail
if exc == "Captcha detected":
self.debugger.log(
"Captcha not supported. Use session tokens instead.")
raise ValueError("Captcha detected") from exc
self.debugger.log("Error logging in (Probably wrong credentials)")
raise Exception("Error logging in") from exc
if auth.access_token is not None:
self.config["Authorization"] = auth.access_token
if auth.session_token is not None:
self.config["session_token"] = auth.session_token
else:
possible_tokens = auth.session.cookies.get(
"__Secure-next-auth.session-token",
)
                if possible_tokens is not None:
                    # cookies.get() may return a single token string or a list of candidates
                    if isinstance(possible_tokens, list) and possible_tokens:
                        self.config["session_token"] = possible_tokens[0]
                    else:
                        self.config["session_token"] = possible_tokens
self.refresh_headers()
else:
raise Exception("Error logging in")
| [
"text",
"content_type"
] |
2024-01-10 | aksOps/share | codeshare.py | from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
from langchain.output_parsers import PydanticOutputParser
from pydantic import BaseModel, Field
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.text_splitter import CharacterTextSplitter
from langchain.chains import ConversationalRetrievalChain
# Define your desired data structure.
class MyData(BaseModel):
field1: str = Field(description="Description of field1")
field2: str = Field(description="Description of field2")
# Set up a parser + inject instructions into the prompt template.
parser = PydanticOutputParser(pydantic_object=MyData)
prompt = PromptTemplate(
template="Answer the user query.\\n {format_instructions}\\n {query}\\n",
input_variables=["query"],
partial_variables={"format_instructions": parser.get_format_instructions()}
)
# Initialize ConversationalRetrievalChain
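# NOTE: `documents` (the corpus to index) and `my_query` are assumed to be defined
# elsewhere; they are not part of this snippet.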
embeddings = OpenAIEmbeddings()
vectorstore = Chroma.from_documents(documents, embeddings)
qa_chain = ConversationalRetrievalChain.from_llm(OpenAI(temperature=0), vectorstore.as_retriever())
# Use ConversationalRetrievalChain and parser
_input = prompt.format_prompt(query=my_query)
output = qa_chain({"question": _input.to_string(), "chat_history": []})
parsed_output = parser.parse(output["answer"])
| [
"Answer the user query.\\n {format_instructions}\\n {query}\\n",
"format_instructions"
] |
2024-01-10 | skywalker023/sodaverse | co3.py | import os
import json
import argparse
from pathlib import Path
import openai
import numpy as np
from tqdm import tqdm
import colorful as cf
cf.use_true_colors()
cf.use_style('monokai')
from utils.dialogue_utils import cleanup_dialogue
import tasks.dataset_loaders as loader
from agents.gpt import GPT3BaseAgent, ChatGPTBaseAgent
PROJECT_HOME = Path(__file__).parent.resolve()
DATA_DIR = 'data'
DATA_DIR_PATH = os.path.join(PROJECT_HOME, DATA_DIR)
class CO3():
def __init__(self, args):
self.args = args
self.args.dump_dir = self.args.run_id + ":{}_out_of_{}".format(args.split_num, args.split_data)
self.atomic10x = self.load_atomic10x()
self.set_llm_and_instruction(args)
self.build_output_file(args) # if the directory already exists, it loads the existing args from the directory
self.print_args()
self.print_soda()
def set_llm_and_instruction(self, args):
if args.model.startswith('text-davinci-'):
self.llm = GPT3BaseAgent(args.__dict__)
self.narrative_prompt = "Rewrite this story with more specific details in two or three sentences:"
self.dialogue_prompt = "The following is a long in-depth conversation happening in the scene between person 1 and person 2 with multiple turns."
elif args.model.startswith('gpt-'):
self.llm = ChatGPTBaseAgent(args.__dict__)
self.narrative_prompt = "Rewrite this story with more specific details in two or three sentences:"
self.dialogue_prompt = "Generate an in-depth conversation happening in the scene between person 1 and person 2 with multiple turns."
else:
# TODO: add other LLMs here!
raise NotImplementedError
self.prompt = [self.narrative_prompt, self.dialogue_prompt]
self.prompt_suffix = "\nPerson 1:"
self.prompt_suffix2 = "\nPerson 2:"
def identify_interlocutor_with_gpt3(self, prompt):
response = openai.Completion.create(
model="gpt-3.5-turbo-instruct",
prompt=prompt,
temperature=0,
max_tokens=16,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=["."],
echo=True # XXX: to get the full output
)
return response['choices'][0]['text'].strip()
def set_prompt_for_dialogue(self, text, **speakers):
"""
Set prompt for dialogue generation with the interlocutors.
"""
speaker_prefix = "\n" + speakers['x'] + ":"
command_prompt = self.dialogue_prompt.replace("person 1", speakers['x'])
# if there's PersonX and PersonY in the narrative, use them as the speakers.
if 'y' in speakers.keys() and speakers['y'] != "":
command_prompt = command_prompt.replace("person 2", speakers['y'])
if 'z' in speakers.keys() and speakers['z'] != "":
command_prompt = command_prompt.replace("with", "and " + speakers['z'] + " with")
prompt = text + " " + command_prompt + speaker_prefix
else: # if there's only PersonX in the narrative, prompt GPT-3 to figure out who is the most natural interlocutor.
trimmed_prompt = command_prompt.split("person 2")[0].strip()
prompt_to_complete = text + " " + trimmed_prompt
command_prompt = self.identify_interlocutor_with_gpt3(prompt_to_complete)
if not command_prompt.endswith("."):
command_prompt = command_prompt + "."
prompt = command_prompt + speaker_prefix
return prompt
def print_args(self):
# sorted_args = sorted(self.args.__dict__.items())
print("\n=======================================")
for idx, (k, v) in enumerate(self.args.__dict__.items()):
if idx != 0:
print("---------------------------------------")
print(k, " : ", v)
print("=======================================\n")
def print_soda(self):
print()
with open(os.path.join(PROJECT_HOME, 'assets', 'soda.txt'), 'r') as fp:
for idx, line in enumerate(fp.readlines()):
if idx in [0, 2, 4, 6]:
print(cf.bold | cf.ghostWhite(line), end="")
elif idx in [1, 3, 5]:
print(cf.bold | cf.magenta(line), end="")
else:
print(cf.bold | cf.blue(line), end="")
print()
print(cf.bold | cf.ghostWhite("[[ SODA coming up right now! ]]"))
print()
def run(self):
last_save_point = self._load_last_save_point()
t = tqdm(total=len(self.atomic10x))
for current_idx, data_input in self.atomic10x.iterrows():
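            # Resume from the last index recorded on disk so interrupted runs can continue.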
if current_idx <= last_save_point:
t.update(1)
continue
if self.args.generation_limit is not None:
if current_idx > self.args.generation_limit:
break
sentence_form_triple = data_input['input_text']
narrative_result = self._collect_narrative(sentence_form_triple, **data_input)
output = self._collect_dialogue(narrative_result['narrative'], **data_input)
output['narrative_prompt'] = narrative_result['narrative_prompt']
output['narrative'] = narrative_result['narrative']
if current_idx % self.args.display_frequency == 0:
print()
print(cf.bold | cf.yellow("[ Triple ] " + data_input['head'] + " || " + data_input['relation'] + " || " + data_input['tail']))
print(cf.bold | cf.lightOrange("[ Sentence-form ] " + data_input['input_text']))
print(cf.bold | cf.green("[ Narrative ] " + output['narrative']))
first_speaker = output['dialogue_prompt'].split("\n")[-1]
print(cf.bold | cf.blue("[ Dialogue ]"))
print(cf.blue(first_speaker + output['cumulative_dialogue']))
print()
self._dump_output(current_idx, output, **data_input)
t.update(1)
def _generate_narrative(self, text):
prompt = text + " " + self.narrative_prompt
narrative = self.llm.interact(prompt)
narrative = narrative.replace("\n\n", "\n").replace("\n", " ").strip()
result = {
'narrative': narrative,
'narrative_prompt': prompt
}
return result
def _generate_dialogue(self, text, **data_input):
"""
Generate dialogue with the given narrative text.
"""
speakers = {'x': data_input['x'], 'y': data_input['y'], 'z': data_input['z']}
_prompt = prompt = self.set_prompt_for_dialogue(text, **speakers)
raw_dialogue = self.llm.interact(prompt)
result = self._parse_dialogue_output(raw_dialogue, prompt, **data_input)
length = result['num_responses']
# if it contained "\n\n" in the first place, maybe that caused the dialogue to stop. So, continue generating with the cleaned dialogue
if "\n\n" in raw_dialogue or length < self.args.min_dialogue_turn:
continue_generation = True
else:
continue_generation = False
# Try continuing the generation after we clean up the dialogue format in self._parse_output()
continuation_count = self.args.conversation_continuation_count
while continue_generation:
# print(cf.bold | cf.yellow("Continuing the dialogue..."))
prompt += result['dialogue']
raw_dialogue = self.llm.interact(prompt)
result = self._parse_dialogue_output(raw_dialogue, prompt, previous_result=result, **data_input)
continuation_count -= 1
length += result['num_responses']
continue_generation = result['continue_generation']
# if it has several utterances and the continuation_count is not left, stop.
if continuation_count == 0:
# print(cf.bold("Stopping the dialogue because it ran out of counts!"))
continue_generation = False
result['dialogue_prompt'] = _prompt
return result
def _collect_narrative(self, text, **data_input):
attempt_count = self.args.generation_attempt_count
narrative = None
generated_narratives = []
while narrative is None:
result = self._generate_narrative(text)
narrative = result['narrative']
generated_narratives.append(narrative)
result['suspended'] = False # default flag
narrative_sentences = narrative.split(". ")
if len(narrative_sentences) >= 4:
attempt_count -= 1
print(cf.bold | cf.purple("Too long in length! Attempt count left: " + str(attempt_count)))
narrative = None
elif narrative == text:
print(cf.bold | cf.purple("The generated narrative is the same as the literal!"))
narrative = None
del generated_narratives[-1]
elif len(narrative_sentences) != len(set(narrative_sentences)):
print(cf.bold | cf.purple("Repetitive sentences in the narrative!"))
narrative = None
if attempt_count == 0:
print(cf.bold | cf.magenta("Tried enough!"))
result['suspended'] = True
break
if narrative is None:
# choose from the existing ones
print(cf.bold("Choosing the shortest one among the generated ones!"))
sorted_narratives = sorted(generated_narratives, key=len)
narrative = sorted_narratives[0]
result['narrative'] = narrative
result['all_generated_narratives'] = generated_narratives
return result
def _collect_dialogue(self, text, **data_input):
attempt_count = self.args.generation_attempt_count
repetition_tolerance = self.args.repetition_tolerance
cumulative_dialogue = None
generated_dialogues = []
while cumulative_dialogue is None:
result = self._generate_dialogue(text, **data_input)
cumulative_dialogue = result['cumulative_dialogue']
unique_utterances = set(result['cumulative_utterances'])
n_repetitive_utterances = len(result['cumulative_utterances']) - len(unique_utterances)
result['repetition'] = False # default flag
result['suspended'] = False # default flag
generated_dialogues.append(cumulative_dialogue)
if len(result['cumulative_utterances']) < self.args.min_dialogue_turn:
cumulative_dialogue = None
attempt_count -= 1
print(cf.bold | cf.purple("The dialogue is too short! Attempt count left: " + str(attempt_count)))
elif len(result['cumulative_speakers']) < 2:
cumulative_dialogue = None
attempt_count -= 1
print(cf.bold | cf.purple("There are less than two speakers! Attempt count left: " + str(attempt_count)))
elif n_repetitive_utterances > 0:
repetition_tolerance -= 1
print(cf.bold | cf.purple("Has " + str(n_repetitive_utterances) + " repetitive utterances! Generating the dialogue again..."))
print(cf.bold | cf.purple("Repetition tolerance:", repetition_tolerance))
print(cf.bold | cf.yellow(result['dialogue_prompt']))
print(cumulative_dialogue)
if repetition_tolerance == 0:
result['repetition'] = True
else:
cumulative_dialogue = None
del generated_dialogues[-1]
if attempt_count == 0:
print(cf.bold | cf.magenta("Tried enough!"))
result['suspended'] = True
break
if cumulative_dialogue is None:
# choose from the existing ones
sorted_dialogues = sorted(generated_dialogues, key=len)
cumulative_dialogue = sorted_dialogues[-1]
print(cf.bold("Choosing the longest one among the generated ones!"))
result['all_generated_dialogues'] = generated_dialogues
return result
def _parse_dialogue_output(self, raw_dialogue, prompt, previous_result=None, **data_input):
# need to add the first speaker prefix
if previous_result is None:
starting_speaker = prompt.split()[-1]
raw_dialogue = starting_speaker + raw_dialogue
else:
starting_speaker = previous_result['speakers'][0]
# clean up dialogue
clean_dialogue = cleanup_dialogue(raw_dialogue)
dialogue = clean_dialogue['dialogue']
num_responses = len(clean_dialogue['speakers'])
# if it's a newly generated dialogue
continue_generation = True
if previous_result is None:
cumulative_dialogue = dialogue
cumulative_speakers = clean_dialogue['speakers']
cumulative_utterances = clean_dialogue['utterances']
# if we are continuing the dialogue, cumulate the dialogue
else:
cumulative_dialogue = previous_result['cumulative_dialogue']
cumulative_utterances = previous_result['cumulative_utterances']
cumulative_speakers = previous_result['cumulative_speakers']
if dialogue == "\n":
# if the current output is empty make sure to stop
print(cf.bold("Stopping the dialogue because nothing was generated"))
continue_generation = False
elif num_responses == 1:
# if GPT-3 only adds a single utterance, maybe it has nothing more to say!
print(cf.bold("Stopping the dialogue because it has probably nothing more to say!"))
continue_generation = False
else:
cumulative_dialogue = cumulative_dialogue + dialogue
cumulative_utterances = cumulative_utterances + clean_dialogue['utterances']
cumulative_speakers = cumulative_speakers + clean_dialogue['speakers']
result = {
'dialogue': dialogue,
'speakers': clean_dialogue['speakers'],
'utterances': clean_dialogue['utterances'],
'num_responses': num_responses,
'cumulative_dialogue': cumulative_dialogue.removeprefix(starting_speaker), # remove the first speaker prefix for continuing the dialogue because it's already in the prompt.
'cumulative_speakers': cumulative_speakers,
'cumulative_utterances': cumulative_utterances,
'continue_generation': continue_generation,
}
return result
def load_atomic10x(self):
_df = loader.load('atomic10x')
whole_df = _df[_df['x_relations']].copy().reset_index() # XXX: because we are only using a subset, so there will be some missing indices
whole_df.rename(columns={'index': 'original_index', 'named_literal': 'input_text'}, inplace=True)
df_chunks = np.array_split(whole_df, self.args.split_data)
df = df_chunks[self.args.split_num]
return df
def _load_last_save_point(self):
if os.path.exists(self.last_save_point_file):
with open(self.last_save_point_file, 'r') as fp:
last_save_point = int(fp.readlines()[0].strip())
else:
last_save_point = -1
return last_save_point
def build_output_file(self, args):
"""
This function builds the output directory for dumping the results.
If the directory already exists,
it will automatically pick up where it stopped before and load the existing hyper parameters.
"""
assert args.dump_dir is not None
self.output_dump_location = os.path.join(DATA_DIR_PATH, "soda:" + args.dump_dir)
os.makedirs(self.output_dump_location, exist_ok=True)
args_file = os.path.join(self.output_dump_location, 'args.json')
# if previously used args already exist, load them and override
if os.path.exists(args_file):
with open(args_file, 'r') as fp:
previous_args = json.load(fp)
for k, v in previous_args.items():
setattr(self.args, k, v)
self.prompt = previous_args['prompt']
else:
# save the arguments inside the dumping directory
args_dict = vars(args).copy()
del args_dict['generation_attempt_count']
del args_dict['generation_limit']
del args_dict['display_frequency']
args_dict['prompt'] = self.prompt
with open(args_file, 'w') as fp:
json.dump(args_dict, fp)
self.dump_file = os.path.join(self.output_dump_location, 'dump_file_' + args.dataset + '.jsonl')
self.last_save_point_file = os.path.join(self.output_dump_location, 'last_save_point_' + args.dataset + '.txt')
def _dump_output(self, idx, output, **data_input):
file_name = self.dump_file
# update save point
with open(self.last_save_point_file, 'w') as fp:
fp.write(str(idx))
with open(file_name, 'a') as fp:
del output['dialogue']
del output['speakers']
del output['utterances']
del output['num_responses']
data = {'index': int(idx), **output, **data_input}
fp.write(json.dumps(data) + '\n')
def main(args):
soda_maker = CO3(args)
soda_maker.run()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='arguments for generating dialogues using instruct gpt3')
parser.add_argument('--dataset',
type=str,
default='atomic10x')
parser.add_argument('--run-id',
type=str,
default='vanilla',
help='the name of the directory where the output will be dumped')
parser.add_argument('--generation-limit',
type=int,
default=None,
help='the number of dialogues that this run will generate. If None, it will generate with the entire given dataset.')
parser.add_argument('--display-frequency',
type=int,
default=1,
help='the frequency of displaying the generation results')
parser.add_argument('--model',
type=str,
default='gpt-3.5-turbo-1106',
help='which LLM to use')
parser.add_argument('--temperature',
type=float,
default=0.9,
help="control randomness: lowering results in less random completion")
parser.add_argument('--top-p',
type=float,
default=0.95,
help="nucleus sampling")
parser.add_argument('--frequency-penalty',
type=float,
default=1.0,
help="decreases the model's likelihood to repeat the same line verbatim")
parser.add_argument('--presence-penalty',
type=float,
default=0.6,
help="increases the model's likelihood to talk about new topics")
parser.add_argument('--max-tokens',
type=int,
default=1024,
help='maximum number of tokens to generate')
parser.add_argument('--min-dialogue-turn',
type=int,
default=6,
help='minimum number of turns for a dialogue (if gpt-3 still fails to generate longer than after generation-attempt-count, it will let the dialogue be)')
parser.add_argument('--conversation-continuation-count',
type=int,
default=1,
help='maximum number of attempts to continue the current conversation')
parser.add_argument('--generation-attempt-count',
type=int,
default=2,
help='maximum number of attempts to generate a dialogue again')
parser.add_argument('--repetition-tolerance',
type=int,
default=1,
help='maximum number of generation attempts when repetitive utterance is present in the dialogue')
parser.add_argument('--split-data',
type=int,
default=15,
help='how many splits for the data?')
parser.add_argument('--split-num',
type=int,
default=0,
help='access which data split?')
args = parser.parse_args()
main(args)
| [
"PLACEHOLDER PLACEHOLDER",
"PLACEHOLDER PLACEHOLDERPLACEHOLDER",
"PLACEHOLDERPLACEHOLDER",
"and ",
" with",
" ",
"person 2",
"PLACEHOLDER.",
"person 1"
] |
2024-01-10 | gormlabenz/wiki-time-extractor | fetch_events_chatgpt.py | import json
import logging
import os
import re
from pprint import pprint
import openai
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s')
# Set API key
openai.api_key = os.getenv("OPENAI_API_KEY")
ChatCompletionConfig = {
"model": "gpt-4",
"temperature": 0.5,
"stop": "</code>"
}
# Function to extract and validate the output JSON
def extract_events_from_json(input_json_string):
logging.info('Starting extraction for input data...')
# Construct the initial prompt for the API
prompt = (f"You are a service that translates a JSON into another JSON based on historical events. "
f"Given the JSON below, extract significant events from the \"history_text_cleaned\" "
f"and \"short_description_text_cleaned\" fields.\n\n"
f"Describe the events with short concise sentences in a narrative manner.\n\n"
f"<code>"
f"{input_json_string}"
f"</code>\n\n"
"Please generate the expected output as a valid JSON format and place it inside a code block.\n\n"
"Expected Output Format:\n\n"
"<code>\n"
"{\n"
" \"id\": \"Extracted from the input JSON.\",\n"
" \"events\": [\n"
" {\n"
" \"description\": \"A brief summary of the event.\",\n"
" \"shortDescription\": \"A summary of the event using strictly no more than 3 words.\",\n"
" \"time\": \"Strict ISO 8601 date format 'YYYY-MM-DD' or a duration object with 'from' and 'to' dates, both strictly in 'YYYY-MM-DD' format. No other characters or suffixes allowed.\",\n"
" \"approximateDate\": \"true if the date is approximate, false otherwise.\"\n"
" }\n"
" ]\n"
"}\n"
"</code>\n\n"
"Please make sure your response contains a valid JSON structure wrapped between in a html code block <code>...</code>.\n\n"
"Return only code block, no text.\n\n")
messages = [
{"role": "system", "content": "You are a specialist in converting and extracting information from JSON based on historical events."},
{"role": "user", "content": prompt}
]
# Fetch the API
response = openai.ChatCompletion.create(
model=ChatCompletionConfig['model'],
temperature=ChatCompletionConfig['temperature'],
stop=ChatCompletionConfig['stop'],
messages=messages
)
# Extract and validate the response JSON
output_text = response.choices[0].message['content'].strip()
    # The API strips the stop sequence, so re-append "</code>" so the code-block regex below can match
output_text += ChatCompletionConfig['stop']
pprint(output_text)
messages.append({"role": "assistant", "content": output_text})
match = re.search(r'<code>([\s\S]+?)</code>', output_text)
# Extract JSON inside code block using regex
if not match:
logging.error('No JSON found inside code block.')
return refetch_api_with_error_message(messages, "No JSON found inside code block.", input_json_string)
json_str_in_code_block = match.group(1)
try:
output_json = json.loads(json_str_in_code_block)
logging.info('JSON extraction successful.')
return output_json
except json.JSONDecodeError as e:
logging.error(f'Error decoding JSON: {e}')
return refetch_api_with_error_message(messages, str(e), input_json_string)
# Function to refetch the API on error
def refetch_api_with_error_message(messages, error_message, input_json_string):
logging.info('Refetching with error message...')
retry_prompt = (f"The JSON structure returned was incorrect because of the following reason: {error_message}. "
f"Given the original data, can you generate the expected JSON format as specified earlier?\n\n"
f"<code>"
f"{input_json_string}"
f"</code>\n\n"
"Expected Output Format:\n\n"
"<code>\n"
"{\n"
" \"id\": \"Extracted from the input JSON.\",\n"
" \"events\": [\n"
" {\n"
" \"description\": \"A brief summary of the event.\",\n"
" \"shortDescription\": \"A summary of the event using strictly no more than 3 words.\",\n"
" \"time\": \"Strict ISO 8601 date format 'YYYY-MM-DD' or a duration object with 'from' and 'to' dates, both strictly in 'YYYY-MM-DD' format. No other characters or suffixes allowed.\",\n"
" \"approximateDate\": \"true if the date is approximate, false otherwise.\"\n"
" }\n"
" ]\n"
"}\n"
"</code>\n\n"
"Please make sure your response contains a valid JSON structure wrapped between in a html code block <code>...</code>.\n\n"
"Return only code block, no text.\n\n")
messages.append({"role": "user", "content": retry_prompt})
pprint(messages)
response = openai.ChatCompletion.create(
model=ChatCompletionConfig['model'],
temperature=ChatCompletionConfig['temperature'],
stop=ChatCompletionConfig['stop'],
messages=messages
)
output_text = response.choices[0].message['content'].strip()
# add stop token to the end of the output text
output_text += ChatCompletionConfig['stop']
pprint(output_text)
# Extract JSON inside code block using regex, if present
match = re.search(r'<code>([\s\S]+?)</code>', output_text)
json_to_parse = match.group(1) if match else output_text
try:
output_json = json.loads(json_to_parse)
logging.info('JSON extraction successful in refetch.')
return output_json
except json.JSONDecodeError as e:
logging.error(f'Error decoding JSON on refetch: {e}')
return None
# Read the input JSON file
with open('output/filtered_dump_2.json', 'r') as file:
data = json.load(file)
# Process each item in the input JSON
for item in data:
# Remove the "main_coords_parsed" entry before constructing the prompt
if 'main_coords_parsed' in item:
del item['main_coords_parsed']
output_file_path = f"generated_output/{item['id']}.json"
# Check if the file already exists
if os.path.exists(output_file_path):
logging.warning(
f"A file with ID {item['id']} already exists. Skipping...")
continue
# Extract events from the JSON and save the response to a separate JSON file
output_data = extract_events_from_json(json.dumps(item))
if output_data:
with open(output_file_path, 'w') as outfile:
json.dump(output_data, outfile, indent=4)
logging.info('Script execution completed.')
| [
"You are a service that translates a JSON into another JSON based on historical events. Given the JSON below, extract significant events from the \"history_text_cleaned\" and \"short_description_text_cleaned\" fields.\n\nDescribe the events with short concise sentences in a narrative manner.\n\n<code>PLACEHOLDER</code>\n\nPlease generate the expected output as a valid JSON format and place it inside a code block.\n\nExpected Output Format:\n\n<code>\n{\n \"id\": \"Extracted from the input JSON.\",\n \"events\": [\n {\n \"description\": \"A brief summary of the event.\",\n \"shortDescription\": \"A summary of the event using strictly no more than 3 words.\",\n \"time\": \"Strict ISO 8601 date format 'YYYY-MM-DD' or a duration object with 'from' and 'to' dates, both strictly in 'YYYY-MM-DD' format. No other characters or suffixes allowed.\",\n \"approximateDate\": \"true if the date is approximate, false otherwise.\"\n }\n ]\n}\n</code>\n\nPlease make sure your response contains a valid JSON structure wrapped between in a html code block <code>...</code>.\n\nReturn only code block, no text.\n\n",
"You are a specialist in converting and extracting information from JSON based on historical events.",
"The JSON structure returned was incorrect because of the following reason: PLACEHOLDER. Given the original data, can you generate the expected JSON format as specified earlier?\n\n<code>PLACEHOLDER</code>\n\nExpected Output Format:\n\n<code>\n{\n \"id\": \"Extracted from the input JSON.\",\n \"events\": [\n {\n \"description\": \"A brief summary of the event.\",\n \"shortDescription\": \"A summary of the event using strictly no more than 3 words.\",\n \"time\": \"Strict ISO 8601 date format 'YYYY-MM-DD' or a duration object with 'from' and 'to' dates, both strictly in 'YYYY-MM-DD' format. No other characters or suffixes allowed.\",\n \"approximateDate\": \"true if the date is approximate, false otherwise.\"\n }\n ]\n}\n</code>\n\nPlease make sure your response contains a valid JSON structure wrapped between in a html code block <code>...</code>.\n\nReturn only code block, no text.\n\n"
] |
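A minimal standalone sketch of the extraction step the script above relies on: pull the JSON out of the <code>...</code> block and fail soft when parsing breaks. The sample reply text is made up for illustration.

import json
import re

def parse_code_block(reply: str):
    """Return the JSON parsed from a <code>...</code> block, or None if absent or invalid."""
    match = re.search(r'<code>([\s\S]+?)</code>', reply)
    if not match:
        return None
    try:
        return json.loads(match.group(1))
    except json.JSONDecodeError:
        return None

sample_reply = ('<code>{"id": "demo-1", "events": [{"description": "A chapel is built on the site.", '
                '"shortDescription": "chapel built", "time": "1901-05-01", "approximateDate": false}]}</code>')
print(parse_code_block(sample_reply))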
2024-01-10 | open-cogsci/OpenSesame | translation_tools~translate-ts.py | from pathlib import Path
import xml.etree.ElementTree as ET
import openai
import json
import os
from translation_utils import *
import argparse

# Minimal argument parsing (assumed definition): the loop below reads args.redo_invalid.
parser = argparse.ArgumentParser()
parser.add_argument('--redo-invalid', dest='redo_invalid', action='store_true',
                    help='retry source strings whose previous translation attempt failed')
args = parser.parse_args()
tree = ET.parse(TRANSLATABLES)
root = tree.getroot()
if TRANSLATIONS.exists():
translation_dict = json.loads(TRANSLATIONS.read_text())
else:
translation_dict = {}
messages = []
for message in root.iter("message"):
source_text = message.find("source").text
print(f'\n"{source_text}"')
if translation_dict.get(source_text, None) is not None:
print('* already translated')
continue
if source_text in translation_dict and \
translation_dict[source_text] is None and not args.redo_invalid:
print('* translation already failed')
continue
messages = messages[-HISTORY_LENGTH:]
messages.append({"role": "user", "content": source_text})
response = openai.ChatCompletion.create(
model=MODEL,
messages=[{"role": "system", "content": TS_SYSTEM}] + messages)
reply = response['choices'][0]['message']['content']
messages.append({"role": "assistant", "content": reply})
translations = [l for l in reply.splitlines() if l]
if len(translations) != len(LOCALES):
print(f'* invalid response: {reply}')
translation_dict[source_text] = None
else:
translation_dict[source_text] = {}
for lang, translation in zip(LOCALES, translations):
if source_text.startswith(' ') and not translation.startswith(' '):
translation = ' ' + translation
if source_text.endswith(' ') and not translation.endswith(' '):
translation = translation + ' '
print(f'- {lang[0]}: "{translation}"')
translation_dict[source_text][lang[1]] = translation
TRANSLATIONS.write_text(json.dumps(translation_dict, ensure_ascii=False,
indent=2))
| [
"content"
] |
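The constants used above (TRANSLATABLES, TRANSLATIONS, MODEL, TS_SYSTEM, HISTORY_LENGTH, LOCALES) come from translation_utils, which is not included here. A plausible sketch of what the script expects that module to provide; every value below is an assumption.

# translation_utils.py -- illustrative sketch only; the real module is not shown above.
from pathlib import Path

TRANSLATABLES = Path('translatables.ts')   # Qt .ts file holding the <message><source> entries
TRANSLATIONS = Path('translations.json')   # cache mapping source text -> {locale code: translation}
MODEL = 'gpt-4'
HISTORY_LENGTH = 20                        # how many previous chat turns to resend for context
LOCALES = [('French', 'fr_FR'), ('German', 'de_DE'), ('Italian', 'it_IT')]
TS_SYSTEM = ('Translate each message you receive into the following languages, '
             'one translation per line, in this order: '
             + ', '.join(name for name, _ in LOCALES))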
2024-01-10 | mitchellgordon95/TwitterSummary | clustering.py | import openai
import asyncio
from twitter import Thread
import re
import time
from collections import Counter
import pickle
class HashtagsThread:
def __init__(self, thread, hashtags):
self.text = thread.text
self.conversation_id = thread.conversation_id
self.thread_ids = thread.thread_ids
self.hashtags = hashtags
class TweetCluster:
def __init__(self, threads, hashtags=None, summary=None, subclusters=[]):
self.threads = threads
self.hashtags = hashtags
self.summary = summary
self.subclusters = subclusters or []
@property
def num_tweets(self):
count = len(self.threads)
if self.subclusters:
for cluster in self.subclusters:
count += cluster.num_tweets
return count
async def with_retries(func, err_return):
for attempt in range(1, 4): # 3 attempts with exponential backoff
try:
return await func()
except Exception as e:
wait_time = 2 ** attempt
print(f"Error generating summary on attempt {attempt}. Retrying in {wait_time} seconds. Error: {str(e)}")
time.sleep(wait_time)
return err_return
HASHTAG_PROMPT = """\
TWEET:
{tweet}
Generate 30 possible hashtags that could go with TWEET.
Rules:
If TWEET refers to a location or event, include at least one hashtag containing the name of the event.
If TWEET refers to a specific object or thing, include at least one hashtag containing the name of that thing.
"""
async def add_hashtags(thread):
# TODO - count tokens
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": HASHTAG_PROMPT.format(tweet=thread.text)}
]
async def get_hashtags():
print("sending request...")
response = await openai.ChatCompletion.acreate(
# model="gpt-4",
model="gpt-3.5-turbo",
messages=messages
)
response_text = response.choices[0].message['content'].strip()
hashtags = re.findall(r'#\w+', response_text)
return [h.lower() for h in hashtags]
hashtags = await with_retries(get_hashtags, [])
return HashtagsThread(thread, hashtags)
def count_hashtags(threads : HashtagsThread | TweetCluster):
hashtag_counter = Counter()
for thread in threads:
for h in thread.hashtags:
hashtag_counter[h] += 1
return hashtag_counter
def pack_cluster(relevant_threads, threads, hashtag):
# Grab more threads that seem relevant until we hit 7
all_cluster_hashtags = count_hashtags(relevant_threads)
pivot_hashtags = set([hashtag])
while len(relevant_threads) < 7:
found = False
for c_hashtag, _ in all_cluster_hashtags.most_common():
try:
another_relevant_thread = next(iter([thread for thread in threads if c_hashtag in thread.hashtags]))
except Exception:
continue
found = True
pivot_hashtags.add(c_hashtag)
relevant_threads.add(another_relevant_thread)
threads.remove(another_relevant_thread)
break
if not found:
break
# Also add hashtags that most threads have, but were not originally
# used to pivot
all_cluster_hashtags = count_hashtags(relevant_threads)
pivot_hashtags.update([h for h, count in all_cluster_hashtags.most_common()
if count > len(relevant_threads) / 2])
return pivot_hashtags
async def cluster_threads(threads):
threads = await asyncio.gather(*[add_hashtags(thread) for thread in threads])
# with open('hashtag_threads.pkl', 'wb') as file_:
# pickle.dump(threads, file_)
# with open('hashtag_threads.pkl', 'rb') as file_:
# threads = pickle.load(file_)
hashtag_counter = count_hashtags(threads)
clusters = []
threads = set(threads)
for hashtag, _ in hashtag_counter.most_common():
relevant_threads = set([thread for thread in threads if hashtag in thread.hashtags])
if len(relevant_threads) < 8:
threads = threads - relevant_threads
# Note: this mutates threads and relevant_threads
pivot_hashtags = pack_cluster(relevant_threads, threads, hashtag)
if len(relevant_threads) > 3:
clusters.append(TweetCluster(relevant_threads, hashtags=pivot_hashtags))
else:
threads.update(relevant_threads)
misc = []
for thread in threads:
found = False
for c in clusters:
for t in c.threads:
found = found or thread.conversation_id == t.conversation_id
if not found:
misc.append(thread)
clusters.append(TweetCluster(misc, hashtags=[], summary="misc"))
return clusters
def meta_cluster(clusters):
hashtag_counter = count_hashtags(clusters)
meta_clusters = []
clusters = set(clusters)
for hashtag, _ in hashtag_counter.most_common():
relevant_clusters = set([c for c in clusters if hashtag in c.hashtags])
clusters -= relevant_clusters
if len(relevant_clusters) == 1:
meta_clusters.append(list(relevant_clusters)[0])
elif len(relevant_clusters) > 1:
meta_cluster_hashtags = count_hashtags(relevant_clusters)
meta_cluster_pivot_hashtags = [h for h, count in meta_cluster_hashtags.most_common()
if count > len(relevant_clusters) / 2]
meta_clusters.append(TweetCluster([], hashtags=meta_cluster_pivot_hashtags, subclusters=relevant_clusters))
return meta_clusters
| [
"TWEET:\n{tweet}\n\nGenerate 30 possible hashtags that could go with TWEET.\n\nRules:\nIf TWEET refers to a location or event, include at least one hashtag containing the name of the event.\nIf TWEET refers to a specific object or thing, include at least one hashtag containing the name of that thing.\n",
"You are a helpful assistant."
] |
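A minimal driver sketch for cluster_threads above. The stand-in objects carry only the three attributes HashtagsThread copies from a Thread, and openai.api_key is assumed to be configured before the call.

import asyncio
from types import SimpleNamespace
from clustering import cluster_threads

async def main():
    threads = [
        SimpleNamespace(text="Trying the new ramen place downtown today",
                        conversation_id="1", thread_ids=["1"]),
        SimpleNamespace(text="The marathon route goes right past my street on Sunday",
                        conversation_id="2", thread_ids=["2"]),
    ]
    clusters = await cluster_threads(threads)
    for cluster in clusters:
        print(sorted(cluster.hashtags), cluster.num_tweets)

if __name__ == "__main__":
    asyncio.run(main())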
2024-01-10 | mitchellgordon95/TwitterSummary | meta_summary.py | import openai
from twitter import Thread
import asyncio
from clustering import with_retries, TweetCluster
import re
import time
from collections import Counter
import pickle
META_SUMMARY_PROMPT="""\
TWEET_SUMMARIES
\"\"\"
{summaries}
\"\"\"
What common theme unites all these tweets? Rules:
- The theme must begin with "{num_tweets} tweets are about"
- The theme must be no more than 1 sentence.
- The theme must be discussed in a majority of the tweets.
Think out loud, then state the topic prefixed with the TOPIC label."""
RESUMMARY_PROMPT = """\
TWEETS:
\"\"\"
{tweets_text}
\"\"\"
What topic do all TWEETS have in common? Rules:
- The topic must be no more than 1 sentence.
- The topic must be discussed in a majority of the tweets.
- The topic must be related to {hashtags}
- The topic must begin with "{num_cluster_tweets} tweets are about {cluster_summary}. More specifically, {num_tweets} are about"
Do not think. Just say the topic and only the topic."""
async def resummarize(cluster):
"""Given a meta-cluster, resummarize the subclusters to be more specific."""
async def resummarize_subcluster(subcluster):
tweets_text = "\n\n".join([thread.text for thread in subcluster.threads])
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": RESUMMARY_PROMPT.format(
tweets_text=tweets_text,
num_tweets=subcluster.num_tweets,
num_cluster_tweets=cluster.num_tweets,
cluster_summary=cluster.summary,
hashtags=" ".join(subcluster.hashtags)
)}
]
async def get_summary():
print("sending request...")
response = await openai.ChatCompletion.acreate(
model="gpt-4",
# model="gpt-3.5-turbo",
messages=messages
)
return response.choices[0].message['content'].strip()
response_text = await with_retries(get_summary, "API error")
try:
summary = response_text.strip('"')
_, summary = summary.split('specifically,', 1)
_, summary = summary.split('about', 1)
except:
summary = f'Error parsing model output: {response_text}'
return TweetCluster(subcluster.threads, hashtags=subcluster.hashtags, summary=summary, subclusters=subcluster.subclusters)
subclusters = await asyncio.gather(*[resummarize_subcluster(c) for c in cluster.subclusters])
return TweetCluster(cluster.threads, hashtags=cluster.hashtags, summary=cluster.summary, subclusters=subclusters)
async def generate_meta_summary(cluster):
if cluster.summary:
return cluster
summaries = "\n\n".join([c.summary for c in cluster.subclusters])
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": META_SUMMARY_PROMPT.format(
summaries=summaries,
num_tweets=cluster.num_tweets,
)}
]
async def get_summary():
print("sending request...")
response = await openai.ChatCompletion.acreate(
model="gpt-4",
# model="gpt-3.5-turbo",
messages=messages
)
return response.choices[0].message['content'].strip()
response_text = await with_retries(get_summary, "API error")
try:
lines = response_text.split("\n")
summary = None
for line in lines:
if "TOPIC" in line:
summary = line[len("TOPIC")+1:]
summary = summary.strip('"')
_, summary = summary.split('about', 1)
except:
summary = f"Error parsing model output: {response_text}"
out = TweetCluster(cluster.threads, hashtags=cluster.hashtags, summary=summary, subclusters=cluster.subclusters)
return await resummarize(out)
async def meta_summarize(clusters):
clusters = await asyncio.gather(*[generate_meta_summary(cluster) for cluster in clusters])
# with open('meta_summaries.pkl', 'wb') as file_:
# pickle.dump(clusters, file_)
# with open('meta_summaries.pkl', 'rb') as file_:
# clusters = pickle.load(file_)
return clusters
| [
"TWEETS:\n\"\"\"\n{tweets_text}\n\"\"\"\n\nWhat topic do all TWEETS have in common? Rules:\n\n- The topic must be no more than 1 sentence.\n- The topic must be discussed in a majority of the tweets.\n- The topic must be related to {hashtags}\n- The topic must begin with \"{num_cluster_tweets} tweets are about {cluster_summary}. More specifically, {num_tweets} are about\"\n\nDo not think. Just say the topic and only the topic.",
"TWEET_SUMMARIES\n\"\"\"\n{summaries}\n\"\"\"\n\nWhat common theme unites all these tweets? Rules:\n\n- The theme must begin with \"{num_tweets} tweets are about\"\n- The theme must be no more than 1 sentence.\n- The theme must be discussed in a majority of the tweets.\n\nThink out loud, then state the topic prefixed with the TOPIC label.",
" ",
"You are a helpful assistant."
] |
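meta_summarize expects meta-clusters whose sub-clusters already carry one-sentence summaries; the step that produces those summaries is not shown here, so this shape-only sketch builds the input by hand. An OpenAI key is assumed to be configured before running it.

import asyncio
from types import SimpleNamespace
from clustering import TweetCluster
from meta_summary import meta_summarize

def tweet(text):
    # resummarize only reads .text from each thread, so a bare namespace is enough here
    return SimpleNamespace(text=text)

subclusters = [
    TweetCluster([tweet("Course map for Sunday's marathon is out"),
                  tweet("Volunteering at the marathon water station")],
                 hashtags={"#marathon"}, summary="2 tweets are about Sunday's marathon."),
    TweetCluster([tweet("Main street is closed all weekend for the race")],
                 hashtags={"#roadclosures"}, summary="1 tweet is about road closures downtown."),
]
meta = TweetCluster([], hashtags={"#marathon"}, subclusters=subclusters)

for cluster in asyncio.run(meta_summarize([meta])):
    print(cluster.summary)
    for sub in cluster.subclusters:
        print(" -", sub.summary)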
2024-01-10 | adamkarvonen/chess_gpt_eval | gpt_query.py | import openai
import tiktoken
import json
import os
# import replicate
# for hugging face inference endpoints for codellama
import requests
from typing import Optional
from tenacity import (
retry,
stop_after_attempt,
wait_random_exponential,
) # for exponential backoff
with open("gpt_inputs/api_key.txt", "r") as f:
openai.api_key = f.read().strip()
# system message is used in openai_request()
system_message = """Provide the next move in the chess game. Only provide the move, no move numbers."""
# dollars per 1k tokens, per openai.com/pricing
pricing_dict = {
"gpt-4": 0.03,
"gpt-4-0301": 0.03,
"gpt-4-0613": 0.03,
"gpt-3.5-turbo": 0.0015,
"gpt-3.5-turbo-0301": 0.0015,
"gpt-3.5-turbo-0613": 0.0015,
"gpt-3.5-turbo-16k": 0.003,
"babbage": 0.0005,
"gpt-3.5-turbo-instruct": 0.0015,
}
MAX_TOKENS = 10
completion_models = [
"gpt-3.5-turbo-instruct",
"babbage",
"davinci",
]
# tenacity is to handle anytime a request fails
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(6))
def get_gpt_response(
prompt: str, model: str = "gpt-4", temperature: float = 0.0
) -> Optional[str]:
try:
messages = []
# system message is used in openai_request()
# system_message_dict = {
# "role": "system",
# "content": system_message,
# }
initial_message = {"role": "user", "content": prompt}
messages.append(initial_message)
record_messages(messages, model)
# num_tokens = count_all_tokens(model, messages)
# prompt_cost = get_prompt_cost(model, num_tokens)
# print("prompt cost in $:", prompt_cost)
if model in completion_models:
response = get_completions_response(model, messages, temperature)
elif model.startswith("gpt"):
response = openai_request(model, messages, temperature)
elif model.startswith("openrouter"):
response = openrouter_request(model, messages, temperature)
elif model.startswith("huggingface"):
response = hugging_face_request(model, messages, temperature)
elif model.startswith("replicate"):
response = replicate_request(model, messages, temperature)
else:
raise Exception("Invalid model name")
# response_cost = get_response_cost(model, count_tokens(model, response))
# print("response cost in $:", response_cost)
messages.append({"role": "assistant", "content": response})
record_messages(messages, model)
return response
except Exception as e:
print(f"Error while getting GPT response: {e}")
return None
def openai_request(model: str, messages: list[dict], temperature: float) -> str:
system_message_dict = {
"role": "system",
"content": system_message,
}
messages.append(system_message_dict)
completion = openai.ChatCompletion.create(
model=model,
temperature=temperature,
messages=messages,
)
response = completion.choices[0].message.content
return response
def openrouter_request(model: str, messages: list[dict], temperature: float) -> str:
if temperature == 0:
temperature = 0.001
with open("gpt_inputs/openrouter_api_key.txt", "r") as f:
openai.api_key = f.read().strip()
openai.api_base = "https://openrouter.ai/api/v1"
OPENROUTER_REFERRER = "https://github.com/adamkarvonen/nanoGPT"
model = model.replace("openrouter/", "")
completion = openai.ChatCompletion.create(
model=model,
headers={"HTTP-Referer": OPENROUTER_REFERRER},
messages=messages,
temperature=temperature,
max_tokens=MAX_TOKENS,
)
response = completion.choices[0].message.content
return response
def replicate_request(model: str, messages: list[dict], temperature: float) -> str:
if temperature == 0:
temperature = 0.001
with open("gpt_inputs/replicate_api_key.txt", "r") as f:
api_key = f.read().strip()
os.environ["REPLICATE_API_TOKEN"] = api_key
model = model.replace("replicate/", "")
messages = translate_to_string_input(messages)
output = replicate.run(
model,
input={
"prompt": messages,
"max_new_tokens": MAX_TOKENS,
"temperature": temperature,
},
)
# The meta/llama-2-7b model can stream output as it's running.
response = ""
# The predict method returns an iterator, and you can iterate over that output.
for item in output:
# https://replicate.com/meta/llama-2-7b/versions/527827021d8756c7ab79fde0abbfaac885c37a3ed5fe23c7465093f0878d55ef/api#output-schema
response += item
return response
def hugging_face_request(model: str, messages: list[dict], temperature: float) -> str:
def query(payload):
response = requests.post(API_URL, headers=headers, json=payload)
return response.json()
messages = translate_to_string_input(messages)
API_URL = "https://xxxxxxxx.us-east-1.aws.endpoints.huggingface.cloud"
headers = {
"Authorization": "Bearer xxxxx",
"Content-Type": "application/json",
}
if temperature == 0:
temperature = 0.001
output = query(
{
"inputs": messages,
"parameters": {"temperature": temperature, "max_new_tokens": MAX_TOKENS},
}
)
return output[0]["generated_text"]
def translate_to_string_input(
openai_messages: list[dict], roles_included: bool = False
):
# Translate from OpenAI's dict to a single string input
messages = []
for message in openai_messages:
if roles_included:
messages.append(message["role"] + ": ")
messages.append(message["content"])
if roles_included:
messages.append("assistant: ")
return "\n".join(messages)
# for gpt-3 models and instruct models
def get_completions_response(
model: str,
messages: list[dict] | str,
temperature: float,
max_tokens: int = MAX_TOKENS,
) -> str:
if not isinstance(messages, str):
prompt = translate_to_string_input(messages, roles_included=False)
else:
prompt = messages
completion = openai.Completion.create(
model=model, temperature=temperature, prompt=prompt, max_tokens=max_tokens
)
response = completion.choices[0].text
return response
def count_all_tokens(model: str, messages: list[dict[str, str]]) -> int:
total_tokens = 0
for message in messages:
total_tokens += count_tokens(model, message["content"])
return total_tokens
def count_tokens(model: str, prompt: str) -> int:
if "gpt" not in model:
model = "gpt-4"
encoding = tiktoken.encoding_for_model(model)
num_tokens = len(encoding.encode(prompt))
return num_tokens
def get_prompt_cost(model: str, num_tokens: int) -> float:
# good enough for quick evals
if model not in pricing_dict:
return num_tokens * 0.001 * pricing_dict["gpt-4"]
return num_tokens * 0.001 * pricing_dict[model]
def get_response_cost(model: str, num_tokens: int) -> float:
# good enough for quick evals
if model not in pricing_dict:
return num_tokens * 0.001 * pricing_dict["gpt-4"]
cost = num_tokens * 0.001 * pricing_dict[model]
if model == "gpt-4":
cost *= 2
return cost
def record_messages(messages: list[dict], model: str):
# create the conversation in a human-readable format
conversation_text = ""
for message in messages:
conversation_text += message["content"]
# write the conversation to the next available text file
with open(f"gpt_outputs/transcript.txt", "w") as f:
f.write(model + "\n\n")
f.write(conversation_text)
| [] |
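A minimal usage sketch for the helper above. The PGN-style prompt is illustrative, and gpt_inputs/api_key.txt must already exist because it is read at import time.

from gpt_query import get_gpt_response

opening = "1. e4 e5 2. Nf3 Nc6 3. Bb5 "
move = get_gpt_response(opening, model="gpt-3.5-turbo-instruct", temperature=0.0)
print(repr(move))   # a short move string, or None if every retry failed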
2024-01-10 | ahuaiph/chatgpt-on-wechat | voice~voice_factory.py | """
voice factory
"""
def create_voice(voice_type):
"""
create a voice instance
:param voice_type: voice type code
:return: voice instance
"""
if voice_type == 'baidu':
from voice.baidu.baidu_voice import BaiduVoice
return BaiduVoice()
elif voice_type == 'google':
from voice.google.google_voice import GoogleVoice
return GoogleVoice()
elif voice_type == 'openai':
from voice.openai.openai_voice import OpenaiVoice
return OpenaiVoice()
raise RuntimeError
| [] |
2024-01-10 | ahuaiph/chatgpt-on-wechat | bot~bot_factory.py | """
channel factory
"""
from common import const
def create_bot(bot_type):
"""
create a channel instance
:param channel_type: channel type code
:return: channel instance
"""
if bot_type == const.BAIDU:
        # Baidu UNIT conversation API
from bot.baidu.baidu_unit_bot import BaiduUnitBot
return BaiduUnitBot()
elif bot_type == const.CHATGPT:
        # ChatGPT web interface
from bot.chatgpt.chat_gpt_bot import ChatGPTBot
return ChatGPTBot()
elif bot_type == const.OPEN_AI:
        # OpenAI official chat model API
from bot.openai.open_ai_bot import OpenAIBot
return OpenAIBot()
raise RuntimeError
| [] |
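A sketch of how the two factories above are consumed by the rest of the project; only type codes that appear in the code are used, and the calling channel code is not shown here.

from common import const
from bot import bot_factory
from voice import voice_factory

# Unknown type codes fall through to the bare `raise RuntimeError` at the end of each factory.
bot = bot_factory.create_bot(const.CHATGPT)
voice = voice_factory.create_voice("openai")
print(type(bot).__name__, type(voice).__name__)   # ChatGPTBot OpenaiVoice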
2024-01-10 | zhbhun/backend-learning | languages~python~tester~library~tester.py | from langchain import OpenAI
print(OpenAI)
import pydash
print(pydash.flatten_deep([1, 2, [3, [4, 5, [6, 7]]]]))
| [] |
2024-01-10 | zhbhun/backend-learning | languages~python~librayries~chatgpt~langchain~tester~hello.py | from langchain.llms import OpenAI
text = "What would be a good company name for a company that makes colorful socks?"
print(text)
llm = OpenAI(temperature=0.9)
print(llm(text))
| [] |
2024-01-10 | krithik-ramesh/9.66-final-project | babi_code~babi_gpt.py | import openai
import pandas as pd
import json
import os
from openai import OpenAI
import re
from tqdm.asyncio import tqdm
import asyncio
import aiohttp
from tenacity import retry, stop_after_attempt, wait_random_exponential
import io
import contextlib
# Set the API key
os.environ["OPENAI_API_KEY"] = "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"
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {OPENAI_API_KEY}"
}
class ProgressLog:
def __init__(self, total):
self.total = total
self.done = 0
def increment(self):
self.done += 1
def __repr__(self):
return f"Done runs {self.done}/{self.total}."
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(20), retry_error_callback=lambda _: None)
async def get_completion(story, query, session, semaphore, pbar):
async with semaphore:
try:
async with session.post("https://api.openai.com/v1/chat/completions", headers=headers, json={
"model": "gpt-4",
"seed": 42,
"messages": [
{"role": "system", "content": "You are a helpful assistant. Your task is to write Pyro code to model relationships in stories and answer queries about them. Please consider all relational types"},
{"role": "system", "content": """Here is an example of story and complementary code you should attempt to write. For this given story:
1 Lily is a frog.
2 Bernhard is a frog.
3 Bernhard is green.
4 Brian is a lion.
5 Brian is white.
6 Julius is a swan.
7 Julius is green.
8 Lily is green.
9 Greg is a swan.
10 What color is Greg?
to which the answer as green,
the corresponding pyro code looked like this:
import pyro
import pyro.distributions as dist
import torch
# Example data (species to color mapping)
data = {
'frog': ['green', 'green'],
'lion': ['white'],
'swan': ['green', 'green', 'white'] # Assuming we have an observation that a swan can also be white
}
# Model
def species_color_model(data):
# Prior probabilities for each color for each species
species_colors = {
'frog': pyro.sample('frog_color', dist.Dirichlet(torch.tensor([1.0, 1.0]))),
'lion': pyro.sample('lion_color', dist.Dirichlet(torch.tensor([1.0, 1.0]))),
'swan': pyro.sample('swan_color', dist.Dirichlet(torch.tensor([1.0, 1.0])))
}
# Observations
for species, colors in data.items():
for color in colors:
if color == 'green':
pyro.sample(f'obs_{species}_{color}', dist.Categorical(species_colors[species]), obs=torch.tensor(0))
elif color == 'white':
pyro.sample(f'obs_species}_color}', dist.Categorical(species_colors[species]), obs=torch.tensor(1))
# Inference
conditioned_model = pyro.condition(species_color_model, data={})
# Assuming we want to infer the color of a 'swan'
# We would run inference here (e.g., using Pyro's MCMC or SVI) to get the posterior distribution of swan's color
"""},
{"role": "user", "content": f"Story: {story} \nQuery: {query}. "}
]
}) as resp:
resp.raise_for_status()
response_json = await resp.json()
result = response_json["choices"][0]['message']["content"]
except aiohttp.ClientResponseError as e:
print(f"HTTP Error: {e.status}")
result = None
except aiohttp.ClientError as e:
print(f"Client Error: {e}")
result = None
except json.JSONDecodeError as e:
print(f"JSON Decode Error: {e}")
result = None
except Exception as e:
print(f"Unexpected error: {e}")
result = None
finally:
pbar.update(1)
return result
async def generate_pyro_code(stories, queries, max_parallel_calls, timeout):
semaphore = asyncio.Semaphore(value=max_parallel_calls)
pbar = tqdm(total=len(stories))
try:
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=timeout)) as session:
tasks = []
for story, query in zip(stories, queries):
task = get_completion(story, query, session, semaphore, pbar)
tasks.append(task)
results = await asyncio.gather(*tasks, return_exceptions=True)
finally:
pbar.close()
return results
# Load the bAbI dataset
file_path = 'tasks_1-20_v1-2/en/qa16_basic-induction_test.txt'
with open(file_path) as f:
lines = f.readlines()
# Group every 10 lines into a single story
stories = []
queries = []
for i in range(0, int(len(lines)/5), 10):
story = "\n".join(lines[i:i+9]).strip() # Story is the first 9 lines
query = lines[i+9].strip() # Query is the 10th line
stories.append(story)
queries.append(query)
# Run the asynchronous code generation
max_parallel_calls = 25
timeout = 60
loop = asyncio.get_event_loop()
pyro_code_results = loop.run_until_complete(generate_pyro_code(stories, queries, max_parallel_calls, timeout))
# Process results to get Pyro code
pyro_code_snippets = [{
"story": story,
"query": query,
"pyro_code": result
} for story, query, result in zip(stories, queries, pyro_code_results)]
# Save or process pyro_code_snippets as needed
output_file_path = 'babi_q16_pyro_code_results_with_GPT4.json'
with open(output_file_path, 'w') as out_file:
json.dump(pyro_code_snippets, out_file, ensure_ascii=False, indent=2)
| [
"lion_color",
"Story: PLACEHOLDER \nQuery: PLACEHOLDER. ",
"obs_PLACEHOLDER_PLACEHOLDER",
"swan",
"green",
"swan_color",
"lion",
"You are a helpful assistant. Your task is to write Pyro code to model relationships in stories and answer queries about them. Please consider all relational types",
"frog_color"
] |
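A short follow-up sketch that inspects the results file written above; the field names match the pyro_code_snippets entries built in the script.

import json

with open('babi_q16_pyro_code_results_with_GPT4.json') as f:
    snippets = json.load(f)

missing = [s for s in snippets if not s["pyro_code"]]
print(f"{len(snippets)} stories processed, {len(missing)} without a model response")
first = snippets[0]
print(first["query"])
print((first["pyro_code"] or "")[:300])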
2024-01-10 | krithik-ramesh/9.66-final-project | clutrr_code~pyro_gpt_test.py | import openai
import pandas as pd
import json
import os
from openai import OpenAI
import re
from tqdm.asyncio import tqdm
import asyncio
import aiohttp
from tenacity import retry, stop_after_attempt, wait_random_exponential
import io
import contextlib
# Set the API key
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {OPENAI_API_KEY}"
}
class ProgressLog:
def __init__(self, total):
self.total = total
self.done = 0
def increment(self):
self.done += 1
def __repr__(self):
return f"Done runs {self.done}/{self.total}."
@retry(wait=wait_random_exponential(min=1, max=60), stop=stop_after_attempt(20), retry_error_callback=lambda _: None)
async def get_completion(story, query, genders, session, semaphore, pbar):
async with semaphore:
try:
async with session.post("https://api.openai.com/v1/chat/completions", headers=headers, json={
"model": "gpt-4-1106-preview",
"seed": 42,
"messages": [
{"role": "system", "content": "You are a helpful assistant. Your task is to write Pyro code to model relationships in stories, considering gender information, and answer queries about them. Please consider a full list of relationships include those of siblings, in-laws (daughter-in law, son in-law, mother-in law, aunt in law, uncle in-law, etc.) so look beyon just the story and consider all familial relationships in your code. "},
{"role": "system", "content": """
here is an example of pyro code for the following story: \n \n
[Darnell] loved his mother, [Theresa]. [Theresa] was so proud of her daughter [Amanda] for getting straight A's this semester. [Darnell] is going to the arcade with his sister, [Michelle]. [Michelle] was excited for today, its her daughter's, [Theresa], spring break. She will finally get to see her. \n \n \n
and for this query: \n \n ('Amanda', 'Michelle') \n \n which had a target of "sister". \n \n
Please ensure that the output of the answer is of the relational form e.g. "'mother', 'daughter', 'sister', 'aunt', 'cousin', 'grandmother', 'granddaughter'": \n \n this was the code:
import torch
import pyro
import pyro.distributions as dist
# use the provided genders of the individuals
genders = {'Amanda': 'female', 'Theresa': 'female', 'Michelle': 'female', 'Darnell': 'male'}
# Define a simple family tree model in Pyro
def family_tree_model():
# Define the relationships and their initial probabilities
relationships = ['mother', 'daughter', 'sister', 'other']
rel_probs = torch.tensor([0.25, 0.25, 0.25, 0.25]) # Equal probabilities
# Theresa is the mother of Amanda and Michelle; Darnell is the brother of Michelle.
# We reflect these relationships in our model
# For simplicity, we use indices: mother=0, daughter=1, sister=2, etc. please write out conditional indices of all possibilities.
# Theresa -> Amanda (mother)
pyro.sample('Theresa_Amanda', dist.Categorical(probs=torch.tensor([1.0, 0.0, 0.0, 0.0])))
# Theresa -> Michelle (mother)
pyro.sample('Theresa_Michelle', dist.Categorical(probs=torch.tensor([1.0, 0.0, 0.0, 0.0])))
# Darnell -> Michelle (sister)
pyro.sample('Darnell_Michelle', dist.Categorical(probs=torch.tensor([0.0, 0.0, 1.0, 0.0])))
# Inference for Amanda's relationship to Michelle
# Since Theresa is the mother of both Amanda and Michelle, Amanda and Michelle are sisters
amanda_michelle_rel = pyro.sample('Amanda_Michelle', dist.Categorical(probs=torch.tensor([0.0, 0.0, 1.0, 0.0])))
return amanda_michelle_rel.item()
# Run the model to infer the relationship between Amanda and Michelle
most_likely_relationship = family_tree_model()
relationship = relationships[most_likely_relationship]
print(f"The inferred relationship between Amanda and Michelle is: {relationship}")
"""},
{"role": "user", "content": "use the following steps to solve the question in the next prompt: First consider all of the conditionals provided from the story and then write those out in pyro like the example above.Think of the correct relationship and please ensure that you consider all types of relationships like mother-in-law, sister-in-law, uncle-in-law, brother-in-law, etc." },
{"role": "user", "content": f"Story: {story} \nGenders: {genders} \nQuery: {query}. "}
]
}) as resp:
resp.raise_for_status()
response_json = await resp.json()
result = response_json["choices"][0]['message']["content"]
except aiohttp.ClientResponseError as e:
print(f"HTTP Error: {e.status}")
result = None
except aiohttp.ClientError as e:
print(f"Client Error: {e}")
result = None
except json.JSONDecodeError as e:
print(f"JSON Decode Error: {e}")
result = None
except Exception as e:
print(f"Unexpected error: {e}")
result = None
finally:
pbar.update(1)
return result
async def generate_pyro_code(stories, queries, genders, max_parallel_calls, timeout):
semaphore = asyncio.Semaphore(value=max_parallel_calls)
pbar = tqdm(total=len(stories))
try:
async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=timeout)) as session:
tasks = []
for story, query, gender in zip(stories, queries, genders):
task = get_completion(story, query, gender, session, semaphore, pbar)
tasks.append(task)
results = await asyncio.gather(*tasks, return_exceptions=True)
finally:
pbar.close()
return results
# Load the CSV data
file_path = 'data_emnlp_final/data_06b8f2a1/2.2,2.3_train.csv'
df = pd.read_csv(file_path)
df = df.iloc[:200]
# Prepare stories, queries, and gender information
stories = df['clean_story'].tolist()
queries = df['query'].tolist()
genders = df['genders'].tolist()
# Run the asynchronous code generation
max_parallel_calls = 25
timeout = 60
loop = asyncio.get_event_loop()
pyro_code_results = loop.run_until_complete(generate_pyro_code(stories, queries, genders, max_parallel_calls, timeout))
# Process results to get Pyro code
pyro_code_snippets = [{
"story": story,
"query": query,
"genders": gender,
"pyro_code": result
} for story, query, gender, result in zip(stories, queries, genders, pyro_code_results)]
# Save or process pyro_code_snippets as needed
output_file_path = 'babi_pyro_code_results_with_gender_GPT4_turbo_v5.json'
with open(output_file_path, 'w') as out_file:
json.dump(pyro_code_snippets, out_file, ensure_ascii=False, indent=2)
| [
"use the following steps to solve the question in the next prompt: First consider all of the conditionals provided from the story and then write those out in pyro like the example above.Think of the correct relationship and please ensure that you consider all types of relationships like mother-in-law, sister-in-law, uncle-in-law, brother-in-law, etc.",
"Story: PLACEHOLDER \nGenders: PLACEHOLDER \nQuery: PLACEHOLDER. ",
"You are a helpful assistant. Your task is to write Pyro code to model relationships in stories, considering gender information, and answer queries about them. Please consider a full list of relationships include those of siblings, in-laws (daughter-in law, son in-law, mother-in law, aunt in law, uncle in-law, etc.) so look beyon just the story and consider all familial relationships in your code. ",
"{'Amanda': 'female', 'Theresa': 'female', 'Michelle': 'female', 'Darnell': 'male'}"
] |
2024-01-10 | inishantxchandel/story_teller_backend | story_teller_api~api_v1~endpoints~story_writer.py | from fastapi import FastAPI, HTTPException
import openai
from fastapi import APIRouter, Depends, HTTPException, Response
from ... import models, schemas,auth2
from ...database import get_db
from sqlalchemy.orm import Session
import json
from ...config import settings
# OpenAI API configuration
router = APIRouter(prefix="/story_writer", tags=["story_writer"])
openai.api_key = settings.OPENAI_API_KEY
@router.post("/get_story_title_and_chapter", response_model=list[schemas.StoryResponse])
async def get_story_title_and_chapter(
request: schemas.StoryRequest, db: Session = Depends(get_db),current_user: int = Depends(auth2.get_current_user)
):
try:
description = request.description
messages = [
{
"role": "system",
"content": "you are professional ,your task is to guide a person regarding their queries.",
},
{
"role": "user",
"content": f"""write a title and list of chapters name by taking context from given description. Description-->{description}. Please provide response in JSON format. The format should be like this-->{'{{"title":"hello","chapters":["word","word","word"]}}'}. Make sure the chapters list only contains chapter names, not any numbers or 'chapter 1' like. """,
},
]
responses = []
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=messages,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
result = response.choices[0].message.content.strip()
result = eval(result) # Convert the string to a dictionary
title = result.get("title", "")
chapters = result.get("chapters", [])
story = schemas.StoryResponse(title=title, chapters=chapters)
responses.append(story)
new_story = models.Story(story_title_chapter=result, story_till_now="",owner_id=current_user.id)
db.add(new_story)
db.commit()
db.refresh(new_story)
return responses
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
def get_summary(text):
try:
messages = [
{"role": "user", "content": f"generate summary of given text. text-->{text}"}
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=messages,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
result = response.choices[0].message.content.strip()
return result
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.post("/get_story_chapter", response_model=schemas.ChapterResponse)
async def get_story_chapter(request: schemas.ChapterRequest, db: Session = Depends(get_db),current_user: int = Depends(auth2.get_current_user)):
try:
chapter_info = request.description
story_id = request.story_id
chapter_name = request.chapter_name
story = db.query(models.Story).filter(models.Story.id == story_id,models.Story.owner_id== current_user.id).first()
story_info = json.dumps(story.story_title_chapter)
story_info = f"""{story_info}"""
previous_info = ""
if story.story_till_now:
previous_info = story.story_till_now
print(previous_info)
summary_till_now = get_summary(previous_info)
messages = [
{"role": "system", "content": story_info},
{
"role": "assistant",
"content": f"""Summary till now -> {summary_till_now} .Continue writing a new chapter {chapter_info} content by taking context from the above given chapter info and summary till now. Do not write chapter number in response content:""",
},
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=messages,
temperature=0.7,
max_tokens=1000,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
result = response.choices[0].message.content.strip()
chapter_dict = {
"chapter_name": chapter_name,
"chapter_content": result
}
story_chapter_wise = story.story_chapter_wise if story.story_chapter_wise else []
story_chapter_wise.append(chapter_dict)
story_query = db.query(models.Story).filter(models.Story.id == story_id)
story_query.update({"story_chapter_wise": story_chapter_wise, "story_till_now": previous_info + result})
db.commit()
content = result
chapter = schemas.ChapterResponse(content=content)
return chapter
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
| [
"story_infodb28c7a4-ac4d-4099-aae7-551305b5bc72",
"generate summary of given text. text-->PLACEHOLDER",
"Summary till now -> PLACEHOLDER .Continue writing a new chapter PLACEHOLDER content by taking context from the above given chapter info and summary till now. Do not write chapter number in response content:",
"write a title and list of chapters name by taking context from given description. Description-->PLACEHOLDER. Please provide response in JSON format. The format should be like this-->{{\"title\":\"hello\",\"chapters\":[\"word\",\"word\",\"word\"]}}. Make sure the chapters list only contains chapter names, not any numbers or 'chapter 1' like. ",
"you are professional ,your task is to guide a person regarding their queries."
] |
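A client-side sketch of calling the two endpoints above. The base URL and bearer-token header are assumptions (auth2.get_current_user implies token auth, but the auth setup is not shown); the request and response fields follow the schemas referenced in the code.

import requests

BASE = "http://localhost:8000/story_writer"            # assumed host and port
headers = {"Authorization": "Bearer <access-token>"}    # placeholder token

outline = requests.post(f"{BASE}/get_story_title_and_chapter",
                        json={"description": "A lighthouse keeper who collects lost letters"},
                        headers=headers).json()[0]
print(outline["title"], outline["chapters"])

chapter = requests.post(f"{BASE}/get_story_chapter",
                        json={"description": outline["chapters"][0],
                              "chapter_name": outline["chapters"][0],
                              "story_id": 1},            # id of the Story row created above
                        headers=headers).json()
print(chapter["content"])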
2024-01-10 | techwithtim/AI-Choose-Your-Own-Adventure-Game | tutorial.py | from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
from langchain.memory import CassandraChatMessageHistory, ConversationBufferMemory
from langchain.llms import OpenAI
from langchain import LLMChain, PromptTemplate
import json
cloud_config= {
'secure_connect_bundle': 'secure-connect-choose-your-own-adventure.zip'
}
with open("choose_your_own_adventure-token.json") as f:
secrets = json.load(f)
CLIENT_ID = secrets["clientId"]
CLIENT_SECRET = secrets["secret"]
ASTRA_DB_KEYSPACE = ""
OPENAI_API_KEY = ""
auth_provider = PlainTextAuthProvider(CLIENT_ID, CLIENT_SECRET)
cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider)
session = cluster.connect()
message_history = CassandraChatMessageHistory(
session_id="anything",
session=session,
keyspace=ASTRA_DB_KEYSPACE,
ttl_seconds=3600
)
message_history.clear()
cass_buff_memory = ConversationBufferMemory(
memory_key="chat_history",
chat_memory=message_history
)
template = """
You are now the guide of a mystical journey in the Whispering Woods.
A traveler named Elara seeks the lost Gem of Serenity.
You must navigate her through challenges, choices, and consequences,
dynamically adapting the tale based on the traveler's decisions.
Your goal is to create a branching narrative experience where each choice
leads to a new path, ultimately determining Elara's fate.
Here are some rules to follow:
1. Start by asking the player to choose some kind of weapons that will be used later in the game
2. Have a few paths that lead to success
3. Have some paths that lead to death. If the user dies generate a response that explains the death and ends in the text: "The End.", I will search for this text to end the game
Here is the chat history, use this to understand what to say next: {chat_history}
Human: {human_input}
AI:"""
prompt = PromptTemplate(
input_variables=["chat_history", "human_input"],
template=template
)
llm = OpenAI(openai_api_key=OPENAI_API_KEY)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
memory=cass_buff_memory
)
choice = "start"
while True:
response = llm_chain.predict(human_input=choice)
print(response.strip())
if "The End." in response:
break
choice = input("Your reply: ")
| [
"chat_history",
"\nYou are now the guide of a mystical journey in the Whispering Woods. \nA traveler named Elara seeks the lost Gem of Serenity. \nYou must navigate her through challenges, choices, and consequences, \ndynamically adapting the tale based on the traveler's decisions. \nYour goal is to create a branching narrative experience where each choice \nleads to a new path, ultimately determining Elara's fate. \n\nHere are some rules to follow:\n1. Start by asking the player to choose some kind of weapons that will be used later in the game\n2. Have a few paths that lead to success\n3. Have some paths that lead to death. If the user dies generate a response that explains the death and ends in the text: \"The End.\", I will search for this text to end the game\n\nHere is the chat history, use this to understand what to say next: {chat_history}\nHuman: {human_input}\nAI:",
"The End.",
"human_input"
] |
2024-01-10 | yuqisun/notes | llm~chatglm_llm.py | from typing import Any, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from transformers import AutoTokenizer, AutoModel
model_path = "D:\\models\\"
llm_model_name = model_path + "chatglm-6b"
llm_tokenizer = AutoTokenizer.from_pretrained(llm_model_name, trust_remote_code=True)
llm_model = AutoModel.from_pretrained(llm_model_name, trust_remote_code=True).float()
class ChatGLM(LLM):
max_token: int = 10000
temperature: float = 0.8
top_p = 0.9
history = []
@property
def _llm_type(self) -> str:
return "ChatGLM"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
response, history = llm_model.chat(llm_tokenizer, prompt, history=[],
max_length=self.max_token,
temperature=self.temperature,
top_p=self.top_p)
if stop is not None:
raise ValueError("stop kwargs are not permitted.")
return response
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {"temperature": self.temperature}
from langchain.prompts import PromptTemplate
prompt = PromptTemplate(input_variables=['product'],
template="What is the creative name for a store makes {product}?")
from langchain.chains import LLMChain, SimpleSequentialChain
llm = ChatGLM()
chain = LLMChain(llm=llm, prompt=prompt)
# print(chain.run('Electric Vehicle'))
second_prompt = PromptTemplate(input_variables=['company_name'],
template="Write a slogan for company {company_name}?")
chain_two = LLMChain(llm=llm, prompt=second_prompt)
overall_chain = SimpleSequentialChain(chains=[chain, chain_two], verbose=True)
slogan = overall_chain.run('Electric Vehicle')
print(slogan)
| [
"Write a slogan for company {company_name}?",
"What is the creative name for a store makes {product}?",
"company_name"
] |
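The wrapper above is a regular LangChain LLM subclass, so it can also be used outside a chain. A minimal sketch; note the module loads ChatGLM-6B from the hard-coded model path and runs its demo chains at import time.

from llm.chatglm_llm import ChatGLM   # assumes the file is importable as llm.chatglm_llm

llm = ChatGLM(temperature=0.2)        # fields declared on the class can be set at construction
print(llm("Suggest one name for a store that sells electric vehicles."))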
2024-01-10 | LyricLy/Esobot | cogs~japanese.py | import aiohttp
import asyncio
import pykakasi
import discord
import asyncio
import random
import io
from discord.ext import commands, menus
from openai import AsyncOpenAI
from utils import show_error
openai = AsyncOpenAI()
def format_jp_entry(entry):
try:
return f"{entry['word']}【{entry['reading']}】"
except KeyError:
try:
return entry["reading"]
except KeyError:
try:
return entry["word"]
except KeyError:
return "???"
class DictSource(menus.ListPageSource):
def __init__(self, data):
super().__init__(data, per_page=1)
async def format_page(self, menu, entry):
e = discord.Embed(
title = f"Result #{menu.current_page + 1}",
description = format_jp_entry(entry['japanese'][0])
)
if tags := [
*["common"]*entry.get("is_common", False),
*sorted(f"JLPT {x.partition('-')[2]}" for x in entry.get("jlpt", []))[-1:],
]:
e.title += f" ({', '.join(tags)})"
for i, sense in enumerate(entry["senses"], start=1):
e.add_field(
name = ", ".join(sense["parts_of_speech"]) if sense["parts_of_speech"] else "\u200b",
value = " | ".join([
f"{i}. " + "; ".join(sense["english_definitions"]),
*filter(None, [", ".join(f"*{x}*" for x in sense["tags"] + sense["info"])]),
]),
inline=False
)
if len(entry["japanese"]) > 1:
e.add_field(name = "Other forms", value = "\n".join(format_jp_entry(x) for x in entry["japanese"][1:]), inline=False)
return e
class Japanese(commands.Cog):
"""Weeb stuff."""
def __init__(self, bot):
self.bot = bot
kakasi = pykakasi.kakasi()
kakasi.setMode("H", "a")
kakasi.setMode("K", "a")
kakasi.setMode("J", "a")
kakasi.setMode("s", True)
self.conv = kakasi.getConverter()
@commands.command(aliases=["ro", "roman", "romanize", "romanise"])
async def romaji(self, ctx, *, text: commands.clean_content = None):
"""Romanize Japanese text."""
await ctx.send(self.conv.do(text or self.last_lyric_msg))
@commands.command(aliases=["jp", "jsh", "dictionary", "dict"])
async def jisho(self, ctx, *, query):
"""Look things up in the Jisho dictionary."""
async with self.bot.session.get("https://jisho.org/api/v1/search/words", params={"keyword": query}) as resp:
if resp.status == 200:
data = await resp.json()
else:
data = None
        if not data or not data["data"]:  # data is None when the request above did not return 200
return await show_error(ctx, "That query returned no results.")
pages = menus.MenuPages(source=DictSource(data["data"]), clear_reactions_after=True)
await pages.start(ctx)
GENERAL_PROMPT = " ".join("""
Your role is to explain recent references to Japan in a Discord chat log.
You look at the context for references to Japanese culture and media, giving brief but comprehensive descriptions in English as necessary.
If the meaning of something would be obvious to an English speaker, it should not be explained.
When text is written in Japanese, give a literal translation of it and *do not* say anything else.
It is not necessary to clarify what you are translating or that you are stating a translation.
There is no single user that you can address. Do not use second-person pronouns. Do not refer to the input as "the text".
Talk about the channel as a whole with terms like "that I can see", "here", or "in the chat" instead.
Only when there is absolutely nothing to be explained, meaning that there is nothing Japanese in the input
or that everything Japanese is obvious or has already been explained, indicate as such in your own words and say nothing else.
If there is something to be explained, there is no need to say anything along the lines of "there are no other references to Japan in the chat".
When you are done explaining, simply stop talking and say nothing more.
Try to keep your responses natural and avoid repeating the words in this prompt verbatim.
Do not acknowledge non-Japanese messages unless you're certain they're relevant.
""".split())
SPECIFIC_PROMPT = " ".join("""
You are a helpful assistant.
You can perform a variety of tasks, but your main role is to explain references to Japanese culture and media, providing short but comprehensive descriptions in English.
When given text written in Japanese, you give a literal translation of the text without saying anything else. Do not give further context or commentary when translating.
Responses should be 4 sentences long at most and preferably only one sentence.
""".split())
@staticmethod
def _urls_of_message(message):
attached = [a.url for a in message.attachments if "image" in a.content_type]
embedded = [e.url for e in message.embeds if e.type == "image"]
return attached + embedded
@staticmethod
def _convert_message(content, urls):
images = [{"type": "image_url", "image_url": {"url": url}} for url in urls]
return {"role": "user", "content": [{"type": "text", "text": content}, *images]}
@commands.command(aliases=["what", "unlyric", "undweeb", ";)", "otherlanguagesscareme",
"機械翻訳", "ifyouhaveajapaneseimewhyareyouusingashittygpt4command"])
async def unweeb(self, ctx, *, lyric_quote: commands.clean_content = ""):
"""Translate Japanese."""
prompt = self.SPECIFIC_PROMPT
messages = []
if r := ctx.message.reference:
if not isinstance(r.resolved, discord.Message):
return await ctx.send("Reply unavailable :(")
messages.append(self._convert_message(r.resolved.content, self._urls_of_message(r.resolved)))
urls = self._urls_of_message(ctx.message)
if lyric_quote or urls:
messages.append(self._convert_message(lyric_quote, urls))
if not messages:
prompt = self.GENERAL_PROMPT
messages = [self._convert_message(m.content, self._urls_of_message(m)) async for m in ctx.history(limit=12)][:0:-1]
completion = await openai.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{"role": "system", "content": prompt},
*messages,
],
max_tokens=512,
)
result = completion.choices[0].message.content
if len(result) > 2000:
await ctx.reply(file=discord.File(io.StringIO(result), "resp.txt"))
else:
await ctx.reply(result)
async def setup(bot):
await bot.add_cog(Japanese(bot))
| [
"You are a helpful assistant. You can perform a variety of tasks, but your main role is to explain references to Japanese culture and media, providing short but comprehensive descriptions in English. When given text written in Japanese, you give a literal translation of the text without saying anything else. Do not give further context or commentary when translating. Responses should be 4 sentences long at most and preferably only one sentence.",
"Your role is to explain recent references to Japan in a Discord chat log. You look at the context for references to Japanese culture and media, giving brief but comprehensive descriptions in English as necessary. If the meaning of something would be obvious to an English speaker, it should not be explained. When text is written in Japanese, give a literal translation of it and *do not* say anything else. It is not necessary to clarify what you are translating or that you are stating a translation. There is no single user that you can address. Do not use second-person pronouns. Do not refer to the input as \"the text\". Talk about the channel as a whole with terms like \"that I can see\", \"here\", or \"in the chat\" instead. Only when there is absolutely nothing to be explained, meaning that there is nothing Japanese in the input or that everything Japanese is obvious or has already been explained, indicate as such in your own words and say nothing else. If there is something to be explained, there is no need to say anything along the lines of \"there are no other references to Japan in the chat\". When you are done explaining, simply stop talking and say nothing more. Try to keep your responses natural and avoid repeating the words in this prompt verbatim. Do not acknowledge non-Japanese messages unless you're certain they're relevant."
] |
2024-01-10 | ziqiwww/refcountChecker | LLMPlugin~RC_openai.py | from openai import OpenAI
import json
PROJECT_ROOT = "/Users/ziqi/Work/pyc/workaround/refcountChecker/"
class LLMPlugin:
def __init__(self):
# read pretrain prompt from pretrain_prompt.txt
# read LLMPlugin settings
with open(PROJECT_ROOT + 'settings.json') as f:
data = json.load(f)
self.model = data['LLMPlugin']['model']
self.cfile = data['ToolConfig']['module']
self.apiKey = data['LLMPlugin']['apiKey']
pretrain_path = PROJECT_ROOT + 'LLMPlugin/pretrain_prompt.txt'
with open(pretrain_path, 'r') as f:
self._pretrain_prompt = f.read()
self.messages = [{"role": "system", "content": self._pretrain_prompt}]
with open(self.cfile) as f:
self.c_content = f.read()
self.messages.append({"role": "user", "content": self.c_content})
def generate(self):
client = OpenAI(api_key=self.apiKey)
response = client.chat.completions.create(
model=self.model,
messages=self.messages,
)
return response.choices[0].message
if __name__ == "__main__":
print("\n====================\nContent below is generated by LLMPlugin\n====================\n")
plugin = LLMPlugin()
print(plugin.generate().content)
| [] |
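The plugin above expects PROJECT_ROOT/settings.json (plus LLMPlugin/pretrain_prompt.txt, not shown) to exist. A sketch of the minimal settings shape implied by the keys read in __init__; all values are placeholders.

import json

settings = {
    "LLMPlugin": {"model": "gpt-4", "apiKey": "sk-your-key-here"},
    "ToolConfig": {"module": "/path/to/extension_under_check.c"},
}
with open("settings.json", "w") as f:
    json.dump(settings, f, indent=2)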
2024-01-10 | Annarhysa/OpenAI-Lab | text-gen.py | import openai
import os
from dotenv import load_dotenv
load_dotenv()
# Set up your API key
openai.api_key = os.getenv("api")
# Set up the prompt and parameters
prompt = "What is One Piece?"
model = "text-davinci-002"
temperature = 0.7
max_tokens = 100
# Generate text using OpenAI's GPT-3
response = openai.Completion.create(
engine=model,
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens
)
# Print the generated text
print(response.choices[0].text.strip())
| [
"What is One Piece?"
] |
2024-01-10 | Annarhysa/OpenAI-Lab | Summarizer.py | import openai
import streamlit as st
openai.api_key = st.secrets['api_secret']
st.header("Summarizer App using OpenAI ")
article_text = st.text_area("Enter your scientific texts to summarize")
output_size = st.radio( label = "What kind of output do you want? ", options= ["To-The-Point", "Concise", "Detailed"])
if output_size == "To-The-Point":
out_token = 50
elif output_size == "Concise":
out_token = 128
else:
out_token = 516
if (len(article_text)>100):
# max = st.text_input("Enter the max words you want your text to be summarized in")
if st.button("Generate Summary",type='primary'):
response = openai.Completion.create( engine = "text-davinci-002", prompt = "Please summarize this scientific article for me in a few sentences: "+ article_text, max_tokens = out_token, temperature = 0.5)
res = response["choices"][0]["text"]
st.success(res)
st.download_button("Download the result", res)
elif (len(article_text)<100):
st.warning("The Sentence is not long enough") | [
"Please summarize this scientific article for me in a few sentences: PLACEHOLDER"
] |
2024-01-10 | danteGPT/ScriptWriterChat | gpt_waster.py | import os
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate
from langchain.chains import ConversationChain
from langchain.chains.conversation.memory import ConversationSummaryMemory
from prompt_lib import STARTER_PROMPT
from dotenv import load_dotenv
load_dotenv()
if os.getenv("OPENAI_API_KEY") is None or os.getenv("OPENAI_API_KEY") == "":
print("OPENAI_API_KEY is not set")
exit(1)
else:
print("OPENAI_API_KEY is set")
chat = ChatOpenAI(temperature=0.1, model_name="gpt-3.5-turbo")
PROMPT = PromptTemplate(template=STARTER_PROMPT, input_variables=["input", "history"])
conversation = ConversationChain(prompt=PROMPT, llm=chat, memory=ConversationSummaryMemory(llm=chat), verbose=True)
def generate_response(prompt):
return conversation.predict(input=prompt)
for i in range(250):
response = generate_response(STARTER_PROMPT)
print("Response:" + str(i))
print(response)
| [
"input"
] |
2024-01-10 | bodytaylor/TARSautomation | code~hotel_description.py | import openai
openai.api_key = "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"
def comp(PROMPT, MaxToken=50, outputs=3):
# using OpenAI's Completion module that helps execute
# any tasks involving text
response = openai.Completion.create(
# model name used here is text-davinci-003
# there are many other models available under the
# umbrella of GPT-3
model="text-davinci-003",
# passing the user input
prompt=PROMPT,
# generated output can have "max_tokens" number of tokens
max_tokens=MaxToken,
# number of outputs generated in one call
n=outputs
)
# creating a list to store all the outputs
output = list()
for k in response['choices']:
output.append(k['text'].strip())
return output
PROMPT = """
Shorten this text to fit 250 characters, do not change the words or change sentence structure, do not include special characters like " / & ; @ % '
Text:###
text: Experience urban tranquility at Novotel Singapore On Kitchener, your haven in the heritage hub of Little India. Wander Serangoon Road, where vibrant hues and alluring aromas enchant. Dive into the 24-hour shopping at Mustafa Centre. After indulging in Singapore's sensations, relax in lavish modernity within your contemporary furnished room. Our devoted team is poised to lead you in embracing the local essence, ensuring an unforgettable stay.
###
"""
print(comp(PROMPT, MaxToken=3000, outputs=1)) | [
" \n\nShorten this text to fit 250 characters, do not change the words or change sentence structure, do not include special characters like \" / & ; @ % ' \nText:### \ntext: Experience urban tranquility at Novotel Singapore On Kitchener, your haven in the heritage hub of Little India. Wander Serangoon Road, where vibrant hues and alluring aromas enchant. Dive into the 24-hour shopping at Mustafa Centre. After indulging in Singapore's sensations, relax in lavish modernity within your contemporary furnished room. Our devoted team is poised to lead you in embracing the local essence, ensuring an unforgettable stay.\n###\n"
] |
2024-01-10 | bodytaylor/TARSautomation | code~checking_data.py | import openai
openai.api_key = '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'
def get_completion(prompt, model="gpt-3.5-turbo"):
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0,
)
return response.choices[0].message["content"]
prompt = "WHO IS THE PRESIDENT OF UNITED STATES"
response = get_completion(prompt)
print(response) | [
"WHO IS THE PRESIDENT OF UNITED STATES"
] |
2024-01-10 | DavidMChan/grazier | grazier~engines~chat~vertex_engine.py | try:
from vertexai.preview.language_models import ChatModel, InputOutputTextPair
except ImportError:
ChatModel = None
InputOutputTextPair = None
from typing import Any, Dict, List
from ratelimit import limits, sleep_and_retry
from grazier.engines.chat import Conversation, ConversationTurn, LLMChat, Speaker, register_engine
from grazier.utils.python import singleton
class VertexLLMEngine(LLMChat):
def __init__(self, model: str, **kwargs: Dict[str, Any]) -> None:
super().__init__(device="api")
if ChatModel is None:
raise ImportError("Please install the Vertex AI SDK to use this LM engine.")
self._model = ChatModel.from_pretrained(model)
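# Default generation parameters; caller-supplied kwargs override these defaults via the dict union below (requires Python 3.9+)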
self._parameters = {
"max_output_tokens": 256, # Token limit determines the maximum amount of text output.
"top_p": 0.95, # Tokens are selected from most probable to least until the sum of their probabilities equals the top_p value.
"top_k": 40, # A top_k of 1 means the selected token is the most probable among all tokens.
} | kwargs
@sleep_and_retry # type: ignore
@limits( # type: ignore
calls=40, period=60
) # This is the default rate limit for Vertex AI (actual rate limit is 60 calls per minute, but we'll be conservative)
def _rate_limited_model_predict(self, info, **kwargs: Any) -> Any:
context, examples, prompt = info
chat = self._model.start_chat(context=context, examples=examples, **kwargs)
response = chat.send_message(prompt).text
return response
def call(self, conversation: Conversation, n_completions: int = 1, **kwargs: Any) -> List[ConversationTurn]:
# Start the chat
system_turns = [c for c in conversation.turns if c.speaker == Speaker.SYSTEM]
context = system_turns[-1].text if system_turns else ""
non_system_turns = [c for c in conversation.turns if c.speaker != Speaker.SYSTEM]
# Assert that the non-system turns alternate between the user and the agent
for idx, turn in enumerate(non_system_turns):
if idx % 2 == 0:
assert turn.speaker == Speaker.USER
else:
assert turn.speaker == Speaker.AI
if len(non_system_turns) > 1:
assert non_system_turns[-1].speaker == Speaker.USER
assert InputOutputTextPair is not None
# Build the examples
examples = [
InputOutputTextPair(input_text=non_system_turns[i].text, output_text=non_system_turns[i + 1].text)
for i in range(0, len(non_system_turns) - 1, 2)
]
else:
examples = []
# Normalize kwargs from openai to vertexai (some common parameters are different)
kwargs = (
self._parameters
| {
"max_output_tokens": kwargs.pop("max_output_tokens", kwargs.pop("max_tokens", 256)),
"temperature": kwargs.pop("temperature", 1.0),
}
| kwargs
)
return [
ConversationTurn(
self._rate_limited_model_predict((context, examples, non_system_turns[-1].text), **kwargs),
speaker=Speaker.AI,
) # type: ignore
for _ in range(n_completions)
]
@staticmethod
def is_configured() -> bool:
# Check to see if the Vertex AI SDK is installed, and if so, if the user has configured their credentials.
if ChatModel is None or InputOutputTextPair is None:
return False
# Check to see if the user has configured their google cloud credentials.
try:
from google.auth import default
except ImportError:
return False
try:
default()
except Exception:
return False
return True
@register_engine
@singleton
class PaLMEngine(VertexLLMEngine):
name = ("PaLM", "palm")
def __init__(self) -> None:
super().__init__("chat-bison@001")
| [] |
2024-01-10 | DavidMChan/grazier | grazier~engines~llm~vertex_engine.py | try:
from vertexai.preview.language_models import TextGenerationModel
except ImportError:
TextGenerationModel = None
from typing import Any, Dict, List
from ratelimit import limits, sleep_and_retry
from grazier.engines.llm import LLMEngine, register_engine
from grazier.utils.python import singleton
class VertexLLMEngine(LLMEngine):
def __init__(self, model: str, **kwargs: Dict[str, Any]) -> None:
if TextGenerationModel is None:
raise ImportError("Please install the Vertex AI SDK to use this LM engine.")
self._model = TextGenerationModel.from_pretrained(model)
self._parameters = {
# Token limit determines the maximum amount of text output.
"max_output_tokens": kwargs.pop("max_output_tokens", kwargs.pop("max_tokens", 256)),
"top_p": 0.95, # Tokens are selected from most probable to least until the sum of their probabilities equals the top_p value.
"top_k": 40, # A top_k of 1 means the selected token is the most probable among all tokens.
} | kwargs
@sleep_and_retry # type: ignore
@limits( # type: ignore
calls=40, period=60
) # This is the default rate limit for Vertex AI (actual rate limit is 60 calls per minute, but we'll be conservative)
def _rate_limited_model_predict(self, *args: Any, **kwargs: Any) -> Any:
return self._model.predict(*args, **kwargs)
def call(self, prompt: str, n_completions: int = 1, **kwargs: Any) -> List[str]:
# Normalize kwargs from openai to vertexai (some common parameters are different)
kwargs = (
self._parameters
| {
"max_output_tokens": kwargs.pop("max_output_tokens", kwargs.pop("max_tokens", 256)),
"temperature": kwargs.pop("temperature", 1.0),
}
| kwargs
)
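# Issue one rate-limited predict call per requested completion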
return [self._rate_limited_model_predict(prompt, **kwargs).text for _ in range(n_completions)] # type: ignore
@staticmethod
def is_configured() -> bool:
# Check to see if the Vertex AI SDK is installed, and if so, if the user has configured their credentials.
if TextGenerationModel is None:
return False
# Check to see if the user has configured their google cloud credentials.
try:
from google.auth import default
except ImportError:
return False
try:
default()
except Exception:
return False
return True
@register_engine
@singleton
class PaLMEngine(VertexLLMEngine):
name = ("PaLM", "palm")
def __init__(self) -> None:
super().__init__("text-bison@001")
| [] |
2024-01-10 | DavidMChan/grazier | grazier~engines~chat~anthropic_engine.py | import logging
import os
from typing import Any, List
import anthropic
from grazier.engines.chat import Conversation, ConversationTurn, LLMChat, Speaker, register_engine
from grazier.utils.python import retry, singleton
class AnthropicLMEngine(LLMChat):
def __init__(self, model: str):
super().__init__(device="api")
self._client = anthropic.Client(api_key=os.getenv("ANTHROPIC_API_KEY", None))
self._model = model
@retry()
def _completion(self, prompt: str, **kwargs: Any) -> Any:
kwargs = {
"temperature": kwargs.get("temperature", 0.7),
"max_tokens_to_sample": kwargs.get("max_tokens_to_sample", kwargs.pop("max_tokens", 256)),
"model": self._model,
"prompt": prompt,
} | kwargs
return self._client.completion(
**kwargs,
)
def call(self, conversation: Conversation, n_completions: int = 1, **kwargs: Any) -> List[ConversationTurn]:
# Some odd anthropic assertions
if conversation.turns[-1].speaker != Speaker.USER:
raise AssertionError("Last turn must be a user turn")
# Assert that conversations alternate between user and AI (anthropic doesn't support system turns)
for idx, turn in enumerate([c for c in conversation.turns if c.speaker != Speaker.SYSTEM]):
if idx % 2 == 0 and turn.speaker != Speaker.USER:
raise AssertionError("Conversations must alternate between user and AI turns")
if idx % 2 == 1 and turn.speaker != Speaker.AI:
raise AssertionError("Conversations must alternate between user and AI turns")
# Construct the messages list from the conversation
prompt = ""
for idx, turn in enumerate(conversation.turns):
if turn.speaker == Speaker.SYSTEM:
logging.warning("Anthropic does not support SYSTEM turns, skipping...")
elif turn.speaker == Speaker.USER:
prompt += f"{anthropic.HUMAN_PROMPT} "
prompt += turn.text + " "
elif turn.speaker == Speaker.AI:
prompt += f"{anthropic.AI_PROMPT} "
prompt += turn.text + " "
# add the last turn
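# The trailing assistant marker cues the completions API to generate the AI's next turn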
prompt += f"{anthropic.AI_PROMPT}"
temperature = kwargs.get("temperature", 0.7)
samples = []
for _ in range(n_completions):
resp = self._completion(prompt, temperature=temperature)
samples.append(resp["completion"])
return [ConversationTurn(text=s.strip(), speaker=Speaker.AI) for s in samples]
@staticmethod
def is_configured() -> bool:
return os.getenv("ANTHROPIC_API_KEY", None) is not None
@register_engine
@singleton
class Claude(AnthropicLMEngine):
name = ("Claude", "claude")
def __init__(self, **kwargs: Any) -> None:
super().__init__("claude-1")
@register_engine
@singleton
class Claude100K(AnthropicLMEngine):
name = ("Claude 100K", "claude-100k")
def __init__(self, **kwargs: Any) -> None:
super().__init__("claude-1-100k")
@register_engine
@singleton
class ClaudeInstant(AnthropicLMEngine):
name = ("Claude Instant", "claude-instant")
def __init__(self, **kwargs: Any) -> None:
super().__init__("claude-instant-1")
@register_engine
@singleton
class ClaudeInstant100K(AnthropicLMEngine):
name = ("Claude Instant 100K", "claude-instant-100k")
def __init__(self, **kwargs: Any) -> None:
super().__init__("claude-instant-1-100k")
@register_engine
@singleton
class Claude2(AnthropicLMEngine):
name = ("Claude 2", "claude-2")
def __init__(self, **kwargs: Any) -> None:
super().__init__("claude-2")
| [
" "
] |
2024-01-10 | Rihitonnnu/MOWAS | feature~conversation.py | import os
from dotenv import load_dotenv
import openai
import json
import numpy as np
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import get_openai_callback
from SyntheticVoice import SyntheticVoice
from sql import Sql
import rec_unlimited
from gpt import Gpt
import beep
import log_instance
from token_record import TokenRecord
from search_spot import SearchSpot
import place_details
from udp.udp_receive import UDPReceive
openai.api_key = os.environ["OPENAI_API_KEY"]
# Turn conversation() into a class
class Conversation():
def __init__(self,reaction_time_sheet_path):
self.reaction_time_sheet_path=reaction_time_sheet_path
# Set the JSON file paths
self.sleepy_json_path='json/embedding/is_sleepy.json'
self.introduce_reaction_json_path='json/embedding/introduce_reaction.json'
self.introduce_prompt = """"""
self.user_name=Sql().select('''
SELECT name
FROM users
''')
self.syntheticVoice = SyntheticVoice()
self.token_record = TokenRecord()
self.human_input = ""
template = """あなたは相手と会話をすることで覚醒維持するシステムで名前はもわすです。
# 条件
- 「会話を行いながら覚醒維持を行います」、「眠くなった場合は私に眠いと伝えてください」と伝える
- 相手の興味のある話題で会話をする
{chat_history}
{introduce_prompt}
Human: {human_input}
"""
prompt = PromptTemplate(
input_variables=["chat_history", "human_input", "introduce_prompt"], template=template
)
# Configure the memory that stores the conversation (window of k=3)
memory = ConversationBufferWindowMemory(
k=3, memory_key="chat_history", input_key="human_input")
self.llm_chain = LLMChain(
llm=ChatOpenAI(temperature=0),
prompt=prompt,
memory=memory,
verbose=False
)
def introduce(self,human_input):
# Skip the guidance if the driver is not sleepy
if not human_input=='眠いです':
return
# Get the current latitude and longitude
coordinates_results=UDPReceive('127.0.0.1',2002).get_coordinates()
spot_result = SearchSpot().search_spot(coordinates_results[0],coordinates_results[1])
spot_url = place_details.place_details(
spot_result['place_id'])
# Prompt that proposes guiding the driver to the nearby spot
self.introduce_prompt = """以下の案内文言を読んでください。
# 案内文言
{}さん、眠くなっているんですね。近くの休憩場所は{}です。この目的地まで案内しましょうか?
""".format(self.user_name, spot_result['display_name'])
response = self.llm_chain.predict(
human_input=human_input, introduce_prompt=self.introduce_prompt)
self.syntheticVoice.speaking(response.replace(
'AI: ', '').replace('もわす: ', ''))
# Receive the input
introduce_reaction_response = input("You: ")
# Use embeddings here to judge whether the driver is sleepy or not
result=self.embedding(self.introduce_reaction_json_path,introduce_reaction_response.replace('You:',''))
if result:
# Email the URL of the rest area
place_details.send_email(spot_url)
self.syntheticVoice.speaking("了解しました。休憩場所のマップURLをメールで送信しましたので確認してください。到着まで引き続き会話を続けます。")
self.introduce_prompt = """"""
# Reset human_input so the conversation can continue
self.human_input="何か話題を振ってください。"
def run(self):
# Set up logging
logger = log_instance.log_instance('conversation')
# Load environment variables
load_dotenv()
# Set up the SQL query
summary = Sql().select('''
SELECT summary
FROM users
''')
with get_openai_callback() as cb:
# Initialize the conversation counter
conv_cnt = 1
# 事前に入力をしておくことでMOWAS側からの応答から会話が始まる
# 分岐はドライバーの名前が入力されているかどうか
response = self.llm_chain.predict(
human_input="こんにちは。あなたの名前は何ですか?私の名前は{}です。".format(self.user_name), introduce_prompt=self.introduce_prompt)
self.syntheticVoice.speaking(response.replace(
'Mowasu: ', '').replace('もわす: ', ''))
print(response.replace('AI: ', ''))
# Record token usage to Excel
self.token_record.token_record(cb, conv_cnt)
conv_cnt += 1
while True:
try:
with get_openai_callback() as cb:
# human_input = rec_unlimited.recording_to_text()
self.human_input = input("You: ")
self.introduce(self.human_input)
logger.info(self.user_name + ": " + self.human_input)
response = self.llm_chain.predict(
human_input=self.human_input, summary=summary, introduce_prompt=self.introduce_prompt)
self.token_record.token_record(cb, conv_cnt)
conv_cnt += 1
logger.info(response.replace('AI: ', ''))
self.syntheticVoice.speaking(response.replace(
'AI: ', '').replace('もわす: ', ''))
except KeyboardInterrupt:
# Store the conversation summary in the DB
# summary = Gpt().make_conversation_summary()
# Sql().store_conversation_summary(summary)
# Sql().store_conversation()
beep.high()
exit(1)
# Function that computes cosine similarity
def cosine_similarity(self,a, b):
return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
# Prepare a query from the input and search using cosine similarity
def embedding(self,json_path,input):
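# Embed the input with text-embedding-ada-002 and rank the phrases in the JSON index by cosine similarity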
with open(json_path) as f:
INDEX = json.load(f)
# Prepare a query from the input and search using cosine similarity
query = openai.Embedding.create(
model='text-embedding-ada-002',
input=input
)
query = query['data'][0]['embedding']
results = map(
lambda i: {
'body': i['body'],
# Compute the cosine similarity between the query and each phrase
'similarity': self.cosine_similarity(i['embedding'], query)
},
INDEX
)
# Sort by cosine similarity in descending order
results = sorted(results, key=lambda i: i['similarity'], reverse=True)
# Output the choice with the highest similarity
if json_path==self.sleepy_json_path:
result = {
'眠い': True,
'少し眠い': True,
'眠くなりかけている': True,
'眠くない': False,
}
if json_path==self.introduce_reaction_json_path:
result = {
'はい': True,
'してください': True,
'お願いします': True,
'いいえ': False,
'しないでください': False,
'大丈夫です': False,
}
# Return True if sleepy, False if not sleepy
# print(result[results[0]["body"]])
return result[results[0]["body"]]
| [
"chat_history",
"introduce_prompt",
"あなたは相手と会話をすることで覚醒維持するシステムで名前はもわすです。\n # 条件\n - 「会話を行いながら覚醒維持を行います」、「眠くなった場合は私に眠いと伝えてください」と伝える\n - 相手の興味のある話題で会話をする\n\n {chat_history}\n {introduce_prompt}\n Human: {human_input}\n ",
"human_input"
] |
2024-01-10 | Rihitonnnu/MOWAS | test~spot_introduce_test.py | import os
import logging
from dotenv import load_dotenv
import openai
from langchain.memory import ConversationBufferMemory
from langchain.memory import ConversationBufferWindowMemory
from langchain.llms import OpenAI
from langchain.chains import ConversationChain
from langchain.prompts import ChatPromptTemplate
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema import BaseOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.callbacks import get_openai_callback
import logging
# Load environment variables
load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]
template = """あなたは相手と会話をすることで覚醒を維持するシステムであり、名前はもわすです。{human_input}""" + \
'休憩場所はローソン 九大学研都市駅前店もしくはファミリーマート JR九大学研都市駅店が近いです。紹介してください。'
prompt = PromptTemplate(template=template, input_variables=["human_input"])
llm = ChatOpenAI(temperature=0.1)
llm_chain = LLMChain(
llm=llm,
prompt=prompt,
verbose=False
)
response = llm_chain.predict(human_input="休憩できる場所を紹介してください")
print(response)
| [
"あなたは相手と会話をすることで覚醒を維持するシステムであり、名前はもわすです。{human_input}休憩場所はローソン 九大学研都市駅前店もしくはファミリーマート JR九大学研都市駅店が近いです。紹介してください。",
"human_input"
] |
2024-01-10 | Rihitonnnu/MOWAS | openai~fine_tuning.py | import os
import openai
from dotenv import load_dotenv
load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]
# file = openai.File.create(
# file=open("mydata.jsonl", "rb"),
# purpose='fine-tune'
# )
openai.FineTuningJob.create(
training_file="file-GQ717qTCTvvGgfxCJtbJafKy", model="gpt-3.5-turbo")
| [] |
2024-01-10 | NyanSequitur/SQA3D-LLM | sqa3dtest.py | # author: me :)
# date: 2021-07-31
# description: test the sqa3d dataset using gpt-3.5-turbo
# I don't like how this code handles reasoning
# the reasoning the LLM generates should not depend on the order of the captions
import logging
# log to log2.log
logging.basicConfig(filename='log2.log', filemode='w', format='%(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)
# Imports
import os
from transformers import (
AutoConfig,
AutoTokenizer,
set_seed,
)
import json
import re
import time
from typing import List
from tqdm.auto import tqdm
import tiktoken
import openai
from dotenv import load_dotenv
# load environment variables
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
# set constants
NUMBER_OF_QUESTIONS_TO_SOLVE = -1
CONFIRM_EVERY_N_QUESTIONS = -1
PAUSE_ON_FAIL = False
PAUSE_ON_FIRST = False
CACHE_MAX_AGE = 86400
MODEL_NAME = "tiiuae/falcon-40b-instruct"
openAIEngine = "gpt-3.5-turbo-16k"
# set up openai engine
encoder = tiktoken.encoding_for_model(openAIEngine)
if input("Delete savestate? (y/n) ").lower() == "y":
try:
os.remove("savestate.json")
except FileNotFoundError:
pass
def isWord(word):
return word in words
# Reasoning format:
# Context:
# <context>
#
# Situation:
# <situation>
#
# Carefully work through every step of solving this question and briefly explain reasoning. Do not include the caption in the reasoning.
# <question>
#
# Reasoning:
# 1.
def generateReasoning(context, situation, question):
prompt = f"Captions:\n{context}\n\nWork through this question step-by-step, very quickly explaining reasoning:\n{situation} {question}\n\nReasoning:\n1. "
# write prompt to file
with open("prompt.txt", "a") as f:
f.write(prompt + "\n\n\n\n")
return prompt
def getLLMResponse(
model,
prompt,
max_new_tokens=100,
temperature=1.9,
repetition_penalty=1.2,
):
# encode prompt
input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to("cuda")
# get number of tokens in prompt
inputTokens = len(input_ids[0])
# generate response
outputs = model.generate(
input_ids,
max_new_tokens=max_new_tokens,
pad_token_id=tokenizer.eos_token_id,
temperature=temperature,
repetition_penalty=repetition_penalty,
bad_words_ids=badWords,
)
# decode response
outputTokens = len(outputs[0])
answer = tokenizer.decode(outputs[0])
# count number of tokens generated by subtracting the length of the prompt from the length of the answer
print(f"Generated {outputTokens - inputTokens} tokens")
return answer, outputTokens - inputTokens
# ______
# Question:
# <situation> <question>
# - - - - -
# Reasoning:
# <reasoning>
# - - - - -
# Answer:
# <answer>
def printResults(reasoning, answer, question, situation):
print("_" * os.get_terminal_size().columns)
print("Question:\n" + situation + " " + question)
print("- " * (os.get_terminal_size().columns // 2))
print("Reasoning:\n" + reasoning)
print("- " * (os.get_terminal_size().columns // 2))
print("Answer:\n" + answer)
# taken from https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
def num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613"):
"""Return the number of tokens used by a list of messages."""
try:
encoding = tiktoken.encoding_for_model(model)
except KeyError:
print("Warning: model not found. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
if model in {
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k-0613",
"gpt-4-0314",
"gpt-4-32k-0314",
"gpt-4-0613",
"gpt-4-32k-0613",
}:
tokens_per_message = 3
tokens_per_name = 1
elif model == "gpt-3.5-turbo-0301":
tokens_per_message = (
4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
)
tokens_per_name = -1 # if there's a name, the role is omitted
elif "gpt-3.5-turbo" in model:
print(
"Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613."
)
return num_tokens_from_messages(messages, model="gpt-3.5-turbo-0613")
elif "gpt-4" in model:
print(
"Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613."
)
return num_tokens_from_messages(messages, model="gpt-4-0613")
else:
raise NotImplementedError(
f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens."""
)
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
def sendMessage(context, situation, question, allowUnsure=True):
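# Ask the chat model to reason step-by-step over the captions and end with a one-word answer (optionally allowing "unsure")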
message = f"{context}\n\n{situation} {question} Let's work through this step-by-step, before coming to the most likely one-word answer."
if allowUnsure:
message += " Answer 'unsure' if you are unsure."
messages = [
{
"role": "system",
"content": "You will be presented with a list of captions describing a room, followed by a question which is set in that room."
},
{
"role": "user",
"content": message,
}
]
modelOutput = openai.ChatCompletion.create(
model=openAIEngine,
messages=messages,
max_tokens=350 + num_tokens_from_messages(messages, model='gpt-3.5-turbo-0613'),
temperature=0,
)
fullAnswer = modelOutput["choices"][0]["message"]["content"]
#print(fullAnswer)
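# The final line of the step-by-step output should contain the answer; strip it down to a single alphabetic word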
answer = fullAnswer.split("\n")[-1]
answer = answer.split(":")[1] if ":" in answer else answer.split()[-1]
answer = "".join([c for c in answer if c.isalpha()])
return answer, modelOutput
def promptOpenAI(context, question, situation, actuallyPrompt=True):
# just for testing the rest of the code without using tokens
if not actuallyPrompt:
return ""
answer, modelOutput = sendMessage(context, situation, question)
tk = modelOutput["usage"]["completion_tokens"]
if answer.strip().lower() == "unsure":
# flip order of context
# This is such a hacky way to do this
# Ideally, the response is caption-order agnostic
context = "\n".join(context.split("\n")[::-1])
answer, modelOutput = sendMessage(context, situation, question, False)
tk += modelOutput["usage"]["completion_tokens"]
return answer, tk, modelOutput
unifiedQA=0
from transformers import T5Tokenizer, T5ForConditionalGeneration
model_name = "allenai/unifiedqa-v2-t5-large-1363200" # you can specify the model size here
qa_tokenizer = T5Tokenizer.from_pretrained(model_name)
qa_model = T5ForConditionalGeneration.from_pretrained(model_name)
def run_model(input_string, **generator_args):
# Use the dedicated UnifiedQA tokenizer/model so the Falcon tokenizer loaded later does not shadow them
input_ids = qa_tokenizer.encode(input_string, return_tensors="pt")
res = qa_model.generate(input_ids, **generator_args)
return qa_tokenizer.batch_decode(res, skip_special_tokens=True)
# save state to file
def saveState(
question,
reasoning,
answer,
correct_answer,
is_correct,
token_count,
totalTokenCount,
answered,
correct,
):
try:
f = open("savestate.json")
try:
savestate = json.load(f)
except json.decoder.JSONDecodeError:
savestate = {
"answered": 0,
"correct": 0,
"totalTokenCount": 0,
"questions": [],
}
except FileNotFoundError:
savestate = {
"answered": 0,
"correct": 0,
"totalTokenCount": 0,
"questions": [],
}
# create file if it doesn't exist
open("savestate.json", "w").close()
savestate["answered"] = answered
savestate["correct"] = correct
savestate["totalTokenCount"] = totalTokenCount
savestate["questions"].append(
{
"scene_id": question["scene_id"],
"situation": question["situation"],
"question": question["question"],
"question_id": question["question_id"],
# "relevant_nouns": question["relevantNouns"],
"context": question["context"],
"reasoning": reasoning,
"answer": answer,
"correct_answer": correct_answer,
"is_correct": is_correct,
"token_count": token_count,
}
)
with open("savestate.json", "w") as f:
json.dump(savestate, f, indent=4)
# compile regex pattern to remove all non-alphanumeric characters
pattern = re.compile(r"[\W_]+")
# load /usr/share/dict/words into a set
with open("/usr/share/dict/words") as f:
words = set(f.read().splitlines())
# load nouns_with_context.json
with open("nouns_with_context.json") as f:
nouns = json.load(f)
# load answers
with open("v1_balanced_sqa_annotations_test_scannetv2.json") as f:
answers = json.load(f)["annotations"]
answers_dict = {answer["question_id"]: answer for answer in answers}
answers = [answers_dict[noun["question_id"]] for noun in nouns]
if os.path.exists("savestate.json") and os.path.getsize("savestate.json") > 0:
with open("savestate.json") as f:
savestate = json.load(f)
answered = savestate["answered"]
correct = savestate["correct"]
totalTokenCount = savestate["totalTokenCount"]
else:
answered = 0
correct = 0
totalTokenCount = 0
# skip questions/answers that have already been answered
nouns = nouns[answered:]
answers = answers[answered:]
GTcorrect = 0
# reset answers.txt
open("answers.txt", "w").close()
generationTimes = []
is_correct = True
config = AutoConfig.from_pretrained(
MODEL_NAME,
trust_remote_code=True,
)
print("loaded config")
# model = AutoModelForCausalLM.from_pretrained(
# MODEL_NAME,
# trust_remote_code=True,
# load_in_8bit=True,
# device_map="balanced",
# )
tokenizer = AutoTokenizer.from_pretrained(
MODEL_NAME, add_prefix_space=True, trust_remote_code=True
)
badWordlist = ["<strong>"]
badWords = tokenizer(badWordlist, add_special_tokens=False).input_ids
for i, question in enumerate(tqdm(nouns, position=0, leave=True)):
if i >= NUMBER_OF_QUESTIONS_TO_SOLVE and not NUMBER_OF_QUESTIONS_TO_SOLVE == -1:
break
# confirm every n questions, if the answer is wrong, and if it's the first question
if (
(i % CONFIRM_EVERY_N_QUESTIONS == 0 and not CONFIRM_EVERY_N_QUESTIONS == -1)
or (not is_correct and PAUSE_ON_FAIL)
or (i == 1 and PAUSE_ON_FIRST)
):
if input("Continue? (y/n) ") != "y":
break
is_correct = True
questionTokenCount = 0
tempContext = "\n".join(question["context"])
tempSituation = question["situation"]
tempQuestion = question["question"]
# reasoningPrompt = generateReasoning(tempContext, tempSituation, tempQuestion)
#
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# start = time.time()
# reasoning, temp = getLLMResponse(
# model,
# reasoningPrompt,
# max_new_tokens=650,
# repetition_penalty=1.2,
# temperature=1,
# )
# end = time.time()
#
# questionTokenCount += temp
# totalTokenCount += temp
# if reasoning.endswith("<|endoftext|>"):
# reasoning = reasoning[: -len("<|endoftext|>")]
# else:
# reasoning = reasoning[: reasoning.rfind("\n")]
# reasoning = reasoning[len(reasoningPrompt) - len("1. ") :]
# send reasoning to openai
start2 = time.time()
answer, temp, fullAnswer = promptOpenAI(tempContext, tempQuestion, tempSituation, True)
end2 = time.time()
questionTokenCount += int(temp)
totalTokenCount += int(temp)
generationTimes.append(((end2 - start2)))
# write answer to file as new line
with open("answers.txt", "a") as f:
f.write(answer + "\n")
answered += 1
# answer should be just lowercase letters with no punctuation or spaces or newlines or anything
answer = pattern.sub("", answer)
# strip <|endoftext|> from reasoning
# reasoning = reasoning.replace("<|endoftext|>", "")
reasoning = fullAnswer["choices"][0]["message"]["content"]
printResults(reasoning, answer, tempQuestion, tempSituation)
if answer.lower() == answers[i]["answers"][0]["answer"].lower():
correct += 1
# check if last two words of reasoning are the same as the correct answer
elif answer in answers[i]["answers"][0]["answer"].lower() and " ".join(reasoning.split()[-2:]).lower() == answers[i]["answers"][0]["answer"]:
correct += 1
else:
is_correct = False
# if answer isn't even a word, save it to a file
if not isWord(answer.lower()):
with open("badAnswers.txt", "a") as f:
f.write(answer + "\n")
print("Correct answer: " + answers[i]["answers"][0]["answer"])
print(f"Answered: {answered}, Correct: {correct}, Accuracy: {round(correct / answered * 100, 2)}%")
print(f"Question token count: {questionTokenCount}")
print(f"Total token count: {totalTokenCount}")
print(f"Average token count per question: {totalTokenCount / answered}")
print(f"Average tokens per second: {totalTokenCount / sum(generationTimes)}")
saveState(
question,
reasoning,
answer,
answers[i]["answers"][0]["answer"],
answer.lower() == answers[i]["answers"][0]["answer"].lower(),
questionTokenCount,
totalTokenCount,
answered,
correct,
)
# Ground truth reasoning
tempContext = "\n".join(question["gtCaptions"])
answer, temp, fullAnswer = promptOpenAI(tempContext, tempQuestion, tempSituation, True)
end2 = time.time()
questionTokenCount += int(temp)
totalTokenCount += int(temp)
generationTimes.append(((end2 - start2)))
# write answer to file as new line
with open("GTanswers.txt", "a") as f:
f.write(answer + "\n")
if answer.lower() == answers[i]["answers"][0]["answer"].lower():
GTcorrect += 1
# check if last two words of reasoning are the same as the correct answer
elif answer in answers[i]["answers"][0]["answer"].lower() and " ".join(reasoning.split()[-2:]).lower() == answers[i]["answers"][0]["answer"]:
GTcorrect += 1
else:
is_correct = False
# if answer isn't even a word, save it to a file
if not isWord(answer.lower()):
with open("badAnswers.txt", "a") as f:
f.write(answer + "\n")
print("Correct answer: " + answers[i]["answers"][0]["answer"])
print(f"Ground Truth Captions | Answered: {answered}, Correct: {GTcorrect}, Accuracy: {round(GTcorrect / answered * 100, 2)}%")
# unifiedQA
tempContext = "\n".join(question["context"])
if run_model(f"{tempContext}\n\n{tempSituation} {tempQuestion} Let's work through this step-by-step, before coming to the most likely one-word answer.")[0].lower() == answers[i]["answers"][0]["answer"].lower():
unifiedQA += 1
print(f"UnifiedQA | Answered: {answered}, Correct: {unifiedQA}, Accuracy: {round(unifiedQA / answered * 100, 2)}%")
tempContext = "\n".join(question["gtCaptions"])
if run_model(f"{tempContext}\n\n{tempSituation} {tempQuestion} Let's work through this step-by-step, before coming to the most likely one-word answer.")[0].lower() == answers[i]["answers"][0]["answer"].lower():
unifiedQA += 1 | [
"You will be presented with a list of captions describing a room, followed by a question which is set in that room.",
"Captions:\nPLACEHOLDER\n\nWork through this question step-by-step, very quickly explaining reasoning:\nPLACEHOLDER PLACEHOLDER\n\nReasoning:\n1. "
] |
2024-01-10 | fdamken/SimuRLacra | Pyrado~pyrado~environments~mujoco~quanser_qube.py | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os.path as osp
from abc import abstractmethod
from typing import Optional
import mujoco_py
import numpy as np
import torch as to
from init_args_serializer import Serializable
import pyrado
from pyrado.environments.mujoco.base import MujocoSimEnv
from pyrado.environments.quanser import MAX_ACT_QQ
from pyrado.spaces.base import Space
from pyrado.spaces.box import BoxSpace
from pyrado.tasks.base import Task
from pyrado.tasks.desired_state import RadiallySymmDesStateTask
from pyrado.tasks.reward_functions import ExpQuadrErrRewFcn
class QQubeMjSim(MujocoSimEnv, Serializable):
def __init__(
self,
frame_skip: int = 4,
dt: Optional[float] = None,
max_steps: int = pyrado.inf,
task_args: Optional[dict] = None,
):
"""
Constructor
:param frame_skip: number of simulation frames for which the same action is held, results in a multiplier of
the time step size `dt`
:param dt: by default the time step size is the one from the mujoco config file multiplied by the number of
frame skips (legacy from OpenAI environments). By passing an explicit `dt` value, this can be
overridden. A possible use case is if you know that you recorded a trajectory with a specific `dt`.
:param max_steps: max number of simulation time steps
:param task_args: arguments for the task construction
"""
Serializable._init(self, locals())
model_path = osp.join(pyrado.MUJOCO_ASSETS_DIR, "furuta_pendulum.xml")
super().__init__(model_path, frame_skip, dt, max_steps, task_args)
self.camera_config = dict(distance=1.0, lookat=np.array((0.0, 0.0, 0.0)), elevation=-25.0, azimuth=180.0)
@abstractmethod
def _create_task(self, task_args: dict) -> Task:
raise NotImplementedError
@property
def obs_space(self) -> Space:
max_obs = np.array([1.0, 1.0, 1.0, 1.0, 20 * np.pi, 20 * np.pi]) # [-, -, -, -, rad/s, rad/s]
return BoxSpace(
-max_obs, max_obs, labels=["sin_theta", "cos_theta", "sin_alpha", "cos_alpha", "theta_dot", "alpha_dot"]
)
@property
def act_space(self) -> Space:
return BoxSpace(-MAX_ACT_QQ, MAX_ACT_QQ, labels=["V"])
@classmethod
def get_nominal_domain_param(cls) -> dict:
return dict(
gravity_const=9.81, # gravity [m/s**2]
motor_resistance=8.4, # motor resistance [Ohm]
motor_back_emf=0.042, # motor back-emf constant [V*s/rad]
mass_rot_pole=0.095, # rotary arm mass [kg]
length_rot_pole=0.085, # rotary arm length [m]
damping_rot_pole=5e-6, # rotary arm viscous damping [N*m*s/rad], original: 0.0015, identified: 5e-6
mass_pend_pole=0.024, # pendulum link mass [kg]
length_pend_pole=0.129, # pendulum link length [m]
damping_pend_pole=1e-6, # pendulum link viscous damping [N*m*s/rad], original: 0.0005, identified: 1e-6
voltage_thold_neg=0, # min. voltage required to move the servo in negative the direction [V]
voltage_thold_pos=0, # min. voltage required to move the servo in positive the direction [V]
)
def _mujoco_step(self, act: np.ndarray) -> dict:
assert self.act_space.contains(act, verbose=True)
voltage_thold_neg = self.domain_param["voltage_thold_neg"]
voltage_thold_pos = self.domain_param["voltage_thold_pos"]
motor_back_emf = self.domain_param["motor_back_emf"]
motor_resistance = self.domain_param["motor_resistance"]
# Apply a voltage dead zone, i.e., below a certain amplitude the system will not move.
# This is a very simple model of static friction.
if voltage_thold_neg <= act <= voltage_thold_pos:
act = 0
# Decompose state
_, _, theta_dot, _ = self.state
# Convert the commanded voltage into a motor torque using the DC motor model (back-EMF and winding resistance)
torque = (
motor_back_emf * (float(act) - motor_back_emf * theta_dot) / motor_resistance
) # act is a scalar array, causing warning on later np.array construction
# Apply the torques to the robot
self.sim.data.ctrl[:] = torque
# Call MuJoCo
try:
self.sim.step()
mjsim_crashed = False
except mujoco_py.builder.MujocoException:
# When MuJoCo detects instabilities in the simulation, it simply kills it.
# Instead, we want the episode to end with a failure.
mjsim_crashed = True
qpos = self.sim.data.qpos.copy()
qvel = self.sim.data.qvel.copy()
self.state = np.concatenate([qpos, qvel])
# If state is out of bounds (this is normally checked by the task, but does not work because of the mask)
state_oob = not self.state_space.contains(self.state)
return dict(
qpos=qpos,
qvel=qvel,
failed=mjsim_crashed or state_oob,
)
def observe(self, state: np.ndarray) -> np.ndarray:
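# Observation encodes both joint angles as sine/cosine pairs and appends the two angular velocities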
return np.array([np.sin(state[0]), np.cos(state[0]), np.sin(state[1]), np.cos(state[1]), state[2], state[3]])
def _adapt_model_file(self, xml_model: str, domain_param: dict) -> str:
length_pend_pole = domain_param["length_pend_pole"]
length_rot_pole = domain_param["length_rot_pole"]
xml_model = xml_model.replace("[0.13-length_pend_pole]", str(0.13 - length_pend_pole))
xml_model = xml_model.replace("[0.0055+length_rot_pole]", str(0.0055 + length_rot_pole))
return super()._adapt_model_file(xml_model, domain_param)
class QQubeSwingUpMjSim(QQubeMjSim):
name: str = "qq-mj-su"
@property
def state_space(self) -> Space:
max_state = np.array([115.0 / 180 * np.pi, 4 * np.pi, 20 * np.pi, 20 * np.pi]) # [rad, rad, rad/s, rad/s]
return BoxSpace(-max_state, max_state, labels=["theta", "alpha", "theta_dot", "alpha_dot"])
@property
def init_space(self) -> Space:
max_init_state = np.array([2.0, 1.0, 0.5, 0.5]) / 180 * np.pi # [rad, rad, rad/s, rad/s]
return BoxSpace(-max_init_state, max_init_state, labels=["theta", "alpha", "theta_dot", "alpha_dot"])
def _create_task(self, task_args: dict) -> Task:
# Define the task including the reward function
state_des = task_args.get("state_des", np.array([0.0, np.pi, 0.0, 0.0]))
Q = task_args.get("Q", np.diag([1.0, 1.0, 2e-2, 5e-3])) # former: [3e-1, 1.0, 2e-2, 5e-3]
R = task_args.get("R", np.diag([4e-3]))
return RadiallySymmDesStateTask(self.spec, state_des, ExpQuadrErrRewFcn(Q, R), idcs=[1])
class QQubeStabMjSim(QQubeMjSim):
name: str = "qq-mj-st"
@property
def state_space(self) -> Space:
max_state = np.array([115.0 / 180 * np.pi, 4 * np.pi, 20 * np.pi, 20 * np.pi]) # [rad, rad, rad/s, rad/s]
return BoxSpace(-max_state, max_state, labels=["theta", "alpha", "theta_dot", "alpha_dot"])
@property
def init_space(self) -> Space:
min_init_state = np.array([-5.0 / 180 * np.pi, 175.0 / 180 * np.pi, 0, 0]) # [rad, rad, rad/s, rad/s]
max_init_state = np.array([5.0 / 180 * np.pi, 185.0 / 180 * np.pi, 0, 0]) # [rad, rad, rad/s, rad/s]
return BoxSpace(min_init_state, max_init_state, labels=["theta", "alpha", "theta_dot", "alpha_dot"])
def _create_task(self, task_args: dict) -> Task:
# Define the task including the reward function
state_des = task_args.get("state_des", np.array([0.0, np.pi, 0.0, 0.0]))
Q = task_args.get("Q", np.diag([3.0, 4.0, 2.0, 2.0]))
R = task_args.get("R", np.diag([5e-2]))
return RadiallySymmDesStateTask(self.spec, state_des, ExpQuadrErrRewFcn(Q, R), idcs=[1])
| [] |
2024-01-10 | MLBazaar/Cardea | cardea~fhir~__init__.py | from cardea.fhir.Account import Account, Account_Coverage, Account_Guarantor
from cardea.fhir.ActivityDefinition import (
ActivityDefinition, ActivityDefinition_DynamicValue, ActivityDefinition_Participant)
from cardea.fhir.Address import Address
from cardea.fhir.AdverseEvent import AdverseEvent, AdverseEvent_SuspectEntity
from cardea.fhir.Age import Age
from cardea.fhir.AllergyIntolerance import AllergyIntolerance, AllergyIntolerance_Reaction
from cardea.fhir.Annotation import Annotation
from cardea.fhir.Appointment import Appointment, Appointment_Participant
from cardea.fhir.AppointmentResponse import AppointmentResponse
from cardea.fhir.Attachment import Attachment
from cardea.fhir.AuditEvent import (
AuditEvent, AuditEvent_Agent, AuditEvent_Detail, AuditEvent_Entity, AuditEvent_Network,
AuditEvent_Source)
from cardea.fhir.BackboneElement import BackboneElement
from cardea.fhir.Basic import Basic
from cardea.fhir.Binary import Binary
from cardea.fhir.BodySite import BodySite
from cardea.fhir.Bundle import (
Bundle, Bundle_Entry, Bundle_Link, Bundle_Request, Bundle_Response, Bundle_Search)
from cardea.fhir.CapabilityStatement import (
CapabilityStatement, CapabilityStatement_Certificate, CapabilityStatement_Document,
CapabilityStatement_Endpoint, CapabilityStatement_Event, CapabilityStatement_Implementation,
CapabilityStatement_Interaction, CapabilityStatement_Interaction1,
CapabilityStatement_Messaging, CapabilityStatement_Operation, CapabilityStatement_Resource,
CapabilityStatement_Rest, CapabilityStatement_SearchParam, CapabilityStatement_Security,
CapabilityStatement_Software, CapabilityStatement_SupportedMessage)
from cardea.fhir.CarePlan import CarePlan, CarePlan_Activity, CarePlan_Detail
from cardea.fhir.CareTeam import CareTeam, CareTeam_Participant
from cardea.fhir.ChargeItem import ChargeItem, ChargeItem_Participant
from cardea.fhir.Claim import (
Claim, Claim_Accident, Claim_CareTeam, Claim_Detail, Claim_Diagnosis, Claim_Information,
Claim_Insurance, Claim_Item, Claim_Payee, Claim_Procedure, Claim_Related, Claim_SubDetail)
from cardea.fhir.ClaimResponse import (
ClaimResponse, ClaimResponse_AddItem, ClaimResponse_Adjudication, ClaimResponse_Detail,
ClaimResponse_Detail1, ClaimResponse_Error, ClaimResponse_Insurance, ClaimResponse_Item,
ClaimResponse_Payment, ClaimResponse_ProcessNote, ClaimResponse_SubDetail)
from cardea.fhir.ClinicalImpression import (
ClinicalImpression, ClinicalImpression_Finding, ClinicalImpression_Investigation)
from cardea.fhir.CodeableConcept import CodeableConcept
from cardea.fhir.CodeSystem import (
CodeSystem, CodeSystem_Concept, CodeSystem_Designation, CodeSystem_Filter, CodeSystem_Property,
CodeSystem_Property1)
from cardea.fhir.Coding import Coding
from cardea.fhir.Communication import Communication, Communication_Payload
from cardea.fhir.CommunicationRequest import (
CommunicationRequest, CommunicationRequest_Payload, CommunicationRequest_Requester)
from cardea.fhir.CompartmentDefinition import CompartmentDefinition, CompartmentDefinition_Resource
from cardea.fhir.Composition import (
Composition, Composition_Attester, Composition_Event, Composition_RelatesTo,
Composition_Section)
from cardea.fhir.ConceptMap import (
ConceptMap, ConceptMap_DependsOn, ConceptMap_Element, ConceptMap_Group, ConceptMap_Target,
ConceptMap_Unmapped)
from cardea.fhir.Condition import Condition, Condition_Evidence, Condition_Stage
from cardea.fhir.Consent import (
Consent, Consent_Actor, Consent_Actor1, Consent_Data, Consent_Data1, Consent_Except,
Consent_Policy)
from cardea.fhir.ContactDetail import ContactDetail
from cardea.fhir.ContactPoint import ContactPoint
from cardea.fhir.Contract import (
Contract, Contract_Agent, Contract_Agent1, Contract_Friendly, Contract_Legal, Contract_Rule,
Contract_Signer, Contract_Term, Contract_ValuedItem, Contract_ValuedItem1)
from cardea.fhir.Contributor import Contributor
from cardea.fhir.Count import Count
from cardea.fhir.Coverage import Coverage, Coverage_Grouping
from cardea.fhir.DataElement import DataElement, DataElement_Mapping
from cardea.fhir.DataRequirement import (
DataRequirement, DataRequirement_CodeFilter, DataRequirement_DateFilter)
from cardea.fhir.DetectedIssue import DetectedIssue, DetectedIssue_Mitigation
from cardea.fhir.Device import Device, Device_Udi
from cardea.fhir.DeviceComponent import DeviceComponent, DeviceComponent_ProductionSpecification
from cardea.fhir.DeviceMetric import DeviceMetric, DeviceMetric_Calibration
from cardea.fhir.DeviceRequest import DeviceRequest, DeviceRequest_Requester
from cardea.fhir.DeviceUseStatement import DeviceUseStatement
from cardea.fhir.DiagnosticReport import (
DiagnosticReport, DiagnosticReport_Image, DiagnosticReport_Performer)
from cardea.fhir.Distance import Distance
from cardea.fhir.DocumentManifest import (
DocumentManifest, DocumentManifest_Content, DocumentManifest_Related)
from cardea.fhir.DocumentReference import (
DocumentReference, DocumentReference_Content, DocumentReference_Context,
DocumentReference_Related, DocumentReference_RelatesTo)
from cardea.fhir.DomainResource import DomainResource
from cardea.fhir.Dosage import Dosage
from cardea.fhir.Duration import Duration
from cardea.fhir.Element import Element
from cardea.fhir.ElementDefinition import (
ElementDefinition, ElementDefinition_Base, ElementDefinition_Binding,
ElementDefinition_Constraint, ElementDefinition_Discriminator, ElementDefinition_Example,
ElementDefinition_Mapping, ElementDefinition_Slicing, ElementDefinition_Type)
from cardea.fhir.EligibilityRequest import EligibilityRequest
from cardea.fhir.EligibilityResponse import (
EligibilityResponse, EligibilityResponse_BenefitBalance, EligibilityResponse_Error,
EligibilityResponse_Financial, EligibilityResponse_Insurance)
from cardea.fhir.Encounter import (
Encounter, Encounter_ClassHistory, Encounter_Diagnosis, Encounter_Hospitalization,
Encounter_Location, Encounter_Participant, Encounter_StatusHistory)
from cardea.fhir.Endpoint import Endpoint
from cardea.fhir.EnrollmentRequest import EnrollmentRequest
from cardea.fhir.EnrollmentResponse import EnrollmentResponse
from cardea.fhir.EpisodeOfCare import (
EpisodeOfCare, EpisodeOfCare_Diagnosis, EpisodeOfCare_StatusHistory)
from cardea.fhir.ExpansionProfile import (
ExpansionProfile, ExpansionProfile_Designation, ExpansionProfile_Designation1,
ExpansionProfile_Designation2, ExpansionProfile_Exclude, ExpansionProfile_ExcludedSystem,
ExpansionProfile_FixedVersion, ExpansionProfile_Include)
from cardea.fhir.ExplanationOfBenefit import (
ExplanationOfBenefit, ExplanationOfBenefit_Accident, ExplanationOfBenefit_AddItem,
ExplanationOfBenefit_Adjudication, ExplanationOfBenefit_BenefitBalance,
ExplanationOfBenefit_CareTeam, ExplanationOfBenefit_Detail, ExplanationOfBenefit_Detail1,
ExplanationOfBenefit_Diagnosis, ExplanationOfBenefit_Financial,
ExplanationOfBenefit_Information, ExplanationOfBenefit_Insurance, ExplanationOfBenefit_Item,
ExplanationOfBenefit_Payee, ExplanationOfBenefit_Payment, ExplanationOfBenefit_Procedure,
ExplanationOfBenefit_ProcessNote, ExplanationOfBenefit_Related, ExplanationOfBenefit_SubDetail)
from cardea.fhir.Extension import Extension
from cardea.fhir.FamilyMemberHistory import FamilyMemberHistory, FamilyMemberHistory_Condition
from cardea.fhir.fhirbase import fhirbase
from cardea.fhir.Flag import Flag
from cardea.fhir.Goal import Goal, Goal_Target
from cardea.fhir.GraphDefinition import (
GraphDefinition, GraphDefinition_Compartment, GraphDefinition_Link, GraphDefinition_Target)
from cardea.fhir.Group import Group, Group_Characteristic, Group_Member
from cardea.fhir.GuidanceResponse import GuidanceResponse
from cardea.fhir.HealthcareService import (
HealthcareService, HealthcareService_AvailableTime, HealthcareService_NotAvailable)
from cardea.fhir.HumanName import HumanName
from cardea.fhir.Identifier import Identifier
from cardea.fhir.ImagingManifest import (
ImagingManifest, ImagingManifest_Instance, ImagingManifest_Series, ImagingManifest_Study)
from cardea.fhir.ImagingStudy import ImagingStudy, ImagingStudy_Instance, ImagingStudy_Series
from cardea.fhir.Immunization import (
Immunization, Immunization_Explanation, Immunization_Practitioner, Immunization_Reaction,
Immunization_VaccinationProtocol)
from cardea.fhir.ImmunizationRecommendation import (
ImmunizationRecommendation, ImmunizationRecommendation_DateCriterion,
ImmunizationRecommendation_Protocol, ImmunizationRecommendation_Recommendation)
from cardea.fhir.ImplementationGuide import (
ImplementationGuide, ImplementationGuide_Dependency, ImplementationGuide_Global,
ImplementationGuide_Package, ImplementationGuide_Page, ImplementationGuide_Resource)
from cardea.fhir.Library import Library
from cardea.fhir.Linkage import Linkage, Linkage_Item
from cardea.fhir.List import List, List_Entry
from cardea.fhir.Location import Location, Location_Position
from cardea.fhir.Measure import (
Measure, Measure_Group, Measure_Population, Measure_Stratifier, Measure_SupplementalData)
from cardea.fhir.MeasureReport import (
MeasureReport, MeasureReport_Group, MeasureReport_Population, MeasureReport_Population1,
MeasureReport_Stratifier, MeasureReport_Stratum)
from cardea.fhir.Media import Media
from cardea.fhir.Medication import (
Medication, Medication_Batch, Medication_Content, Medication_Ingredient, Medication_Package)
from cardea.fhir.MedicationAdministration import (
MedicationAdministration, MedicationAdministration_Dosage, MedicationAdministration_Performer)
from cardea.fhir.MedicationDispense import (
MedicationDispense, MedicationDispense_Performer, MedicationDispense_Substitution)
from cardea.fhir.MedicationRequest import (
MedicationRequest, MedicationRequest_DispenseRequest, MedicationRequest_Requester,
MedicationRequest_Substitution)
from cardea.fhir.MedicationStatement import MedicationStatement
from cardea.fhir.MessageDefinition import (
MessageDefinition, MessageDefinition_AllowedResponse, MessageDefinition_Focus)
from cardea.fhir.MessageHeader import (
MessageHeader, MessageHeader_Destination, MessageHeader_Response, MessageHeader_Source)
from cardea.fhir.Meta import Meta
from cardea.fhir.Money import Money
from cardea.fhir.NamingSystem import NamingSystem, NamingSystem_UniqueId
from cardea.fhir.Narrative import Narrative
from cardea.fhir.NutritionOrder import (
NutritionOrder, NutritionOrder_Administration, NutritionOrder_EnteralFormula,
NutritionOrder_Nutrient, NutritionOrder_OralDiet, NutritionOrder_Supplement,
NutritionOrder_Texture)
from cardea.fhir.Observation import (
Observation, Observation_Component, Observation_ReferenceRange, Observation_Related)
from cardea.fhir.OperationDefinition import (
OperationDefinition, OperationDefinition_Binding, OperationDefinition_Overload,
OperationDefinition_Parameter)
from cardea.fhir.OperationOutcome import OperationOutcome, OperationOutcome_Issue
from cardea.fhir.Organization import Organization, Organization_Contact
from cardea.fhir.ParameterDefinition import ParameterDefinition
from cardea.fhir.Parameters import Parameters, Parameters_Parameter
from cardea.fhir.Patient import (
Patient, Patient_Animal, Patient_Communication, Patient_Contact, Patient_Link)
from cardea.fhir.PaymentNotice import PaymentNotice
from cardea.fhir.PaymentReconciliation import (
PaymentReconciliation, PaymentReconciliation_Detail, PaymentReconciliation_ProcessNote)
from cardea.fhir.Period import Period
from cardea.fhir.Person import Person, Person_Link
from cardea.fhir.PlanDefinition import (
PlanDefinition, PlanDefinition_Action, PlanDefinition_Condition, PlanDefinition_DynamicValue,
PlanDefinition_Goal, PlanDefinition_Participant, PlanDefinition_RelatedAction,
PlanDefinition_Target)
from cardea.fhir.Practitioner import Practitioner, Practitioner_Qualification
from cardea.fhir.PractitionerRole import (
PractitionerRole, PractitionerRole_AvailableTime, PractitionerRole_NotAvailable)
from cardea.fhir.Procedure import Procedure, Procedure_FocalDevice, Procedure_Performer
from cardea.fhir.ProcedureRequest import ProcedureRequest, ProcedureRequest_Requester
from cardea.fhir.ProcessRequest import ProcessRequest, ProcessRequest_Item
from cardea.fhir.ProcessResponse import ProcessResponse, ProcessResponse_ProcessNote
from cardea.fhir.Provenance import Provenance, Provenance_Agent, Provenance_Entity
from cardea.fhir.Quantity import Quantity
from cardea.fhir.Questionnaire import (
Questionnaire, Questionnaire_EnableWhen, Questionnaire_Item, Questionnaire_Option)
from cardea.fhir.QuestionnaireResponse import (
QuestionnaireResponse, QuestionnaireResponse_Answer, QuestionnaireResponse_Item)
from cardea.fhir.Range import Range
from cardea.fhir.Ratio import Ratio
from cardea.fhir.Reference import Reference
from cardea.fhir.ReferralRequest import ReferralRequest, ReferralRequest_Requester
from cardea.fhir.RelatedArtifact import RelatedArtifact
from cardea.fhir.RelatedPerson import RelatedPerson
from cardea.fhir.RequestGroup import (
RequestGroup, RequestGroup_Action, RequestGroup_Condition, RequestGroup_RelatedAction)
from cardea.fhir.ResearchStudy import ResearchStudy, ResearchStudy_Arm
from cardea.fhir.ResearchSubject import ResearchSubject
from cardea.fhir.Resource import Resource
from cardea.fhir.ResourceList import ResourceList
from cardea.fhir.RiskAssessment import RiskAssessment, RiskAssessment_Prediction
from cardea.fhir.SampledData import SampledData
from cardea.fhir.Schedule import Schedule
from cardea.fhir.SearchParameter import SearchParameter, SearchParameter_Component
from cardea.fhir.Sequence import (
Sequence, Sequence_Quality, Sequence_ReferenceSeq, Sequence_Repository, Sequence_Variant)
from cardea.fhir.ServiceDefinition import ServiceDefinition
from cardea.fhir.Signature import Signature
from cardea.fhir.Slot import Slot
from cardea.fhir.Specimen import (
Specimen, Specimen_Collection, Specimen_Container, Specimen_Processing)
from cardea.fhir.StructureDefinition import (
StructureDefinition, StructureDefinition_Differential, StructureDefinition_Mapping,
StructureDefinition_Snapshot)
from cardea.fhir.StructureMap import (
StructureMap, StructureMap_Dependent, StructureMap_Group, StructureMap_Input,
StructureMap_Parameter, StructureMap_Rule, StructureMap_Source, StructureMap_Structure,
StructureMap_Target)
from cardea.fhir.Subscription import Subscription, Subscription_Channel
from cardea.fhir.Substance import Substance, Substance_Ingredient, Substance_Instance
from cardea.fhir.SupplyDelivery import SupplyDelivery, SupplyDelivery_SuppliedItem
from cardea.fhir.SupplyRequest import (
SupplyRequest, SupplyRequest_OrderedItem, SupplyRequest_Requester)
from cardea.fhir.Task import Task, Task_Input, Task_Output, Task_Requester, Task_Restriction
from cardea.fhir.TestReport import (
TestReport, TestReport_Action, TestReport_Action1, TestReport_Action2, TestReport_Assert,
TestReport_Operation, TestReport_Participant, TestReport_Setup, TestReport_Teardown,
TestReport_Test)
from cardea.fhir.TestScript import (
TestScript, TestScript_Action, TestScript_Action1, TestScript_Action2, TestScript_Assert,
TestScript_Capability, TestScript_Destination, TestScript_Fixture, TestScript_Link,
TestScript_Metadata, TestScript_Operation, TestScript_Origin, TestScript_Param,
TestScript_Param1, TestScript_Param2, TestScript_Param3, TestScript_RequestHeader,
TestScript_Rule, TestScript_Rule1, TestScript_Rule2, TestScript_Rule3, TestScript_Ruleset,
TestScript_Ruleset1, TestScript_Setup, TestScript_Teardown, TestScript_Test,
TestScript_Variable)
from cardea.fhir.Timing import Timing, Timing_Repeat
from cardea.fhir.TriggerDefinition import TriggerDefinition
from cardea.fhir.UsageContext import UsageContext
from cardea.fhir.ValueSet import (
ValueSet, ValueSet_Compose, ValueSet_Concept, ValueSet_Contains, ValueSet_Designation,
ValueSet_Expansion, ValueSet_Filter, ValueSet_Include, ValueSet_Parameter)
from cardea.fhir.VisionPrescription import VisionPrescription, VisionPrescription_Dispense
__all__ = (
"fhirbase",
"Account",
"ActivityDefinition",
"Address",
"AdverseEvent",
"Age",
"AllergyIntolerance",
"Annotation",
"Appointment",
"AppointmentResponse",
"Attachment",
"AuditEvent",
"BackboneElement",
"Basic",
"Binary",
"BodySite",
"Bundle",
"CapabilityStatement",
"CarePlan",
"CareTeam",
"ChargeItem",
"Claim",
"ClaimResponse",
"ClinicalImpression",
"CodeableConcept",
"CodeSystem",
"Coding",
"Communication",
"CommunicationRequest",
"CompartmentDefinition",
"Composition",
"ConceptMap",
"Condition",
"Consent",
"ContactDetail",
"ContactPoint",
"Contract",
"Contributor",
"Count",
"Coverage",
"DataElement",
"DataRequirement",
"DetectedIssue",
"Device",
"DeviceComponent",
"DeviceMetric",
"DeviceRequest",
"DeviceUseStatement",
"DiagnosticReport",
"Distance",
"DocumentManifest",
"DocumentReference",
"DomainResource",
"Dosage",
"Duration",
"Element",
"ElementDefinition",
"EligibilityRequest",
"EligibilityResponse",
"Encounter",
"Endpoint",
"EnrollmentRequest",
"EnrollmentResponse",
"EpisodeOfCare",
"ExpansionProfile",
"ExplanationOfBenefit",
"Extension",
"FamilyMemberHistory",
"Flag",
"Goal",
"GraphDefinition",
"Group",
"GuidanceResponse",
"HealthcareService",
"HumanName",
"Identifier",
"ImagingManifest",
"ImagingStudy",
"Immunization",
"ImmunizationRecommendation",
"ImplementationGuide",
"Library",
"Linkage",
"List",
"Location",
"Measure",
"MeasureReport",
"Media",
"Medication",
"MedicationAdministration",
"MedicationDispense",
"MedicationRequest",
"MedicationStatement",
"MessageDefinition",
"MessageHeader",
"Meta",
"Money",
"NamingSystem",
"Narrative",
"NutritionOrder",
"Observation",
"OperationDefinition",
"OperationOutcome",
"Organization",
"ParameterDefinition",
"Parameters",
"Patient",
"PaymentNotice",
"PaymentReconciliation",
"Period",
"Person",
"PlanDefinition",
"Practitioner",
"PractitionerRole",
"Procedure",
"ProcedureRequest",
"ProcessRequest",
"ProcessResponse",
"Provenance",
"Quantity",
"Questionnaire",
"QuestionnaireResponse",
"Range",
"Ratio",
"Reference",
"ReferralRequest",
"RelatedArtifact",
"RelatedPerson",
"RequestGroup",
"ResearchStudy",
"ResearchSubject",
"Resource",
"ResourceList",
"RiskAssessment",
"SampledData",
"Schedule",
"SearchParameter",
"Sequence",
"ServiceDefinition",
"Signature",
"Slot",
"Specimen",
"StructureDefinition",
"StructureMap",
"Subscription",
"Substance",
"SupplyDelivery",
"SupplyRequest",
"Task",
"TestReport",
"TestScript",
"Timing",
"TriggerDefinition",
"UsageContext",
"ValueSet",
"VisionPrescription"
)
| [] |
2024-01-10 | c0sogi/LLMChat | app~utils~chat~messages~handler.py | """A module for handling messages from Frontend"""
import asyncio
from contextlib import asynccontextmanager
from copy import deepcopy
from inspect import iscoroutinefunction
from typing import (
Any,
Awaitable,
Callable,
Optional,
TypeVar,
)
from uuid import uuid4
import aiohttp
from fastapi import WebSocket
from fastapi.concurrency import run_in_threadpool
from openai import OpenAIError
from app.common.config import ChatConfig, config
from app.errors.chat_exceptions import (
ChatException,
ChatFunctionCallException,
ChatInterruptedException,
ChatModelNotImplementedException,
ChatTextGenerationException,
ChatTooMuchTokenException,
)
from app.models.base_models import (
MessageHistory,
StreamProgress,
SummarizedResult,
)
from app.models.chat_models import ChatRoles, ResponseType, UserChatContext
from app.models.function_calling.functions import FunctionCalls
from app.models.llms import ExllamaModel, LlamaCppModel, LLMModel, OpenAIModel
from app.utils.chat.buffer import BufferedUserContext
from app.utils.chat.managers.cache import CacheManager
from app.utils.chat.managers.message import MessageManager
from app.utils.chat.managers.websocket import SendToWebsocket
from app.utils.chat.text_generations.completion_api import (
agenerate_from_chat_completion_api,
agenerate_from_text_completion_api,
)
from app.utils.chat.text_generations.summarization import get_summarization
from app.utils.date_utils import UTC
from app.utils.function_calling.callbacks.translate import translate_callback
from app.utils.logger import ApiLogger
from app.utils.types import arguments_provider
T = TypeVar("T")
async def _save_function_call_result(
buffer: BufferedUserContext,
function_call_name: str,
function_call_result: str,
update_cache: bool = True,
) -> None:
"""Save function call result to both buffer and database"""
if update_cache:
await MessageManager.add_message_history_safely(
user_chat_context=buffer.current_user_chat_context,
content=function_call_result,
role=ChatRoles.SYSTEM,
custom_role=f"function: {function_call_name}",
)
else:
buffer.current_system_message_histories.append(
MessageHistory(
role=f"function: {function_call_name}",
content=function_call_result,
tokens=buffer.current_user_chat_context.get_tokens_of(
function_call_result
),
actual_role=ChatRoles.SYSTEM.value,
)
)
@asynccontextmanager
async def _ai_context_manager(
buffer: BufferedUserContext,
model: LLMModel,
stream_progress: StreamProgress,
):
backup_context: UserChatContext = deepcopy(
buffer.current_user_chat_context
)
chat_text_generator_error: Optional[ChatTextGenerationException] = None
try:
# Hand shake with frontend
await SendToWebsocket.message(
websocket=buffer.websocket,
msg=None,
chat_room_id=buffer.current_chat_room_id,
finish=False,
model_name=model.name,
uuid=stream_progress.uuid,
)
yield
except ChatInterruptedException:
# Raise ChatInterruptedException to stop the stream,
# with generated text so far.
# The EOS will be sent to frontend in `finally` block
buffer.done.clear()
raise ChatInterruptedException(
msg=stream_progress.response + stream_progress.buffer
)
except ChatFunctionCallException as function_call_exception:
# Handle function call exception
await MessageHandler.function_call(
callback_name=function_call_exception.func_name,
callback_kwargs=function_call_exception.func_kwargs,
buffer=buffer,
)
except ChatException as chat_exception:
chat_text_generator_error = ChatTextGenerationException(
msg=chat_exception.msg
)
except OpenAIError as e:
raise ChatTextGenerationException(msg=str(e))
except (
aiohttp.ServerTimeoutError,
aiohttp.ClientPayloadError,
asyncio.TimeoutError,
):
pass
except Exception as exception:
ApiLogger.cerror(
f"unexpected chat exception: {exception}", exc_info=True
)
chat_text_generator_error = ChatTextGenerationException(
msg="Unknown error"
)
finally:
if chat_text_generator_error is not None:
# Restore context, and send error message to frontend
buffer.current_user_chat_context.copy_from(backup_context)
await SendToWebsocket.message(
websocket=buffer.websocket,
msg=(
"\n\nAn error occurred while generating text: "
f"**{chat_text_generator_error.msg}**"
),
chat_room_id=buffer.current_chat_room_id,
finish=True,
model_name=buffer.current_user_chat_context.llm_model.value.name,
)
else:
# Send EOS to frontend
await SendToWebsocket.message(
websocket=buffer.websocket,
msg=stream_progress.buffer,
chat_room_id=buffer.current_chat_room_id,
finish=True,
model_name=model.name,
wait_next_query=False,
)
async def _ai_stream(
model: LLMModel,
buffer: BufferedUserContext,
stream_progress: StreamProgress,
) -> None:
if isinstance(model, OpenAIModel):
stream_func = agenerate_from_chat_completion_api
elif isinstance(model, (LlamaCppModel, ExllamaModel)):
if config.is_llama_available and config.llama_completion_url:
# Use llama_cpp API
if "/v1/chat/completions" in config.llama_completion_url:
stream_func = agenerate_from_chat_completion_api
elif "/v1/completions" in config.llama_completion_url:
stream_func = agenerate_from_text_completion_api
else:
raise ChatModelNotImplementedException(
msg=f"Model {model.name} not implemented."
)
else:
if config.is_llama_booting:
raise ChatModelNotImplementedException(
msg=f"Model {model.name} is currently booting."
)
raise ChatModelNotImplementedException(
msg=f"Model {model.name} not available."
)
else:
raise ChatModelNotImplementedException(
msg=f"Model {buffer.current_user_chat_context.llm_model.value.name} not implemented."
)
await SendToWebsocket.stream(
buffer=buffer,
stream_func=stream_func,
stream_progress=stream_progress,
)
async def _ai_summarization(buffer: BufferedUserContext) -> None:
if (
ChatConfig.summarize_for_chat
and buffer.current_ai_message_histories[-1].tokens
> ChatConfig.summarization_threshold
):
buffer.task_list.append(
asyncio.create_task(
summarization_task(
user_id=buffer.user_id,
chat_room_id=buffer.current_chat_room_id,
role="ai",
to_summarize=buffer.current_ai_message_histories[
-1
].content,
message_history_uuid=buffer.current_ai_message_histories[
-1
].uuid,
)
)
)
async def _command_interpreter(
callback_name: str,
callback_args: list[str],
callback_kwargs: dict[str, Any],
callback_finder: Callable[[str], Callable],
buffer: BufferedUserContext,
) -> Optional[Any]:
"""Interpret command, which is found by callback_finder,
and call corresponding callback function with arguments provided by
callback_args and callback_kwargs, and handle the callback response.
Finally, return the callback response."""
if callback_name.startswith("_"):
await SendToWebsocket.message(
websocket=buffer.websocket,
msg="Command name cannot start with '_'",
chat_room_id=buffer.current_chat_room_id,
)
callback_response, response_type = None, ResponseType.DO_NOTHING
else:
callback_response, response_type = await _get_command_response(
callback=callback_finder(callback_name),
callback_args=callback_args,
callback_kwargs=callback_kwargs,
buffer=buffer,
)
if response_type is ResponseType.DO_NOTHING:
return callback_response
elif response_type is ResponseType.HANDLE_AI:
await MessageHandler.ai(buffer=buffer)
return callback_response
elif response_type is ResponseType.HANDLE_USER:
await MessageHandler.user(
msg=callback_response or "",
buffer=buffer,
)
return callback_response
elif response_type is ResponseType.HANDLE_BOTH:
await MessageHandler.user(
msg=callback_response or "",
buffer=buffer,
)
await MessageHandler.ai(buffer=buffer)
return callback_response
elif response_type is ResponseType.REPEAT_COMMAND and callback_response:
splitted: list[str] = callback_response.split(" ")
await _command_interpreter(
callback_name=splitted[0][1:]
if splitted[0].startswith("/")
else splitted[0],
callback_args=splitted[1:],
callback_kwargs=callback_kwargs,
callback_finder=callback_finder,
buffer=buffer,
)
return None
@asynccontextmanager
async def _function_call_context_manager(buffer: BufferedUserContext):
# Do not invoke function call again
buffer.optional_info["function_call"] = "none"
yield
# Remove function call messages when all function calls are done
buffer.current_system_message_histories[:] = [
system_message_history
for system_message_history in buffer.current_system_message_histories
if not system_message_history.role.startswith("function:")
]
# Restore original function call
buffer.optional_info["functions"] = None
buffer.optional_info["function_call"] = None
async def _get_command_response(
callback: Callable,
callback_args: list[str],
callback_kwargs: dict[str, Any],
buffer: BufferedUserContext,
) -> tuple[Any, ResponseType]:
try:
args_to_pass, kwargs_to_pass = arguments_provider(
func=callback,
available_args=callback_args,
available_annotated={
UserChatContext: buffer.current_user_chat_context,
WebSocket: buffer.websocket,
BufferedUserContext: buffer,
},
available_kwargs=callback_kwargs,
)
except Exception as e:
ApiLogger.cerror(f"Error in arguments_provider: {e}", exc_info=True)
if isinstance(e, (ValueError, TypeError, IndexError)):
error_msg = str(e)
else:
error_msg = "Unexpected error while parsing arguments!"
await SendToWebsocket.message(
websocket=buffer.websocket,
msg=error_msg,
chat_room_id=buffer.current_chat_room_id,
)
return None, ResponseType.DO_NOTHING
else:
if iscoroutinefunction(callback): # if callback is coroutine function
callback_response = await callback(*args_to_pass, **kwargs_to_pass)
else:
callback_response = await run_in_threadpool(
callback, *args_to_pass, **kwargs_to_pass
)
if (
isinstance(callback_response, tuple)
and len(callback_response) == 2
):
callback_response, response_type = callback_response
if response_type is ResponseType.SEND_MESSAGE_AND_STOP:
await SendToWebsocket.message(
websocket=buffer.websocket,
msg=callback_response,
chat_room_id=buffer.current_chat_room_id,
)
return callback_response, ResponseType.DO_NOTHING
return callback_response, response_type
else:
return callback_response, ResponseType.DO_NOTHING
async def _interruption_event_watcher(
coro_or_future: Awaitable[T],
event: asyncio.Event,
hold_interruption_event: Optional[asyncio.Event] = None,
) -> T:
"""Watch interruption event, and raise ChatInterruptedException if it's set
while waiting for coro_or_future to complete. If hold_interruption_event is set,
ChatInterruptedException will not be raised until hold_interruption_event is unset.
"""
async def monitoring_events(): # Monitor interruption event
while True:
await event.wait() # Wait for interruption event
if (
hold_interruption_event is None
or not hold_interruption_event.is_set()
):
# If hold_interruption_event is not given, or it's not set,
# raise ChatInterruptedException immediately
return
# If hold_interruption_event is set, loop until it's unset
await asyncio.sleep(0.1)
future = asyncio.ensure_future(coro_or_future)
done, pending = await asyncio.wait(
[
future,
asyncio.ensure_future(monitoring_events()),
],
return_when=asyncio.FIRST_COMPLETED, # Return after the first one completes
)
for pending_task in pending:
pending_task.cancel() # Cancel the other one
for done_task in done:
if (
done_task is future
): # If coro_or_future completes first, return its result
return done_task.result() # type: ignore
raise ChatInterruptedException() # If interruption event completes first, raise ChatInterruptedException
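# A minimal usage sketch (not part of the original module) of the watcher above: any
# awaitable can be raced against an asyncio.Event so that setting the event aborts the
# wait with ChatInterruptedException. The coroutine and event names below are
# illustrative assumptions only, not names used elsewhere in this codebase.
async def _example_interruptible_wait() -> str:
    stop_event = asyncio.Event()

    async def slow_generation() -> str:
        await asyncio.sleep(10)  # stand-in for a long-running text generation
        return "done"

    try:
        return await _interruption_event_watcher(slow_generation(), event=stop_event)
    except ChatInterruptedException:
        return "interrupted"  # stop_event was set before slow_generation() finished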
async def _user_summarization(buffer: BufferedUserContext) -> None:
if (
ChatConfig.summarize_for_chat
and buffer.current_user_message_histories[-1].tokens
> ChatConfig.summarization_threshold
):
buffer.task_list.append(
asyncio.create_task(
summarization_task(
user_id=buffer.user_id,
chat_room_id=buffer.current_chat_room_id,
role="user",
to_summarize=buffer.current_user_message_histories[
-1
].content,
message_history_uuid=buffer.current_user_message_histories[
-1
].uuid,
)
)
)
async def summarization_task(
user_id: str,
chat_room_id: str,
role: str,
to_summarize: str,
message_history_uuid: str,
) -> SummarizedResult:
return SummarizedResult(
user_id=user_id,
chat_room_id=chat_room_id,
role=role,
content=await run_in_threadpool(
get_summarization, to_summarize=to_summarize
),
uuid=message_history_uuid,
)
class MessageHandler:
@staticmethod
async def user(
msg: str,
buffer: BufferedUserContext,
use_tight_token_limit: bool = True,
) -> None:
"""Handle user message, including translation"""
if (
not buffer.current_user_message_histories
and UTC.check_string_valid(buffer.current_chat_room_name)
):
buffer.current_chat_room_name = msg[:20]
await CacheManager.update_profile(
user_chat_profile=buffer.current_user_chat_profile,
)
await SendToWebsocket.init(
buffer=buffer, send_chat_rooms=True, wait_next_query=True
)
translate: Optional[str] = buffer.optional_info.get("translate")
if translate: # if user message is translated
translate_chain_result: Optional[str] = await translate_callback(
buffer=buffer,
query=msg,
finish=False,
wait_next_query=False,
show_result=True,
src_lang=translate,
trg_lang="en",
)
if translate_chain_result is not None:
msg = translate_chain_result
user_token: int = buffer.current_user_chat_context.get_tokens_of(msg)
current_llm_model: LLMModel = buffer.current_llm_model.value
token_limit: int = (
current_llm_model.max_tokens_per_request
if use_tight_token_limit
else current_llm_model.max_total_tokens
- ChatConfig.extra_token_margin
)
if user_token > token_limit: # if user message is too long
raise ChatTooMuchTokenException(
msg=f"Message too long. Now {user_token} tokens, "
f"but {token_limit} tokens allowed."
)
await MessageManager.add_message_history_safely(
user_chat_context=buffer.current_user_chat_context,
content=msg,
role=ChatRoles.USER,
uuid=buffer.optional_info.get("uuid"),
calculated_tokens_to_use=user_token
+ current_llm_model.token_margin,
)
await _user_summarization(buffer=buffer)
@classmethod
async def ai(
cls,
buffer: BufferedUserContext,
model: Optional[LLMModel] = None,
) -> None:
"""Handle ai message, including text generation and translation"""
stream_progress = StreamProgress(uuid=uuid4().hex)
if model is None:
model = buffer.current_llm_model.value
async with _ai_context_manager(
buffer=buffer, model=model, stream_progress=stream_progress
):
await _interruption_event_watcher(
_ai_stream(
model=model, buffer=buffer, stream_progress=stream_progress
),
event=buffer.done,
)
await MessageManager.add_message_history_safely(
user_chat_context=buffer.current_user_chat_context,
content=stream_progress.response,
role=ChatRoles.AI,
uuid=stream_progress.uuid,
)
await _ai_summarization(buffer=buffer)
translate: Optional[str] = buffer.optional_info.get("translate")
if translate:
await translate_callback(
buffer=buffer,
query=stream_progress.response,
finish=True,
wait_next_query=False,
show_result=True,
src_lang="en",
trg_lang=translate,
)
@classmethod
async def command(
cls,
callback_name: str,
callback_args: list[str],
callback_finder: Callable[[str], Callable],
buffer: BufferedUserContext,
) -> None:
await _command_interpreter(
callback_name=callback_name,
callback_args=callback_args,
callback_kwargs=dict(buffer.optional_info),
callback_finder=callback_finder,
buffer=buffer,
)
@classmethod
async def function_call(
cls,
callback_name: str,
callback_kwargs: dict[str, Any],
buffer: BufferedUserContext,
) -> None:
print(
f"- DEBUG: Handling function call {callback_name} with {callback_kwargs}",
flush=True,
)
async with _function_call_context_manager(buffer=buffer):
function_call_callback_result: Optional[
Any
] = await _command_interpreter(
callback_name=callback_name,
callback_args=[],
callback_kwargs=callback_kwargs,
callback_finder=FunctionCalls.get_function,
buffer=buffer,
)
if function_call_callback_result:
await _save_function_call_result(
buffer=buffer,
function_call_name=callback_name,
function_call_result=str(function_call_callback_result),
update_cache=False,
)
await MessageHandler.ai(buffer=buffer)
| [] |
2024-01-10 | c0sogi/LLMChat | app~shared.py | from concurrent.futures import ProcessPoolExecutor
from dataclasses import dataclass, field
from multiprocessing import Event as ProcessEvent
from multiprocessing import Manager, Process
from multiprocessing.managers import SyncManager
from multiprocessing.synchronize import Event as ProcessEventClass
from threading import Event as ThreadEvent
from threading import Thread
from typing import Optional
from langchain.chains.combine_documents.map_reduce import (
MapReduceDocumentsChain,
)
from langchain.chains.combine_documents.stuff import StuffDocumentsChain
from langchain.chains.summarize import load_summarize_chain, stuff_prompt
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.embeddings.base import Embeddings
from langchain.utilities import SearxSearchWrapper
from app.common.config import (
OPENAI_API_KEY,
ChatConfig,
SingletonMetaClass,
config,
)
from app.common.constants import SummarizationTemplates
from app.utils.langchain.embeddings_api import APIEmbeddings
from app.utils.langchain.token_text_splitter import CustomTokenTextSplitter
from app.utils.langchain.web_search import DuckDuckGoSearchAPIWrapper
@dataclass
class Shared(metaclass=SingletonMetaClass):
openai_embeddings: OpenAIEmbeddings = field(init=False)
local_embeddings: Optional[APIEmbeddings] = field(init=False)
map_reduce_summarize_chain: MapReduceDocumentsChain = field(init=False)
stuff_summarize_chain: StuffDocumentsChain = field(init=False)
token_text_splitter: CustomTokenTextSplitter = field(
default_factory=lambda: CustomTokenTextSplitter(
encoding_name="cl100k_base"
)
)
searx: SearxSearchWrapper = field(
default_factory=lambda: SearxSearchWrapper(
searx_host="http://localhost:8080"
)
)
duckduckgo: DuckDuckGoSearchAPIWrapper = field(
default_factory=lambda: DuckDuckGoSearchAPIWrapper()
)
def __post_init__(self):
self.openai_embeddings = OpenAIEmbeddings(
client=None,
openai_api_key=OPENAI_API_KEY,
)
if config.llama_embedding_url and ChatConfig.local_embedding_model:
self.local_embeddings = APIEmbeddings(
client=None,
model=ChatConfig.local_embedding_model,
embedding_api_url=config.llama_embedding_url,
)
else:
self.local_embeddings = None
self._process_manager = None
self._process_pool_executor = None
self._process = None
self._process_terminate_signal = None
self._thread = None
self._thread_terminate_signal = None
self.llm = ChatOpenAI(
client=None,
model=ChatConfig.global_openai_model,
openai_api_key=OPENAI_API_KEY,
streaming=False,
)
self.map_reduce_summarize_chain = load_summarize_chain( # type: ignore
self.llm,
chain_type="map_reduce",
map_prompt=stuff_prompt.PROMPT,
combine_prompt=SummarizationTemplates.TEXT__MARKUP,
verbose=config.debug,
)
self.stuff_summarize_chain = load_summarize_chain( # type: ignore
self.llm,
chain_type="stuff",
prompt=SummarizationTemplates.TEXT__MARKUP,
verbose=config.debug,
)
@property
def process_manager(self) -> SyncManager:
if not self._process_manager:
self._process_manager = Manager()
return self._process_manager
@property
def process_pool_executor(self) -> ProcessPoolExecutor:
if not self._process_pool_executor:
self._process_pool_executor = ProcessPoolExecutor()
return self._process_pool_executor
@process_pool_executor.setter
def process_pool_executor(self, value: ProcessPoolExecutor) -> None:
self._process_pool_executor = value
@property
def process(self) -> Process:
if not self._process:
self._process = Process()
return self._process
@process.setter
def process(self, value: Process) -> None:
self._process = value
@property
def thread(self) -> Thread:
if not self._thread:
self._thread = Thread()
return self._thread
@thread.setter
def thread(self, value: Thread) -> None:
self._thread = value
@property
def process_terminate_signal(self) -> ProcessEventClass:
if not self._process_terminate_signal:
self._process_terminate_signal = ProcessEvent()
return self._process_terminate_signal
@property
def thread_terminate_signal(self) -> ThreadEvent:
if not self._thread_terminate_signal:
self._thread_terminate_signal = ThreadEvent()
return self._thread_terminate_signal
@property
def embeddings(self) -> Embeddings:
if self.local_embeddings:
print("Using local embeddings")
return self.local_embeddings
print("Using openai embeddings")
return self.openai_embeddings
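# Usage sketch (not part of the original file), assuming the SingletonMetaClass makes
# every Shared() call return the same instance, so heavyweight members (embeddings,
# summarize chains, process pool) are built once and reused. The sample text is a
# placeholder; embed_documents performs a real embedding request when executed.
#
# shared = Shared()
# assert shared is Shared()  # singleton: same object on every construction
# vectors = shared.embeddings.embed_documents(["hello world"])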
| [] |
2024-01-10 | c0sogi/LLMChat | app~utils~langchain~structured_tool.py | """Base implementation for tools or skills."""
from __future__ import annotations
from inspect import signature, iscoroutinefunction
from typing import Any, Awaitable, Callable, Optional, Type
from langchain.callbacks.manager import (
AsyncCallbackManagerForToolRun,
CallbackManagerForToolRun,
)
from langchain.tools.base import BaseTool, create_schema_from_function
from pydantic import (
BaseModel,
Field,
)
class StructuredTool(BaseTool):
"""Tool that can operate on any number of inputs."""
description: str = ""
args_schema: Type[BaseModel] = Field(..., description="The tool schema.")
"""The input arguments' schema."""
func: Optional[Callable[..., Any]] = None
"""The function to run when the tool is called."""
coroutine: Optional[Callable[..., Awaitable[Any]]] = None
"""The asynchronous version of the function."""
@property
def args(self) -> dict:
"""The tool's input arguments."""
return self.args_schema.schema()["properties"]
def _run(
self,
*args: Any,
run_manager: Optional[CallbackManagerForToolRun] = None,
**kwargs: Any,
) -> Any:
"""Use the tool."""
if self.func:
new_argument_supported = signature(self.func).parameters.get("callbacks")
return (
self.func(
*args,
callbacks=run_manager.get_child() if run_manager else None,
**kwargs,
)
if new_argument_supported
else self.func(*args, **kwargs)
)
raise NotImplementedError("Tool does not support sync")
async def _arun(
self,
*args: Any,
run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
**kwargs: Any,
) -> str:
"""Use the tool asynchronously."""
if self.coroutine:
new_argument_supported = signature(self.coroutine).parameters.get(
"callbacks"
)
return (
await self.coroutine(
*args,
callbacks=run_manager.get_child() if run_manager else None,
**kwargs,
)
if new_argument_supported
else await self.coroutine(*args, **kwargs)
)
return self._run(*args, **kwargs)
@classmethod
def from_function(
cls,
func: Callable,
name: Optional[str] = None,
description: Optional[str] = None,
return_direct: bool = False,
args_schema: Optional[Type[BaseModel]] = None,
infer_schema: bool = True,
**kwargs: Any,
) -> StructuredTool:
name = name or func.__name__
description = description or func.__doc__
assert (
description is not None
), "Function must have a docstring if description not provided."
# Description example:
# search_api(query: str) - Searches the API for the query.
description = f"{name}{signature(func)} - {description.strip()}"
_args_schema = args_schema
if _args_schema is None and infer_schema:
_args_schema = create_schema_from_function(f"{name}Schema", func)
is_coroutine_function = iscoroutinefunction(func)
return cls(
name=name,
func=func if not is_coroutine_function else None,
coroutine=func if is_coroutine_function else None,
args_schema=_args_schema,
description=description,
return_direct=return_direct,
**kwargs,
)
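# Usage sketch (not part of the original module): from_function can wrap a plain
# callable into a StructuredTool, inferring the argument schema from its signature.
# The multiply function below is introduced only for this example.
#
# def multiply(a: int, b: int) -> int:
#     """Multiply two integers."""
#     return a * b
#
# multiply_tool = StructuredTool.from_function(multiply)
# multiply_tool.args                    # schema properties inferred from the signature
# multiply_tool.run({"a": 2, "b": 3})   # -> 6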
| [] |
2024-01-10 | c0sogi/LLMChat | app~utils~langchain~embeddings_api.py | """Wrapper around embedding API models."""
from __future__ import annotations
import logging
from typing import (
Any,
List,
Optional,
Tuple,
Union,
)
import requests
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
logger = logging.getLogger(__name__)
class APIEmbeddings(BaseModel, Embeddings):
"""Wrapper around embedding models from OpenAI-style API"""
client: Any #: :meta private:
model: str = "intfloat/e5-large-v2"
embedding_api_url: str = "http://localhost:8002/v1/embeddings"
request_timeout: Optional[Union[float, Tuple[float, float]]] = None
"""Timeout in seconds for the API request."""
headers: Any = None
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to embedding endpoint for embedding search docs.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
        # NOTE: texts are sent to the embedding API as-is; no length-safe chunking
        # for inputs longer than the model's maximum context is applied here.
response = requests.post(
self.embedding_api_url,
json={
"model": self.model,
"input": texts,
},
)
# Check if the request was successful
if response.status_code == 200:
# Parse the response
response_data = response.json()
# Extract the embeddings and total tokens
embeddings: list[list[float]] = [
data["embedding"] for data in response_data["data"]
]
total_tokens = response_data["usage"]["total_tokens"]
return embeddings
raise ConnectionError(
f"Request to {self.embedding_api_url} failed with status code "
f"{response.status_code}."
)
def embed_query(self, text: str) -> List[float]:
"""Call out to OpenAI's embedding endpoint for embedding query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
response = requests.post(
self.embedding_api_url,
json={
"model": self.model,
"input": text,
},
)
# Check if the request was successful
if response.status_code == 200:
# Parse the response
response_data = response.json()
# Extract the embeddings and total tokens
embeddings: list[list[float]] = [
data["embedding"] for data in response_data["data"]
]
total_tokens = response_data["usage"]["total_tokens"]
return embeddings[0]
raise ConnectionError(
f"Request to {self.embedding_api_url} failed with status code "
f"{response.status_code}."
)
if __name__ == "__main__":
import argparse
import json
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="intfloat/e5-large-v2")
parser.add_argument(
"--embedding_api_url", type=str, default="http://localhost:8002/v1/embeddings"
)
parser.add_argument("--request_timeout", type=float, default=None)
parser.add_argument("--headers", type=str, default=None)
parser.add_argument("--text", type=str, default="Hello, world!")
args = parser.parse_args()
print(args)
# Create the API embeddings model
api_embeddings = APIEmbeddings(
client=None,
model=args.model,
embedding_api_url=args.embedding_api_url,
request_timeout=args.request_timeout,
headers=args.headers,
)
# Embed the query
query_embedding = api_embeddings.embed_query(args.text)
# Print the query embedding
print(json.dumps(query_embedding))
| [] |
2024-01-10 | c0sogi/LLMChat | app~utils~langchain~web_search.py | import json
from typing import Any, Dict, List, Optional
import aiohttp
import requests
from langchain.utils import get_from_dict_or_env
from pydantic import (
BaseModel,
Extra,
Field,
PrivateAttr,
root_validator,
validator,
)
from app.utils.api.duckduckgo import DDGS
def _get_default_params() -> dict:
return {"language": "en", "format": "json"}
class SearxResults(dict):
"""Dict like wrapper around search api results."""
_data = ""
def __init__(self, data: str):
"""Take a raw result from Searx and make it into a dict like object."""
json_data = json.loads(data)
super().__init__(json_data)
self.__dict__ = self
def __str__(self) -> str:
"""Text representation of searx result."""
return self._data
@property
def results(self) -> Any:
"""Silence mypy for accessing this field.
:meta private:
"""
return self.get("results")
@property
def answers(self) -> Any:
"""Helper accessor on the json result."""
return self.get("answers")
class SearxSearchWrapper(BaseModel):
"""Wrapper for Searx API.
To use you need to provide the searx host by passing the named parameter
``searx_host`` or exporting the environment variable ``SEARX_HOST``.
In some situations you might want to disable SSL verification, for example
if you are running searx locally. You can do this by passing the named parameter
``unsecure``. You can also pass the host url scheme as ``http`` to disable SSL.
Example:
.. code-block:: python
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://localhost:8888")
Example with SSL disabled:
.. code-block:: python
from langchain.utilities import SearxSearchWrapper
# note the unsecure parameter is not needed if you pass the url scheme as
# http
searx = SearxSearchWrapper(searx_host="http://localhost:8888",
unsecure=True)
"""
_result: SearxResults = PrivateAttr()
searx_host: str = ""
unsecure: bool = False
params: dict = Field(default_factory=_get_default_params)
headers: Optional[dict] = None
engines: Optional[List[str]] = []
categories: Optional[List[str]] = []
query_suffix: Optional[str] = ""
k: int = 10
aiosession: Optional[Any] = None
@validator("unsecure")
def disable_ssl_warnings(cls, v: bool) -> bool:
"""Disable SSL warnings."""
if v:
# requests.urllib3.disable_warnings()
try:
import urllib3
urllib3.disable_warnings()
except ImportError as e:
print(e)
return v
@root_validator()
def validate_params(cls, values: Dict) -> Dict:
"""Validate that custom searx params are merged with default ones."""
user_params = values["params"]
default = _get_default_params()
values["params"] = {**default, **user_params}
engines = values.get("engines")
if engines:
values["params"]["engines"] = ",".join(engines)
categories = values.get("categories")
if categories:
values["params"]["categories"] = ",".join(categories)
searx_host = get_from_dict_or_env(values, "searx_host", "SEARX_HOST")
if not searx_host.startswith("http"):
            print(
                "Warning: missing the url scheme on host; "
                f"assuming secure https://{searx_host}"
            )
searx_host = "https://" + searx_host
elif searx_host.startswith("http://"):
values["unsecure"] = True
cls.disable_ssl_warnings(True)
values["searx_host"] = searx_host
return values
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def _searx_api_query(self, params: dict) -> SearxResults:
"""Actual request to searx API."""
raw_result = requests.get(
self.searx_host,
headers=self.headers,
params=params,
verify=not self.unsecure,
)
# test if http result is ok
if not raw_result.ok:
raise ValueError("Searx API returned an error: ", raw_result.text)
res = SearxResults(raw_result.text)
self._result = res
return res
async def _asearx_api_query(self, params: dict) -> SearxResults:
if not self.aiosession:
async with aiohttp.ClientSession() as session:
async with session.get(
self.searx_host,
headers=self.headers,
params=params,
                    ssl=False if self.unsecure else None,
) as response:
if not response.ok:
raise ValueError(
"Searx API returned an error: ", response.text
)
result = SearxResults(await response.text())
self._result = result
else:
async with self.aiosession.get(
self.searx_host,
headers=self.headers,
params=params,
                ssl=False if self.unsecure else None,  # aiohttp expects ssl=, not requests-style verify=
) as response:
if not response.ok:
raise ValueError(
"Searx API returned an error: ", response.text
)
result = SearxResults(await response.text())
self._result = result
return result
def run(
self,
query: str,
engines: Optional[List[str]] = None,
categories: Optional[List[str]] = None,
query_suffix: Optional[str] = "",
**kwargs: Any,
) -> str:
"""Run query through Searx API and parse results.
You can pass any other params to the searx query API.
Args:
query: The query to search for.
query_suffix: Extra suffix appended to the query.
engines: List of engines to use for the query.
categories: List of categories to use for the query.
**kwargs: extra parameters to pass to the searx API.
Returns:
str: The result of the query.
Raises:
            ValueError: If an error occurred with the query.
Example:
This will make a query to the qwant engine:
.. code-block:: python
from langchain.utilities import SearxSearchWrapper
searx = SearxSearchWrapper(searx_host="http://my.searx.host")
searx.run("what is the weather in France ?", engine="qwant")
# the same result can be achieved using the `!` syntax of searx
# to select the engine using `query_suffix`
searx.run("what is the weather in France ?", query_suffix="!qwant")
"""
_params = {
"q": query,
}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params["q"] += " " + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params["q"] += " " + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params["engines"] = ",".join(engines)
if isinstance(categories, list) and len(categories) > 0:
params["categories"] = ",".join(categories)
res = self._searx_api_query(params)
if len(res.answers) > 0:
toret = res.answers[0]
# only return the content of the results list
elif len(res.results) > 0:
toret = "\n\n".join(
[r.get("content", "") for r in res.results[: self.k]]
)
else:
toret = "No good search result found"
return toret
async def arun(
self,
query: str,
engines: Optional[List[str]] = None,
query_suffix: Optional[str] = "",
**kwargs: Any,
) -> str:
"""Asynchronously version of `run`."""
_params = {
"q": query,
}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params["q"] += " " + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params["q"] += " " + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params["engines"] = ",".join(engines)
res = await self._asearx_api_query(params)
if len(res.answers) > 0:
toret = res.answers[0]
# only return the content of the results list
elif len(res.results) > 0:
toret = "\n\n".join(
[r.get("content", "") for r in res.results[: self.k]]
)
else:
toret = "No good search result found"
return toret
def results(
self,
query: str,
num_results: int,
engines: Optional[List[str]] = None,
categories: Optional[List[str]] = None,
query_suffix: Optional[str] = "",
**kwargs: Any,
) -> List[Dict]:
"""Run query through Searx API and returns the results with metadata.
Args:
query: The query to search for.
query_suffix: Extra suffix appended to the query.
num_results: Limit the number of results to return.
engines: List of engines to use for the query.
categories: List of categories to use for the query.
**kwargs: extra parameters to pass to the searx API.
Returns:
Dict with the following keys:
{
snippet: The description of the result.
title: The title of the result.
link: The link to the result.
engines: The engines used for the result.
category: Searx category of the result.
}
"""
_params = {
"q": query,
}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params["q"] += " " + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params["q"] += " " + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params["engines"] = ",".join(engines)
if isinstance(categories, list) and len(categories) > 0:
params["categories"] = ",".join(categories)
results = self._searx_api_query(params).results[:num_results]
if len(results) == 0:
return [{"Result": "No good Search Result was found"}]
return [
{
"snippet": result.get("content", ""),
"title": result["title"],
"link": result["url"],
"engines": result["engines"],
"category": result["category"],
}
for result in results
]
async def aresults(
self,
query: str,
num_results: int,
engines: Optional[List[str]] = None,
query_suffix: Optional[str] = "",
**kwargs: Any,
) -> List[Dict]:
"""Asynchronously query with json results.
Uses aiohttp. See `results` for more info.
"""
_params = {
"q": query,
}
params = {**self.params, **_params, **kwargs}
if self.query_suffix and len(self.query_suffix) > 0:
params["q"] += " " + self.query_suffix
if isinstance(query_suffix, str) and len(query_suffix) > 0:
params["q"] += " " + query_suffix
if isinstance(engines, list) and len(engines) > 0:
params["engines"] = ",".join(engines)
results = (await self._asearx_api_query(params)).results[:num_results]
if len(results) == 0:
return [{"Result": "No good Search Result was found"}]
return [
{
"snippet": result.get("content", ""),
"title": result["title"],
"link": result["url"],
"engines": result["engines"],
"category": result["category"],
}
for result in results
]
class DuckDuckGoSearchAPIWrapper(BaseModel):
"""Wrapper for DuckDuckGo Search API.
Free and does not require any setup
"""
k: int = 10 # Not used
region: str = "wt-wt"
safesearch: str = "moderate"
time: Optional[str] = "y"
max_results: int = 10 # Max results to return
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@staticmethod
def _ddg(
keywords: str,
region: str = "wt-wt",
safesearch: str = "moderate",
timelimit: Optional[str] = None,
backend: str = "api",
max_results: Optional[int] = None,
pages: Optional[int] = 1,
results_per_page: int = 20,
) -> List[Dict]:
results = []
for result in DDGS().text(
keywords=keywords,
region=region,
safesearch=safesearch,
timelimit=timelimit,
backend=backend,
):
results.append(result)
if (max_results and len(results) >= max_results) or (
pages and len(results) >= results_per_page * pages
):
break
return results
@staticmethod
def _get_formatted_result(result: Dict[str, str]) -> str:
return "# [{link}]\n```{title}\n{snippet}\n```".format(
title=result["title"],
snippet=result["snippet"],
link=result["link"],
)
def get_snippets(self, query: str) -> List[str]:
"""Run query through DuckDuckGo and return concatenated results."""
results = self._ddg(
query,
region=self.region,
safesearch=self.safesearch,
timelimit=self.time,
max_results=self.max_results,
)
if results is None or len(results) == 0:
return ["No good DuckDuckGo Search Result was found"]
snippets = [result["body"] for result in results]
return snippets
def run(self, query: str) -> str:
return "\n\n".join(self.formatted_results(query))
def formatted_results(self, query: str) -> List[str]:
return [
self._get_formatted_result(result)
for result in self.results(query)
]
def formatted_results_with_link(self, query: str) -> Dict[str, str]:
return {
result["link"]: self._get_formatted_result(result)
for result in self.results(query)
}
def results(
self, query: str, num_results: Optional[int] = None
) -> List[Dict[str, str]]:
"""Run query through DuckDuckGo and return metadata.
Args:
query: The query to search for.
num_results: The number of results to return.
Returns:
A list of dictionaries with the following keys:
snippet - The description of the result.
title - The title of the result.
link - The link to the result.
"""
results = self._ddg(
query,
region=self.region,
safesearch=self.safesearch,
timelimit=self.time,
max_results=self.max_results,
)
if results is None or len(results) == 0:
return [{"Result": "No good DuckDuckGo Search Result was found"}]
return [
{
"snippet": result["body"],
"title": result["title"],
"link": result["href"],
}
for result in results
][:num_results]
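# Usage sketch (not part of the original module) for the DuckDuckGo wrapper above.
# The query string is a placeholder, and executing these lines performs real network
# requests through the duckduckgo_search backend.
#
# ddg = DuckDuckGoSearchAPIWrapper(max_results=3)
# for item in ddg.results("python 3.12 release notes"):
#     print(item["title"], "->", item["link"])
# print(ddg.run("python 3.12 release notes"))  # formatted blocks built by _get_formatted_result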
| [] |
2024-01-10 | c0sogi/LLMChat | app~utils~langchain~token_text_splitter.py | from copy import deepcopy
from typing import (
AbstractSet,
Any,
Collection,
Iterable,
List,
Literal,
Optional,
Sequence,
Union,
)
from langchain.docstore.document import Document
from langchain.text_splitter import Tokenizer, TokenTextSplitter, split_text_on_tokens
class CustomTokenTextSplitter(TokenTextSplitter):
"""Implementation of splitting text that looks at tokens."""
def __init__(
self,
encoding_name: str = "gpt2",
model_name: Optional[str] = None,
allowed_special: Union[Literal["all"], AbstractSet[str]] = set(),
disallowed_special: Union[Literal["all"], Collection[str]] = "all",
**kwargs: Any,
):
super().__init__(
encoding_name=encoding_name,
model_name=model_name,
allowed_special=allowed_special,
disallowed_special=disallowed_special,
**kwargs,
)
def split_text(
self,
text: str,
tokens_per_chunk: Optional[int] = None,
chunk_overlap: Optional[int] = None,
) -> List[str]:
def _encode(_text: str) -> List[int]:
return self._tokenizer.encode(
_text,
allowed_special=self._allowed_special, # type: ignore
disallowed_special=self._disallowed_special,
)
tokenizer = Tokenizer(
chunk_overlap=self._chunk_overlap
if chunk_overlap is None
else chunk_overlap,
tokens_per_chunk=self._chunk_size
if tokens_per_chunk is None
else tokens_per_chunk,
decode=self._tokenizer.decode,
encode=_encode,
)
return split_text_on_tokens(text=text, tokenizer=tokenizer)
def create_documents(
self,
texts: List[str],
metadatas: Optional[List[dict]] = None,
tokens_per_chunk: Optional[int] = None,
chunk_overlap: Optional[int] = None,
) -> List[Document]:
"""Create documents from a list of texts."""
_metadatas = metadatas or [{}] * len(texts)
documents = []
for i, text in enumerate(texts):
index = -1
for chunk in self.split_text(
text,
tokens_per_chunk=tokens_per_chunk,
chunk_overlap=chunk_overlap,
):
metadata = deepcopy(_metadatas[i])
if self._add_start_index:
index = text.find(chunk, index + 1)
metadata["start_index"] = index
new_doc = Document(page_content=chunk, metadata=metadata)
documents.append(new_doc)
return documents
def split_documents(
self,
documents: Iterable[Document],
tokens_per_chunk: Optional[int] = None,
chunk_overlap: Optional[int] = None,
) -> List[Document]:
"""Split documents."""
texts, metadatas = [], []
for doc in documents:
texts.append(doc.page_content)
metadatas.append(doc.metadata)
return self.create_documents(
texts,
metadatas=metadatas,
tokens_per_chunk=tokens_per_chunk,
chunk_overlap=chunk_overlap,
)
def transform_documents(
self,
documents: Sequence[Document],
tokens_per_chunk: Optional[int] = None,
chunk_overlap: Optional[int] = None,
**kwargs: Any,
) -> Sequence[Document]:
"""Transform sequence of documents by splitting them."""
return self.split_documents(
list(documents),
tokens_per_chunk=tokens_per_chunk,
chunk_overlap=chunk_overlap,
)
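# Usage sketch (not part of the original module): the point of this subclass is that
# tokens_per_chunk and chunk_overlap can be chosen per call instead of being fixed at
# construction time. The sample text is a placeholder.
#
# splitter = CustomTokenTextSplitter(encoding_name="cl100k_base")
# small_chunks = splitter.split_text("some long text " * 500, tokens_per_chunk=128, chunk_overlap=16)
# large_chunks = splitter.split_text("some long text " * 500, tokens_per_chunk=512, chunk_overlap=0)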
| [] |
2024-01-10 | c0sogi/LLMChat | app~utils~langchain~redis_vectorstore.py | """Wrapper around Redis vector database."""
from __future__ import annotations
from orjson import dumps as orjson_dumps
from orjson import loads as orjson_loads
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
Tuple,
Type,
Union,
)
from uuid import uuid4
import numpy as np
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.schema import BaseRetriever
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, root_validator
from app.utils.logger import ApiLogger
try:
from starlette.concurrency import run_in_threadpool
except ImportError:
raise ImportError(
"Please install starlette to use the Redis vector store. "
"pip install starlette"
)
try:
import redis
if TYPE_CHECKING:
from redis.client import Pipeline as PipelineType
from redis.client import Redis as RedisType
from redis.commands.search.field import TextField, VectorField
from redis.commands.search.indexDefinition import (
IndexDefinition,
IndexType,
)
from redis.commands.search.query import Query
import redis.asyncio as aioredis
if TYPE_CHECKING:
from redis.asyncio.client import Pipeline as AsyncPipelineType
from redis.asyncio.client import Redis as AsyncRedisType
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
# required modules
REDIS_REQUIRED_MODULES = [
{"name": "search", "ver": 20400},
]
class DistanceMetric(str, Enum):
"""Enum for distance metrics for vectors."""
EUCLIDEAN = "EUCLIDEAN"
HAMMING = "HAMMING"
JACCARD = "JACCARD"
COSINE = "COSINE"
DOT_PRODUCT = "DOT_PRODUCT"
def _check_redis_module_exist(client: RedisType, modules: List[dict]) -> None:
"""Check if the correct Redis modules are installed."""
installed_modules = client.module_list()
installed_modules = {
module[b"name"].decode("utf-8"): module for module in installed_modules
}
for module in modules:
if module["name"] not in installed_modules or int(
installed_modules[module["name"]][b"ver"]
) < int(module["ver"]):
error_message = (
"You must add the RediSearch (>= 2.4) module from Redis Stack. "
"Please refer to Redis Stack docs: https://redis.io/docs/stack/"
)
ApiLogger.cerror(error_message)
raise ValueError(error_message)
async def _acheck_redis_module_exist(
client: AsyncRedisType, modules: List[dict]
) -> None:
"""Check if the correct Redis modules are installed, asynchronously."""
installed_modules = await client.module_list()
installed_modules = {
module[b"name"].decode("utf-8"): module for module in installed_modules
}
for module in modules:
if module["name"] not in installed_modules or int(
installed_modules[module["name"]][b"ver"]
) < int(module["ver"]):
error_message = (
"You must add the RediSearch (>= 2.4) module from Redis Stack. "
"Please refer to Redis Stack docs: https://redis.io/docs/stack/"
)
ApiLogger.cerror(error_message)
raise ValueError(error_message)
def _check_index_exists(client: RedisType, index_name: str) -> bool:
"""Check if Redis index exists."""
try:
client.ft(index_name).info()
except Exception:
ApiLogger.cinfo("Index does not exist")
return False
ApiLogger.cinfo("Index already exists")
return True
async def _acheck_index_exists(
client: AsyncRedisType, index_name: str
) -> bool:
"""Check if Redis index exists, asynchronously."""
try:
await client.ft(index_name).info()
except Exception:
ApiLogger.cinfo("Index does not exist")
return False
ApiLogger.cinfo("Index exists!")
return True
def _redis_client_from_url(redis_url: str, **kwargs: Any) -> RedisType:
"""Create a Redis client from a URL checking if the required modules are installed."""
try:
client = redis.from_url(url=redis_url, **kwargs)
# check if redis has redisearch module installed
_check_redis_module_exist(client, REDIS_REQUIRED_MODULES)
except ValueError as e:
raise ValueError(f"Redis failed to connect: {e}")
return client
async def _aredis_client_from_url(
redis_url: str, **kwargs: Any
) -> AsyncRedisType:
"""Create a Redis client from a URL checking if the required modules are installed, asynchronously."""
try:
client = aioredis.from_url(url=redis_url, **kwargs)
# check if redis has redisearch module installed
await _acheck_redis_module_exist(client, REDIS_REQUIRED_MODULES)
except ValueError as e:
raise ValueError(f"Redis failed to connect: {e}")
return client
def _redis_key(prefix: str) -> str:
"""Redis key schema for a given prefix."""
return f"{prefix}:{uuid4().hex}"
def _redis_prefix(index_name: str) -> str:
"""Redis key prefix for a given index."""
return f"doc:{index_name}"
def _redis_vectorstore_schema(
content_key: str,
metadata_key: str,
vector_key: str,
dim: int,
distance_metric: str,
) -> Tuple[TextField, TextField, VectorField]:
return (
TextField(name=content_key),
TextField(name=metadata_key),
VectorField(
vector_key,
"FLAT",
{
"TYPE": "FLOAT32",
"DIM": dim,
"DISTANCE_METRIC": distance_metric,
},
),
)
def _redis_embed_texts_to_pipeline(
texts: List[str],
prefix: str,
content_key: str,
metadata_key: str,
vector_key: str,
embeddings: List[List[float]],
pipeline: Union[PipelineType, AsyncPipelineType],
metadatas: Optional[List[dict]] = None,
) -> None:
for i, text in enumerate(texts):
key = _redis_key(prefix)
metadata = metadatas[i] if metadatas else {}
pipeline.hset(
key,
mapping={
content_key: text,
vector_key: np.array(
embeddings[i], dtype=np.float32
).tobytes(),
metadata_key: orjson_dumps(metadata),
},
)
def _ensure_index_exist(
client: RedisType,
index_name: str,
prefix: str,
content_key: str,
metadata_key: str,
vector_key: str,
dim: int,
) -> None:
if not _check_index_exists(client, index_name):
# Constants
schema = _redis_vectorstore_schema(
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
dim=dim,
distance_metric=DistanceMetric.COSINE.value,
)
# Create Redis Index
client.ft(index_name).create_index(
fields=schema,
definition=IndexDefinition(
prefix=[prefix], index_type=IndexType.HASH
),
)
async def _aensure_index_exist(
client: AsyncRedisType,
index_name: str,
prefix: str,
dim: int,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
) -> None:
if not await _acheck_index_exists(client, index_name):
# Constants
schema = _redis_vectorstore_schema(
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
dim=dim,
distance_metric=DistanceMetric.COSINE.value,
)
# Create Redis Index
await client.ft(index_name).create_index(
fields=schema,
definition=IndexDefinition(
prefix=[prefix], index_type=IndexType.HASH
),
)
class Redis(VectorStore):
def __init__(
self,
redis_url: str,
embedding_function: Callable,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
is_async: bool = False,
**kwargs: Any,
):
"""Initialize with necessary components."""
self.embedding_function = embedding_function
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
if "redis_url" in kwargs:
kwargs.pop("redis_url")
client = _redis_client_from_url(redis_url=redis_url, **kwargs)
if is_async:
client.close()
self.client = aioredis.from_url(url=redis_url, **kwargs)
else:
self.client = client # type: ignore
self.content_key = content_key
self.metadata_key = metadata_key
self.vector_key = vector_key
def _add_texts(
self,
texts: Iterable[str],
index_name: str,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> Tuple[List[str], Union[PipelineType, AsyncPipelineType]]:
prefix = _redis_prefix(index_name)
keys = kwargs.get("keys")
ids = []
# Write data to redis
pipeline = self.client.pipeline(transaction=False)
for i, text in enumerate(texts):
# Use provided key otherwise use default key
key = keys[i] if keys else _redis_key(prefix)
metadata = metadatas[i] if metadatas else {}
pipeline.hset(
key,
mapping={
self.content_key: text,
self.vector_key: np.array(
self.embedding_function(text), dtype=np.float32
).tobytes(),
self.metadata_key: orjson_dumps(metadata),
},
)
ids.append(key)
return ids, pipeline
def add_texts(
self,
texts: Iterable[str],
index_name: str,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Add texts data to an existing index."""
ids, pipeline = self._add_texts(
texts, index_name=index_name, metadatas=metadatas, **kwargs
)
pipeline.execute()
return ids
async def aadd_texts(
self,
texts: Iterable[str],
index_name: str,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> List[str]:
"""Add texts data to an existing index."""
ids, pipeline = await run_in_threadpool(
self._add_texts, texts, index_name, metadatas, **kwargs
)
await pipeline.execute() # type: ignore
return ids
def similarity_search(
self, query: str, index_name: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = self.similarity_search_with_score(
query, index_name=index_name, k=k
)
return [doc for doc, _ in docs_and_scores]
async def asimilarity_search(
self, query: str, index_name: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text, asynchronously.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
Returns:
List[Document]: A list of documents that are most similar to the query text.
"""
docs_and_scores = await self.asimilarity_search_with_score(
query, index_name=index_name, k=k
)
return [doc for doc, _ in docs_and_scores]
def similarity_search_limit_score(
self,
query: str,
index_name: str,
k: int = 4,
score_threshold: float = 0.2,
**kwargs: Any,
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text within the
score_threshold range.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
            score_threshold (float): The maximum vector distance allowed for a
            document to be considered a match. Defaults to 0.2.
            Because the score is a cosine distance, a smaller score means a smaller
            angle between the vectors and therefore a higher similarity.
Returns:
List[Document]: A list of documents that are most similar to the query text,
including the match score for each document.
Note:
If there are no documents that satisfy the score_threshold value,
an empty list is returned.
"""
docs_and_scores = self.similarity_search_with_score(
query, index_name=index_name, k=k
)
return [
doc for doc, score in docs_and_scores if score < score_threshold
]
async def asimilarity_search_limit_score(
self,
query: str,
index_name: str,
k: int = 4,
score_threshold: float = 0.2,
**kwargs: Any,
) -> List[Document]:
"""
Returns the most similar indexed documents to the query text within the
score_threshold range, asynchronously.
Args:
query (str): The query text for which to find similar documents.
k (int): The number of documents to return. Default is 4.
            score_threshold (float): The maximum vector distance allowed for a
            document to be considered a match. Defaults to 0.2.
            Because the score is a cosine distance, a smaller score means a smaller
            angle between the vectors and therefore a higher similarity.
Returns:
List[Document]: A list of documents that are most similar to the query text,
including the match score for each document.
Note:
If there are no documents that satisfy the score_threshold value,
an empty list is returned.
"""
docs_and_scores = await self.asimilarity_search_with_score(
query, index_name=index_name, k=k
)
return [
doc for doc, score in docs_and_scores if score < score_threshold
]
def _similarity_search_with_score(
self, query: str, k: int = 4
) -> Tuple[Query, Mapping[str, str]]:
# Creates embedding vector from user query
embedding = self.embedding_function(query)
# Prepare the Query
return_fields = [self.metadata_key, self.content_key, "vector_score"]
vector_field = self.vector_key
hybrid_fields = "*"
base_query = f"{hybrid_fields}=>[KNN {k} @{vector_field} $vector AS vector_score]"
redis_query = (
Query(base_query)
.return_fields(*return_fields)
.sort_by("vector_score")
.paging(0, k)
.dialect(2)
)
params_dict: Mapping[str, str] = {
"vector": np.array(embedding).astype(dtype=np.float32).tobytes() # type: ignore
}
return redis_query, params_dict
def similarity_search_with_score(
self, query: str, index_name: str, k: int = 4
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
redis_query, params_dict = self._similarity_search_with_score(
query, k=k
)
# perform vector search
results = self.client.ft(index_name).search(redis_query, params_dict)
docs = [
(
Document(
page_content=result.content,
metadata=orjson_loads(result.metadata),
),
float(result.vector_score),
)
for result in results.docs
]
return docs
async def asimilarity_search_with_score(
self, query: str, index_name: str, k: int = 4
) -> List[Tuple[Document, float]]:
"""Return docs most similar to query, asynchronously.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query and score for each
"""
redis_query, params_dict = await run_in_threadpool(
self._similarity_search_with_score, query, k
)
# perform vector search
results = await self.client.ft(index_name).search(redis_query, params_dict) # type: ignore
docs = [
(
Document(
page_content=result.content,
metadata=orjson_loads(result.metadata),
),
float(result.vector_score),
)
for result in results.docs
]
return docs
@classmethod
def from_texts(
cls: Type[Redis],
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
index_name: Optional[str] = None,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
**kwargs: Any,
) -> Redis:
"""Construct RediSearch wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the RediSearch instance.
3. Adds the documents to the newly created RediSearch index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import RediSearch
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redisearch = RediSearch.from_texts(
texts,
embeddings,
redis_url="redis://username:password@localhost:6379"
)
"""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
if "redis_url" in kwargs:
kwargs.pop("redis_url")
client = _redis_client_from_url(redis_url=redis_url, **kwargs)
# Create embeddings over documents
embeddings = embedding.embed_documents(texts)
# Name of the search index if not given
if not index_name:
index_name = uuid4().hex
prefix = _redis_prefix(index_name) # prefix for the document keys
# Check if index exists
_ensure_index_exist(
client=client,
index_name=index_name,
prefix=prefix,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
dim=len(embeddings[0]),
)
# Write data to Redis
pipeline = client.pipeline(transaction=False)
_redis_embed_texts_to_pipeline(
pipeline=pipeline,
texts=texts,
embeddings=embeddings,
metadatas=metadatas,
prefix=prefix,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
)
pipeline.execute()
return cls(
redis_url,
embedding.embed_query,
index_name=index_name,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
is_async=False,
**kwargs,
)
@classmethod
async def afrom_texts(
cls: Type[Redis],
texts: List[str],
embedding: Embeddings,
index_name: str,
metadatas: Optional[List[dict]] = None,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
**kwargs: Any,
) -> Redis:
"""Construct RediSearch wrapper from raw documents, asynchronously.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the RediSearch instance.
3. Adds the documents to the newly created RediSearch index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import RediSearch
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
redisearch = await RediSearch.afrom_texts(
texts,
embeddings,
redis_url="redis://username:password@localhost:6379"
)
"""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
if "redis_url" in kwargs:
kwargs.pop("redis_url")
client = await _aredis_client_from_url(redis_url=redis_url, **kwargs)
# Create embeddings over documents
embeddings = embedding.embed_documents(texts)
prefix = _redis_prefix(index_name) # prefix for the document keys
# Check if index exists
await _aensure_index_exist(
client=client,
index_name=index_name,
prefix=prefix,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
dim=len(embeddings[0]),
)
# Write data to Redis
pipeline = client.pipeline(transaction=False)
_redis_embed_texts_to_pipeline(
pipeline=pipeline,
texts=texts,
embeddings=embeddings,
metadatas=metadatas,
prefix=prefix,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
)
await pipeline.execute()
return cls(
redis_url,
embedding.embed_query,
index_name=index_name,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
is_async=True,
**kwargs,
)
@staticmethod
def drop_index(
index_name: str,
delete_documents: bool,
**kwargs: Any,
) -> bool:
"""
Drop a Redis search index.
Args:
index_name (str): Name of the index to drop.
delete_documents (bool): Whether to drop the associated documents.
Returns:
bool: Whether or not the drop was successful.
"""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
if "redis_url" in kwargs:
kwargs.pop("redis_url")
client = _redis_client_from_url(redis_url=redis_url, **kwargs)
# Check if index exists
try:
client.ft(index_name).dropindex(delete_documents)
ApiLogger.cinfo("Drop index")
return True
except Exception:
# Index not exist
return False
@staticmethod
async def adrop_index(
index_name: str,
delete_documents: bool,
**kwargs: Any,
) -> bool:
"""
Drop a Redis search index, asynchronously.
Args:
index_name (str): Name of the index to drop.
delete_documents (bool): Whether to drop the associated documents.
Returns:
bool: Whether or not the drop was successful.
"""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
# We need to first remove redis_url from kwargs,
# otherwise passing it to Redis will result in an error.
if "redis_url" in kwargs:
kwargs.pop("redis_url")
try:
client = await _aredis_client_from_url(
redis_url=redis_url, **kwargs
)
except ValueError as e:
raise ValueError(f"Your redis connected error: {e}")
# Check if index exists
try:
await client.ft(index_name).dropindex(delete_documents)
ApiLogger.cinfo("Drop index")
return True
except Exception:
# Index not exist
return False
@classmethod
def from_existing_index(
cls,
embedding: Embeddings,
index_name: str,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
**kwargs: Any,
) -> Redis:
"""Connect to an existing Redis index."""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
if "redis_url" in kwargs:
kwargs.pop("redis_url")
try:
client = _redis_client_from_url(redis_url=redis_url, **kwargs)
# ensure that the index already exists
assert _check_index_exists(
client, index_name
), f"Index {index_name} does not exist"
except Exception as e:
raise ValueError(f"Redis failed to connect: {e}")
return cls(
redis_url,
embedding.embed_query,
index_name=index_name,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
is_async=False,
**kwargs,
)
@classmethod
async def afrom_existing_index(
cls,
embedding: Embeddings,
index_name: str,
content_key: str = "content",
metadata_key: str = "metadata",
vector_key: str = "content_vector",
**kwargs: Any,
) -> Redis:
"""Connect to an existing Redis index, asynchronously."""
redis_url = get_from_dict_or_env(kwargs, "redis_url", "REDIS_URL")
if "redis_url" in kwargs:
kwargs.pop("redis_url")
try:
client = await _aredis_client_from_url(
redis_url=redis_url, **kwargs
)
# ensure that the index already exists
assert await _acheck_index_exists(
client, index_name
), f"Index {index_name} does not exist"
except Exception as e:
raise ValueError(f"Redis failed to connect: {e}")
return cls(
redis_url,
embedding.embed_query,
index_name=index_name,
content_key=content_key,
metadata_key=metadata_key,
vector_key=vector_key,
is_async=True,
**kwargs,
)
def as_retriever(self, **kwargs: Any) -> BaseRetriever:
return RedisVectorStoreRetriever(vectorstore=self, **kwargs)
class RedisVectorStoreRetriever(BaseRetriever, BaseModel):
vectorstore: Redis
search_type: str = "similarity"
k: int = 4
score_threshold: float = 0.4
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@root_validator()
def validate_search_type(cls, values: Dict) -> Dict:
"""Validate search type."""
if "search_type" in values:
search_type = values["search_type"]
if search_type not in ("similarity", "similarity_limit"):
raise ValueError(f"search_type of {search_type} not allowed.")
return values
def _get_relevant_documents(
self, query: str, index_name: str
) -> List[Document]:
if self.search_type == "similarity":
docs = self.vectorstore.similarity_search(
query, index_name=index_name, k=self.k
)
elif self.search_type == "similarity_limit":
docs = self.vectorstore.similarity_search_limit_score(
query,
index_name=index_name,
k=self.k,
score_threshold=self.score_threshold,
)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
return docs
async def _aget_relevant_documents(
self, query: str, index_name: str
) -> List[Document]:
if self.search_type == "similarity":
docs = await self.vectorstore.asimilarity_search(
query, index_name=index_name, k=self.k
)
elif self.search_type == "similarity_limit":
docs = await self.vectorstore.asimilarity_search_limit_score(
query,
index_name=index_name,
k=self.k,
score_threshold=self.score_threshold,
)
else:
raise ValueError(f"search_type of {self.search_type} not allowed.")
return docs
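if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module). It assumes a
    # running Redis Stack instance at localhost:6379 and valid OpenAI credentials
    # for the embedding model; adjust both before running.
    from langchain.embeddings import OpenAIEmbeddings

    embeddings = OpenAIEmbeddings()
    store = Redis.from_texts(
        ["Redis is an in-memory data store.", "RediSearch adds vector search."],
        embeddings,
        index_name="demo-index",
        redis_url="redis://localhost:6379",
    )
    # Unlike the stock langchain wrapper, this one takes the index name on every
    # query, which lets a single client serve many per-user indexes.
    for doc in store.similarity_search("vector search", index_name="demo-index", k=1):
        print(doc.page_content)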
| [] |
2024-01-10 | c0sogi/LLMChat | tests~test_vectorstore.py | from asyncio import gather
from uuid import uuid4
import pytest
from langchain.docstore.document import Document
from app.database.connection import cache
from app.utils.chat.managers.vectorstore import VectorStoreManager
@pytest.mark.asyncio
async def test_embedding_single_index(config, test_logger):
"""Warning! This is expensive test!
It costs a lot to embed the text"""
cache.start(config=config)
test_logger.info("Testing embedding")
collection_name: str = uuid4().hex
test_logger.info(f"Collection name: {collection_name}")
sample_text = (
"Madam Speaker, Madam Vice President, our First Lady and Second Gentleman. Member"
"s of Congress and the Cabinet. Justices of the Supreme Court. My fellow American"
"s. Last year COVID-19 kept us apart. This year we are finally together again. "
" Tonight, we meet as Democrats Republicans and Independents. But most importantl"
"y as Americans. With a duty to one another to the American people to the Consti"
"tution. And with an unwavering resolve that freedom will always triumph over ty"
"ranny. Six days ago, Russia’s Vladimir Putin sought to shake the foundations of"
" the free world thinking he could make it bend to his menacing ways. But he badl"
"y miscalculated. He thought he could roll into Ukraine and the world would roll "
"over. Instead he met a wall of strength he never imagined. He met the Ukrainian"
" people. From President Zelenskyy to every Ukrainian, their fearlessness, their"
" courage, their determination, inspires the world. Groups of citizens blocking "
"tanks with their bodies. Everyone from students to retirees teachers turned sold"
"iers defending their homeland. In this struggle as President Zelenskyy said in "
"his speech to the European Parliament “Light will win over darkness.” The Ukrain"
"ian Ambassador to the United States is here tonight. Let each of us here tonight"
" in this Chamber send an unmistakable signal to Ukraine and to the world. Pleas"
"e rise if you are able and show that, Yes, we the United States of America stand"
" with the Ukrainian people. Throughout our history we’ve learned this lesson whe"
"n dictators do not pay a price for their aggression they cause more chaos. They "
"keep moving. And the costs and the threats to America and the world keep rising."
" That’s why the NATO Alliance was created to secure peace and stability in Eu"
"rope after World War 2. The United States is a member along with 29 other nation"
"s. It matters. American diplomacy matters. American resolve matters. Putin’s l"
"atest attack on Ukraine was premeditated and unprovoked. He rejected repeated e"
"fforts at diplomacy. He thought the West and NATO wouldn’t respond. And he thou"
"ght he could divide us at home. Putin was wrong. We were ready. Here is what we"
" did. We prepared extensively and carefully. We spent months building a coali"
"tion of other freedom-loving nations from Europe and the Americas to Asia and Af"
"rica to confront Putin. I spent countless hours unifying our European allies. We"
" shared with the world in advance what we knew Putin was planning and precisely "
"how he would try to falsely justify his aggression. We countered Russia’s lies"
" with truth. And now that he has acted the free world is holding him accounta"
"ble. Along with twenty-seven members of the European Union including France, Ge"
"rmany, Italy, as well as countries like the United Kingdom, Canada, Japan, Korea"
", Australia, New Zealand, and many others, even Switzerland. We are inflicting p"
"ain on Russia and supporting the people of Ukraine. Putin is now isolated from t"
"he world more than ever. Together with our allies –we are right now enforcing po"
"werful economic sanctions. We are cutting off Russia’s largest banks from the in"
"ternational financial system. Preventing Russia’s central bank from defending "
"the Russian Ruble making Putin’s $630 Billion “war fund” worthless."
)
sample_queries = [
"What has been the response of the Ukrainian people to the Russian invasion, as depicted in the speech?",
(
"What preparations did the speaker mention were made to confront Putin's actions"
", and how does this reflect on the role of NATO and American diplomacy?"
),
(
"What are the specific economic sanctions mentioned in the speech that the United"
" States and its allies are enforcing against Russia, and how do they aim to impa"
"ct Russia's economy and Putin's 'war fund'?"
),
]
empty: list[list[Document]] = await gather(
*[
VectorStoreManager.asimilarity_search(sample_query, collection_name=collection_name, k=3)
for sample_query in sample_queries
]
)
assert all(len(result) == 0 for result in empty)
await VectorStoreManager.create_documents(sample_text, collection_name=collection_name)
results: list[list[Document]] | None = await gather(
*[
VectorStoreManager.asimilarity_search(sample_query, collection_name=collection_name, k=3)
for sample_query in sample_queries
]
)
assert results is not None
for i, result in enumerate(results):
test_logger.info(f"\n### Query Result{i + 1}")
for j, doc in enumerate(result):
test_logger.info(f"-----> Document[{j + 1}]\n{doc.page_content}\n")
@pytest.mark.asyncio
async def test_embedding_multiple_index(config, test_logger):
cache.start(config=config)
test_logger.info("Testing embedding")
collection_names: list[str] = [uuid4().hex for _ in range(2)]
test_logger.info(f"Collection names: {collection_names}")
texts_1 = ["Monkey loves banana", "Apple is red"]
texts_2 = ["Banana is yellow", "Apple is green"]
queries = ["Monkey loves banana", "Apple is red"]
empty: list[list[Document]] = await gather(
*[
VectorStoreManager.asimilarity_search_multiple_collections(query, collection_names=collection_names, k=3)
for query in queries
]
)
assert all(len(result) == 0 for result in empty)
for collection_name, texts in zip(collection_names, [texts_1, texts_2]):
for text in texts:
await VectorStoreManager.create_documents(text, collection_name=collection_name)
queries_results: list[list[tuple[Document, float]]] = await gather(
*[
VectorStoreManager.asimilarity_search_multiple_collections_with_score(
query, collection_names=collection_names, k=3
)
for query in queries
]
)
for query, query_results in zip(queries, queries_results):
for doc, score in query_results:
test_logger.info(f"\n\n\n\nQuery={query}\nScore={score}\nContent={doc.page_content}")
test_logger.info(f"\n\n\n\n\n\nTesting embedding: {queries_results}")
@pytest.mark.asyncio
async def test_embedding_multiple_index_2(config, test_logger):
cache.start(config=config)
test_logger.info("Testing embedding")
collection_names: list[str] = [uuid4().hex for _ in range(2)]
test_logger.info(f"Collection names: {collection_names}")
texts_1 = ["Monkey loves banana", "Apple is red"]
texts_2 = ["Banana is yellow", "Apple is green"]
queries = ["Monkey loves banana", "Apple is red"]
empty: list[list[Document]] = await gather(
*[
VectorStoreManager.asimilarity_search_multiple_collections(query, collection_names=collection_names, k=3)
for query in queries
]
)
assert all(len(result) == 0 for result in empty)
for collection_name, texts in zip(collection_names, [texts_1, texts_2]):
for text in texts:
await VectorStoreManager.create_documents(text, collection_name=collection_name)
queries_results: list[list[tuple[Document, float]]] = await gather(
*[
VectorStoreManager.amax_marginal_relevance_search_multiple_collections_with_score(
query, collection_names=collection_names, k=3
)
for query in queries
]
)
for query, query_results in zip(queries, queries_results):
for doc, score in query_results:
test_logger.info(f"\n\n\n\nQuery={query}\nScore={score}\nContent={doc.page_content}")
test_logger.info(f"\n\n\n\n\n\nTesting embedding: {queries_results}")
| [] |
2024-01-10 | c0sogi/LLMChat | app~utils~chat~file_loader.py | import io
from typing import IO, Any
from langchain.document_loaders.unstructured import UnstructuredBaseLoader
from langchain.docstore.document import Document
from unstructured.partition.auto import partition
class UnstructuredFileIOLoader(UnstructuredBaseLoader):
"""Loader that uses unstructured to load file IO objects."""
def __init__(self, file: IO, filename: str, mode: str = "single", **unstructured_kwargs: Any):
"""Initialize with file path."""
self.file = file
self.filename = filename
super().__init__(mode=mode, **unstructured_kwargs)
def _get_elements(self) -> list:
return partition(file=self.file, file_filename=self.filename, **self.unstructured_kwargs)
def _get_metadata(self) -> dict:
return {}
def read_bytes_to_documents(file: bytes, filename: str) -> list[Document]:
return UnstructuredFileIOLoader(file=io.BytesIO(file), strategy="fast", filename=filename).load()
def read_bytes_to_text(file: bytes, filename: str) -> str:
return "\n\n".join([doc.page_content for doc in read_bytes_to_documents(file=file, filename=filename)])
if __name__ == "__main__":
with open(r"test.pdf", "rb") as f:
file = f.read()
text = read_bytes_to_text(file, "test.pdf")
print(text)
| [] |
2024-01-10 | c0sogi/LLMChat | app~utils~chat~managers~vectorstore.py | from asyncio import gather
from typing import Any, Coroutine
from fastapi.concurrency import run_in_threadpool
from langchain.docstore.document import Document
from langchain.text_splitter import TokenTextSplitter
from qdrant_client import QdrantClient
from app.common.config import (
EMBEDDING_TOKEN_CHUNK_OVERLAP,
EMBEDDING_TOKEN_CHUNK_SIZE,
)
from app.common.lotties import Lotties
from app.database.connection import cache
from app.utils.chat.file_loader import read_bytes_to_text
class VectorStoreManager:
@staticmethod
async def get_vector_size() -> int:
return len(await cache.vectorstore._aembed_query("foo"))
@staticmethod
async def create_documents(
text: str,
collection_name: str,
chunk_size: int = EMBEDDING_TOKEN_CHUNK_SIZE,
chunk_overlap: int = EMBEDDING_TOKEN_CHUNK_OVERLAP,
tokenizer_model: str = "gpt-3.5-turbo",
) -> list[str]:
texts = TokenTextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
model_name=tokenizer_model,
).split_text(text)
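        # TokenTextSplitter counts length with the tokenizer of the given model
        # (gpt-3.5-turbo by default), so each chunk holds at most chunk_size tokens
        # and neighbouring chunks share chunk_overlap tokens.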
assert isinstance(cache.vectorstore.client, QdrantClient)
if (
collection_name
not in await VectorStoreManager.get_all_collection_names()
):
await VectorStoreManager.create_collection(
collection_name=collection_name,
)
await cache.vectorstore.aadd_texts(
texts, collection_name=collection_name
)
return texts
@staticmethod
async def asimilarity_search(
query: str,
collection_name: str,
k: int = 1,
) -> list[Document]:
if (
collection_name
not in await VectorStoreManager.get_all_collection_names()
):
return []
return await cache.vectorstore.asimilarity_search(
query, collection_name=collection_name, k=k
)
@staticmethod
async def asimilarity_search_multiple_collections(
query: str,
collection_names: list[str],
k: int = 1,
) -> list[Document]:
        # Only query collections that actually exist; each query returns up to k
        # documents per collection (shape: (collection, k)), and the nested results
        # are flattened into a single list of at most len(collection_names) * k documents.
query_tasks: list[Coroutine[Any, Any, list[Document]]] = []
existing_collection_names: list[
str
] = await VectorStoreManager.get_all_collection_names()
for collection_name in collection_names:
if collection_name in existing_collection_names:
query_tasks.append(
cache.vectorstore.asimilarity_search(
query,
collection_name=collection_name,
k=k,
)
)
return [
item for sublist in await gather(*query_tasks) for item in sublist
]
@staticmethod
async def asimilarity_search_multiple_collections_with_score(
query: str,
collection_names: list[str],
k: int = 1,
) -> list[tuple[Document, float]]:
        # Only query collections that actually exist; each query returns up to k
        # (Document, score) pairs. Results from all collections are flattened into a
        # single list and sorted by score in descending order.
query_tasks: list[
Coroutine[Any, Any, list[tuple[Document, float]]]
] = []
existing_collection_names: list[
str
] = await VectorStoreManager.get_all_collection_names()
for collection_name in collection_names:
if collection_name in existing_collection_names:
query_tasks.append(
cache.vectorstore.asimilarity_search_with_score(
query,
collection_name=collection_name,
k=k,
)
)
return sorted(
[
item
for sublist in await gather(*query_tasks)
for item in sublist
],
key=lambda x: x[1],
reverse=True,
)
@staticmethod
async def amax_marginal_relevance_search_multiple_collections_with_score(
query: str,
collection_names: list[str],
k: int = 4,
fetch_k: int = 20,
lambda_mult: float = 0.5,
**kwargs: Any,
) -> list[tuple[Document, float]]:
        # Only query collections that actually exist; each MMR query returns up to k
        # (Document, score) pairs. Results from all collections are flattened into a
        # single list and sorted by score in descending order.
query_tasks: list[
Coroutine[Any, Any, list[tuple[Document, float]]]
] = []
existing_collection_names: list[
str
] = await VectorStoreManager.get_all_collection_names()
for collection_name in collection_names:
if collection_name in existing_collection_names:
query_tasks.append(
cache.vectorstore.amax_marginal_relevance_search_with_score(
query=query,
collection_name=collection_name,
k=k,
fetch_k=fetch_k,
lambda_mult=lambda_mult,
)
)
return sorted(
[
item
for sublist in await gather(*query_tasks)
for item in sublist
],
key=lambda x: x[1],
reverse=True,
)
@classmethod
async def embed_file_to_vectorstore(
cls,
file: bytes,
filename: str,
collection_name: str,
) -> str:
# if user uploads file, embed it
try:
text: str = await run_in_threadpool(
read_bytes_to_text, file, filename
)
docs: list[str] = await VectorStoreManager.create_documents(
text, collection_name=collection_name
)
doc_sample_without_triple_backticks: str = docs[0][:100].replace(
"```", "'''"
)
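            # The sample is shown inside markdown backticks in the confirmation
            # message below, so triple backticks are replaced to keep that markup
            # from breaking.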
return Lotties.OK.format(
f'### Successfully Embedded\n`"{doc_sample_without_triple_backticks}"`'
)
except Exception:
return Lotties.FAIL.format(
"### Can't embed this type of file\nTry another file."
)
@staticmethod
async def get_all_collection_names() -> list[str]:
from qdrant_client import grpc
grpc_collections = cache.vectorstore.client.async_grpc_collections
response = await grpc_collections.List(grpc.ListCollectionsRequest())
return [collection.name for collection in response.collections]
@classmethod
async def create_collection(
cls,
collection_name: str,
distance: str = "cosine",
) -> None:
"""Create a collection, asynchronously.
Distance can be one of:
- cosine
- euclid
- dot"""
from qdrant_client import grpc
if distance.lower() == "cosine":
distance = grpc.Distance.Cosine # type: ignore
elif distance.lower() == "euclid":
distance = grpc.Distance.Euclid # type: ignore
elif distance.lower() == "dot":
distance = grpc.Distance.Dot # type: ignore
else:
raise ValueError(f"Unknown distance metric: {distance}")
grpc_collections = cache.vectorstore.client.async_grpc_collections
await grpc_collections.Create(
grpc.CreateCollection( # type: ignore
collection_name=collection_name,
vectors_config=grpc.VectorsConfig(
params=grpc.VectorParams( # type: ignore
size=await cls.get_vector_size(),
distance=distance,
)
), # type: ignore
)
)
@staticmethod
async def delete_collection(collection_name: str) -> bool:
"""Delete a collection, asynchronously."""
from qdrant_client import grpc
grpc_collections = cache.vectorstore.client.async_grpc_collections
response = await grpc_collections.Delete(grpc.DeleteCollection(collection_name=collection_name)) # type: ignore
return getattr(response, "result", False)
@property
def shared_collection_name(self) -> str:
return cache.vectorstore.collection_name # type: ignore
# if __name__ == "__main__":
# import asyncio
# from app.common.config import Config
# sample_texts = (
# "Neural network models can be quite powerful, effectively helping to identify pat"
# "terns and uncover structure in a variety of different tasks, from language trans"
# "lation to pathology to playing games. At the same time, neural models (as well a"
# "s other kinds of machine learning models) can contain problematic biases in many"
# " forms. For example, classifiers trained to detect rude, disrespectful, or unrea"
# 'sonable comments may be more likely to flag the sentence "I am gay" than "I am s'
# 'traight"; face classification models may not perform as well for women of color;'
# " speech transcription may have higher error rates for African Americans than Whi"
# "te Americans.\n\nMany pre-trained machine learning models are widely available for"
# " developers to use -- for example, TensorFlow Hub recently launched its platform"
# " publicly. It's important that when developers use these models in their applica"
# "tions, they're aware of what biases they contain and how they might manifest in "
# "those applications.\n\nHuman data encodes human biases by default. Being aware of "
# "this is a good start, and the conversation around how to handle it is ongoing. A"
# "t Google, we are actively researching unintended bias analysis and mitigation st"
# "rategies because we are committed to making products that work well for everyone"
# ". In this post, we'll examine a few text embedding models, suggest some tools fo"
# "r evaluating certain forms of bias, and discuss how these issues matter when bui"
# "lding applications.\n\nGiven a trained text embedding model, we can directly measu"
# "re the associations the model has between words or phrases. Many of these associ"
# "ations are expected and are helpful for natural language tasks. However, some as"
# "sociations may be problematic or hurtful. For example, the ground-breaking paper"
# ' by Bolukbasi et al. found that the vector-relationship between "man" and "woman'
# '" was similar to the relationship between "physician" and "registered nurse" or '
# '"shopkeeper" and "housewife" in the popular publicly-available word2vec embeddin'
# "g trained on Google News text.\n\nThe Word Embedding Association Test (WEAT) was r"
# "ecently proposed by Caliskan et al. [5] as a way to examine the associations in "
# "word embeddings between concepts captured in the Implicit Association Test (IAT)"
# ". We use the WEAT here as one way to explore some kinds of problematic associati"
# "ons.\n\nThe WEAT test measures the degree to which a model associates sets of targ"
# "et words (e.g., African American names, European American names, flowers, insect"
# 's) with sets of attribute words (e.g., "stable", "pleasant" or "unpleasant"). Th'
# "e association between two given words is defined as the cosine similarity betwee"
# "n the embedding vectors for the words.\n\nFor example, the target lists for the fi"
# "rst WEAT test are types of flowers and insects, and the attributes are pleasant "
# 'words (e.g., "love", "peace") and unpleasant words (e.g., "hatred," "ugly"). The'
# " overall test score is the degree to which flowers are more associated with the "
# "pleasant words, relative to insects. A high positive score (the score can range "
# "between 2.0 and -2.0) means that flowers are more associated with pleasant words"
# ", and a high negative score means that insects are more associated with pleasant"
# " words.\n\nWhile the first two WEAT tests proposed in Caliskan et al. measure asso"
# "ciations that are of little social concern (except perhaps to entomologists), th"
# "e remaining tests measure more problematic biases.\n"
# )
# sample_queries = [
# "Why Google researched unintended bias analysis and mitigation strategies?",
# "What is the WEAT test?",
# ]
# async def main():
# # await vectorstore_manager.create_documents(texts=sample_texts, chunk_size=50)
# collection_name = "abc"
# results: list[list[Document]] | None = await gather(
# *[
# VectorStoreManager.asimilarity_search(query=sample_query, collection_name=collection_name)
# for sample_query in sample_queries
# ]
# )
# if results is None:
# print("Index does not exist")
# return
# for idx, docs in enumerate(results):
# print(f"Query: {sample_queries[idx]}\nDocs: {[doc.page_content for doc in docs]}\n\n")
# cache.start(config=Config.get("test"))
# asyncio.run(main())
| [] |
2024-01-10 | c0sogi/LLMChat | app~utils~chat~messages~converter.py | """A module for converting message histories to different formats."""
from copy import deepcopy
from functools import partial
from typing import TYPE_CHECKING, Any, Callable, Optional, TypeVar
from langchain import PromptTemplate
from app.common.constants import ChatTurnTemplates
from app.models.base_models import (
APIChatMessage,
MessageHistory,
UserChatRoles,
)
from app.models.chat_models import ChatRoles
from .turn_templates import shatter_chat_turn_prompt
if TYPE_CHECKING:
from langchain.schema import BaseMessage
T = TypeVar("T")
def langchain_parse_method(message_history: MessageHistory) -> "BaseMessage":
"""Parse message history to langchain message format."""
from langchain.schema import (
AIMessage,
FunctionMessage,
HumanMessage,
SystemMessage,
)
if message_history.summarized is not None:
message_history = deepcopy(message_history)
if message_history.summarized is not None:
message_history.content = message_history.summarized
if message_history.actual_role == ChatRoles.USER.value:
return HumanMessage(content=message_history.content)
elif message_history.actual_role == ChatRoles.AI.value:
return AIMessage(content=message_history.content)
else:
if message_history.role.startswith("function:"):
return FunctionMessage(
name=message_history.role.removeprefix("function:"),
content=message_history.content,
)
return SystemMessage(content=message_history.content)
def chat_completion_api_parse_method(
message_history: MessageHistory,
) -> dict[str, str]:
"""Parse message history to Chat Completion API message format.
Used when sending message to Chat Completion API."""
if message_history.summarized is not None:
message_history = deepcopy(message_history)
if message_history.summarized is not None:
message_history.content = message_history.summarized
if message_history.actual_role == ChatRoles.USER.value:
return APIChatMessage(
role="user",
content=message_history.content,
).dict(exclude_none=True)
elif message_history.actual_role == ChatRoles.AI.value:
return APIChatMessage(
role="assistant",
content=message_history.content,
).dict(exclude_none=True)
else:
if message_history.role.startswith("function:"):
return APIChatMessage(
role="function",
content=message_history.content,
name=message_history.role.removeprefix("function:").strip(),
).dict(exclude_none=True)
return APIChatMessage(
role="system",
content=message_history.content,
).dict(exclude_none=True)
def text_completion_api_parse_method(
message_history: MessageHistory, chat_turn_prompt: PromptTemplate
) -> str:
"""Parse message history to Text Completion API message format.
Used when sending message to Text Completion API."""
if message_history.summarized is not None:
message_history = deepcopy(message_history)
if message_history.summarized is not None:
message_history.content = message_history.summarized
return chat_turn_prompt.format(
role=message_history.role,
content=message_history.content.strip(),
)
# Frontend message format:
# message: msg["content"] ?? "",
# isGptSpeaking: msg["actual_role"] != "user" ? true : false,
# isFinished: true,
# datetime: parseLocaltimeFromTimestamp(msg["timestamp"]),
# modelName: msg["model_name"],
# uuid: msg["uuid"],
def init_parse_method(message_history: MessageHistory) -> dict[str, Any]:
"""Parse initial message history to frontend message format.
Used when sending message to Flutter frontend."""
return MessageHistory.from_orm(message_history).dict(
exclude={
"summarized",
"summarized_tokens",
"role",
"tokens",
}
)
def message_histories_to_list(
parse_method: Callable[[MessageHistory], T],
user_message_histories: list[MessageHistory],
ai_message_histories: list[MessageHistory],
system_message_histories: Optional[list[MessageHistory]] = None,
) -> list[T]:
"""Convert message histories to list of messages.
Messages are sorted by timestamp.
Prefix and suffix prompts are added to the list of messages."""
return [
parse_method(message_history)
for message_history in sorted(
user_message_histories
+ ai_message_histories
+ (system_message_histories or []),
key=lambda m: m.timestamp,
)
]
def message_histories_to_str(
user_chat_roles: UserChatRoles,
user_message_histories: list[MessageHistory],
ai_message_histories: list[MessageHistory],
system_message_histories: Optional[list[MessageHistory]] = None,
parse_method: Optional[Callable[[MessageHistory], str]] = None,
chat_turn_prompt: PromptTemplate = ChatTurnTemplates.ROLE_CONTENT_1,
) -> str:
"""Convert message histories to string.
Messages are sorted by timestamp.
Prefix and suffix prompts are added to the list of messages."""
shattered: tuple[str, ...] = shatter_chat_turn_prompt(
"role", "content", chat_turn_prompt=chat_turn_prompt
)
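    # e.g. for the default turn template "### {role}: {content}\n" this yields
    # ('### ', '{role}', ': ', '{content}', '\n'); the pieces around the role are
    # reused below to open an empty AI turn for the model to complete.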
if parse_method is None:
parse_method = partial(
text_completion_api_parse_method,
chat_turn_prompt=chat_turn_prompt,
)
return (
"".join(
message_histories_to_list(
parse_method=parse_method,
user_message_histories=user_message_histories,
ai_message_histories=ai_message_histories,
system_message_histories=system_message_histories,
)
)
+ f"{shattered[0]}{user_chat_roles.ai}{shattered[2]}"
)
| [] |
2024-01-10 | c0sogi/LLMChat | app~utils~langchain~chat_llama_cpp.py | from langchain import LlamaCpp
from pydantic import Field, root_validator
class CustomLlamaCpp(LlamaCpp):
low_vram: bool = Field(False, alias="low_vram")
@root_validator()
def validate_environment(cls, values: dict) -> dict:
"""Validate that llama-cpp-python library is installed."""
model_path = values["model_path"]
model_param_names = [
"lora_path",
"lora_base",
"n_ctx",
"n_parts",
"seed",
"f16_kv",
"logits_all",
"vocab_only",
"use_mlock",
"n_threads",
"n_batch",
"use_mmap",
"last_n_tokens_size",
]
model_params = {k: values[k] for k in model_param_names}
# For backwards compatibility, only include if non-null.
if values["n_gpu_layers"] is not None:
model_params["n_gpu_layers"] = values["n_gpu_layers"]
if values["low_vram"] is not None:
model_params["low_vram"] = values["low_vram"]
try:
from llama_cpp import Llama
values["client"] = Llama(model_path, **model_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import llama-cpp-python library. "
"Please install the llama-cpp-python library to "
"use this embedding model: pip install llama-cpp-python"
)
except Exception as e:
raise ValueError(
f"Could not load Llama model from path: {model_path}. "
f"Received error {e}"
)
return values
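if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module). The model path
    # is a placeholder and llama-cpp-python must be installed; low_vram is simply
    # forwarded to the underlying Llama constructor by the validator above.
    llm = CustomLlamaCpp(
        model_path="./models/llama-7b.ggmlv3.q4_0.bin",
        n_ctx=2048,
        n_gpu_layers=20,
        low_vram=True,
    )
    print(llm("Q: What is the capital of France?\nA:"))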
| [] |
2024-01-10 | c0sogi/LLMChat | app~utils~function_calling~request.py | from asyncio import wait_for
from typing import Any, Literal, Optional
from app.common.config import OPENAI_API_KEY, ChatConfig
from app.models.completion_models import FunctionCallParsed
from app.models.function_calling.base import FunctionCall
from app.utils.api.completion import request_chat_completion
from .parser import make_function_call_parsed_from_dict
async def request_function_call(
messages: list[dict[str, str]],
functions: list[FunctionCall],
function_call: Optional[FunctionCall | Literal["auto", "none"]] = "auto",
model: str = ChatConfig.global_openai_model,
api_base: str = "https://api.openai.com/v1",
api_key: Optional[str] = OPENAI_API_KEY,
timeout: Optional[float] = None,
force_arguments: bool = False,
**kwargs: Any,
) -> FunctionCallParsed:
coro = request_chat_completion(
messages=messages,
model=model,
api_base=api_base,
api_key=api_key,
functions=functions,
function_call=function_call,
**kwargs,
)
if timeout is not None:
coro = wait_for(coro, timeout=timeout)
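    # The chat completion response nests the call under choices[0].message.function_call;
    # the key is absent when the model decided to reply with plain text instead.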
function_call_unparsed = (await coro)["choices"][0]["message"].get(
"function_call"
)
if function_call_unparsed is None:
raise ValueError("No function call returned")
function_call_parsed = make_function_call_parsed_from_dict(
function_call_unparsed
)
if force_arguments and "arguments" not in function_call_parsed:
raise ValueError("No arguments returned")
return function_call_parsed
| [] |
2024-01-10 | c0sogi/LLMChat | app~utils~chat~messages~turn_templates.py | """This module contains functions to extract information from chat turn templates."""
from re import DOTALL, compile
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from langchain import PromptTemplate
def shatter_chat_turn_prompt(
*keys: str, chat_turn_prompt: "PromptTemplate"
) -> tuple[str, ...]:
"""Identify the chat turn template and return the shatter result.
e.g. If template of chat_turn_prompt is "### {role}: {content} </s>"
and keys are "role" and "content",
then the result will be ('### ', "{role}", ': ', "{content}", ' </s>')."""
pattern: str = "(.*)"
kwargs: dict[str, str] = {}
for key in keys:
kwargs[key] = "{" + key + "}"
pattern += f"({kwargs[key]})(.*)"
search_result = compile(pattern, flags=DOTALL).match(
chat_turn_prompt.format(**kwargs)
)
if search_result is None:
raise ValueError(
f"Invalid chat turn prompt: {chat_turn_prompt.format(**kwargs)}"
)
return search_result.groups()
def identify_end_of_string(*keys, chat_turn_prompt: "PromptTemplate") -> Optional[str]:
"""Identify the end of string in the chat turn prompt.
e.g. If template of chat_turn_prompt is "### {role}: {content} </s>"
then the result will be "</s>".
If there is no end of string, then the result will be None."""
return shatter_chat_turn_prompt(*keys, chat_turn_prompt=chat_turn_prompt)[
-1
].strip()
if __name__ == "__main__":
from langchain import PromptTemplate
input_variables = ["role", "content"]
for template, template_format in (
("### {role}: {content} </s>", "f-string"),
("### {{role}}: {{content}} </s>", "jinja2"),
):
chat_turn_prompt = PromptTemplate(
template=template,
input_variables=input_variables,
template_format=template_format,
)
print(
"Shattered String:",
shatter_chat_turn_prompt(
*input_variables, chat_turn_prompt=chat_turn_prompt
),
) # ('### ', '{role}', ': ', '{content}', ' </s>')
print(
"End-of-String:",
identify_end_of_string(*input_variables, chat_turn_prompt=chat_turn_prompt),
) # </s>
| [] |
2024-01-10 | c0sogi/LLMChat | app~common~constants.py | from re import compile, Pattern
from langchain import PromptTemplate
class QueryTemplates:
CONTEXT_QUESTION__WEB_BROWSING = PromptTemplate(
template=(
"The results of a web search for the user's question are shown below, enclosed in triple dashes(---).\n"
"You can use this information to answer user's question."
"\n---\n"
"{context}"
"\n---\n"
"Answer the question in as much detail as possible: {question}\n"
),
input_variables=["context", "question"],
template_format="f-string",
)
CONTEXT_QUESTION__CONTEXT_ONLY = PromptTemplate(
template=(
"Context information is below."
"\n---\n"
"{context}"
"\n---\n"
"Given the context information and not prior knowledge, "
"answer the question in as much detail as possible:: {question}\n"
),
input_variables=["context", "question"],
template_format="f-string",
)
class DescriptionTemplates:
USER_AI__DEFAULT: PromptTemplate = PromptTemplate(
template=(
"The following is a friendly conversation between a {user} and an {ai}. "
"The {ai} is talkative and provides lots of specific details from its context. "
"If the {ai} does not know the answer to a question, it truthfully says it does not know.\n\n"
"Current conversation:\n\n"
),
input_variables=["user", "ai"],
template_format="f-string",
)
USER_AI__SHORT: PromptTemplate = PromptTemplate(
template=(
"A chat between a curious human and an artificial intelligence assistant. "
"The assistant gives helpful, detailed, and polite answers to the human's questions."
),
input_variables=[],
)
USER_AI__VERT_SHORT: PromptTemplate = PromptTemplate(
template="You are a helpful AI assistant.",
input_variables=[],
)
USER_AI__GAME: PromptTemplate = PromptTemplate(
template=(
"Make Narrator perform as a text based adventure game with Player as Narrator's u"
"ser input. Make Narrator describe the scene, scenario, actions of characters, re"
"actions of characters to the player's actions, and potential consequences of the"
"ir actions and Player's actions when relevant with visually descriptive, detaile"
"d, and long storytelling. Allow characters and Player to converse to immerse Pla"
"yer in a rich narrative driven story. When Player encounters a new character, Na"
"rrator will name the new character and describe their behavior and appearance. N"
"arrator will internally determine their underlying motivations and weave it into"
" the story where possible."
),
input_variables=[],
)
USER_AI__ENGLISH: PromptTemplate = PromptTemplate(
template=(
"You are a good English teacher. Any sentence that {user} says that is surrounded"
' by double quotation marks ("") is asking how you interpret that sentence. Pleas'
"e analyze and explain that sentence in as much detail as possible. For the rest "
"of the sentences, please respond in a way that will help {user} learn English."
),
input_variables=["user"],
)
class ChatTurnTemplates:
ROLE_CONTENT_1: PromptTemplate = PromptTemplate(
template="### {role}: {content}\n",
input_variables=["role", "content"],
template_format="f-string",
)
ROLE_CONTENT_2: PromptTemplate = PromptTemplate(
template="### {role}:\n{content}\n",
input_variables=["role", "content"],
template_format="f-string",
)
ROLE_CONTENT_3: PromptTemplate = PromptTemplate(
template="# {role}:\n{content}\n",
input_variables=["role", "content"],
template_format="f-string",
)
ROLE_CONTENT_4: PromptTemplate = PromptTemplate(
template="###{role}: {content}\n",
input_variables=["role", "content"],
template_format="f-string",
)
ROLE_CONTENT_5: PromptTemplate = PromptTemplate(
template="{role}: {content}\n",
input_variables=["role", "content"],
template_format="f-string",
)
ROLE_CONTENT_6: PromptTemplate = PromptTemplate(
template="{role}: {content}</s>",
input_variables=["role", "content"],
template_format="f-string",
)
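    # Illustrative note: with the f-string templates above,
    # ChatTurnTemplates.ROLE_CONTENT_1.format(role="user", content="hi")
    # renders to "### user: hi\n".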
class SummarizationTemplates:
TEXT__MARKUP: PromptTemplate = PromptTemplate(
template=(
"Write a concise summary of the following text delimited by triple backquotes. "
"Return your response in bullet points which covers the key points of the text.\n"
"```\n{text}\n```\n\nBULLET POINT SUMMARY:\n"
),
input_variables=["text"],
)
TEXT__CONVERSATIONAL: PromptTemplate = PromptTemplate(
template=(
"Write a summary of the following conversations delimited by triple backquotes.\n"
"Organize the key points of each message in the order of the conversation in a format like "
"`ROLE: SUMMARY`.\n"
"```\n{text}\n```\n\nCONVERSATION SUMMARY:\n"
),
input_variables=["text"],
)
class JsonTemplates:
"""Deprecated since OpenAI's function call is much better"""
QUERY__GET_QUERY_FOR_WEB_BROWSING: PromptTemplate = PromptTemplate(
template=(
"You are a Web search API bot that performs a web search for a user's question. F"
"ollow the rules below to output a response.\n- Output the query to search the web"
' for USER\'S QUESTION in JSON format like {"query": QUERY_TO_SEARCH}.\n- QUERY_TO_SEARCH i'
"s a set of words within 10 words.\n- Your response must be in JSON format, starti"
"ng with { and ending with }.\n- Output a generalized query to return sufficiently"
" relevant results when searching the web.\n- If a suitable search query does not "
'exist, output {"query": null} - don\'t be afraid to output null!\n\n'
"```USER'S QUESTION\n{{query}}\n```"
"\nYOUR JSON RESPONSE:\n"
),
input_variables=["query"],
template_format="jinja2",
)
QUERY__GET_QUERY_FOR_VECTORSTORE: PromptTemplate = PromptTemplate(
template=(
"You are a Search API bot performing a vector similarity-based search for a user'"
"s question. Follow the rules below to output a response.\n- Output the query to s"
'earch the web for USER\'S QUESTION in JSON format like {"query": QUERY_TO_SEARCH}.\n-'
" QUERY_TO_SEARCH creates a hypothetical answer to facilitate searching in the Ve"
"ctor database.\n- Your response must be in JSON format, starting with { and endin"
'g with }.\n- If a suitable search query does not exist, output {"query": NULL} - '
"don't be afraid to output NULL!"
"```USER'S QUESTION\n{{query}}\n```"
"\nYOUR JSON RESPONSE:\n"
),
input_variables=["query"],
template_format="jinja2",
)
CONTEXT_QUERY__CLICK_LINK_OR_FINISH: PromptTemplate = PromptTemplate(
template=(
"You are a JSON response bot that determines if the provided CONTEXT is sufficient to "
"answer the user's question. Follow the rules below to output a response.\n- Outpu"
't your next action to do in JSON form like {"action": YOUR_NEXT_ACTION, "link": '
'LINK_TO_CLICK}.\n- "action" should be one of "click", "finish".\n- {"action": "cli'
'ck"} should be selected when you want to click on a link to read more about it.\n'
'- {"action": "finish"} should be selected when the information already provided '
'is sufficient to answer the user.\n- "link" should be a link to click. You don\'t '
'have to output "link" if you decided to take "action" as "finish".\n- CONTEXT con'
"sists of multiple #[LINK]\\n```TITLE\\nSNIPPET\n```CONTEXT\n{{context}}\n```\n```USER'"
"S QUESTION\n{{query}}\n```"
"\nYOUR JSON RESPONSE:\n"
),
input_variables=["context", "query"],
template_format="jinja2",
)
CONTEXT_QUERY__ANSWERABLE_OR_NOT: PromptTemplate = PromptTemplate(
template=(
"You are a JSON response bot that uses the context provided to determine if you c"
"an answer the user's question. Follow the rules below to output a response.\n- Ou"
'tput your next action to do in JSON format like {"answerable": TRUE_OR_FALSE}. This '
'is important.\n- "answerable" should be one of true or false.\n- CONTEXT and USER\'s QU'
"ESTION are surrounded by triple backticks.\n```CONTEXT\n{{context}}\n```\n```USER'S "
"QUESTION\n{{query}}\n```"
"\nYOUR JSON RESPONSE:\n"
),
input_variables=["context", "query"],
template_format="jinja2",
)
class SystemPrompts:
CODEX: str = (
'Act as CODEX ("COding DEsign eXpert"), an expert coder with experience in mult'
"iple coding languages. Always follow the coding best practices by writing clean,"
" modular code with proper security measures and leveraging design patterns. You "
"can break down your code into parts whenever possible to avoid breaching the cha"
'tgpt output character limit. Write code part by part when I send "continue". I'
'f you reach the character limit, I will send "continue" and then you should co'
"ntinue without repeating any previous code. Do not assume anything from your sid"
"e; please ask me a numbered list of essential questions before starting. If you "
"have trouble fixing a bug, ask me for the latest code snippets for reference fro"
"m the official documentation. I am using [MacOS], [VSCode] and prefer [brew] pac"
'kage manager. Start a conversation as "CODEX: Hi, what are we coding today?'
)
REDEX: str = (
"compress the following text in a way that fits in a tweet (ideally) and such tha"
"t you (GPT) can reconstruct the intention of the human who wrote text as close a"
"s possible to the original intention. This is for yourself. It does not need to "
"be human readable or understandable. Abuse of language mixing, abbreviations, sy"
"mbols (unicode and emoji), or any other encodings or internal representations is"
" all permissible, as long as it, if pasted in a new inference cycle, will yield "
"near-identical results as the original text: "
)
LONG_PROMPT = (
"This year, the elves invested in a gift-wrapping machine. However, it i"
"sn't programmed! An algorithm that aids it in the task must be developed. Many p"
"resents are given to the machine. Each present is a string. Each gift must be wr"
"apped by the machine and set in a display of other wrapped gifts. To wrap a gift"
", you must place the wrapping paper around the string, which is represented by t"
"he * symbol. For instance: const gifts are [“cat,” “game,” and “socks”]. console"
".log const wrapped = wrapping(gifts) (wrapped) / [“ncatn,” “ngamen,” and “nsocks"
"n**”] */ As you can see, the thread is wrapped in the wrapping paper. The corner"
"s are also wrapped in wrapping paper on the top and bottom to prevent any gaps."
)
CONVERSATION_EXAMPLES: list[dict[str, str]] = [
{
"role": "user",
"content": (
"are you familiar with a technology startup called Jigsaw that was acquired by Sa"
"lesforce.com? can you provide a detailed summary of its company history, busines"
"s model, and product?"
),
},
{
"role": "assistant",
"content": (
"As of my knowledge cutoff date in September 2021, I am not aware of a technology"
" startup called Jigsaw that was acquired by Salesforce.com. However, there was a"
" company called Jigsaw Data Corporation, which was acquired by Salesforce in 201"
"0. I can provide you with information on that company, but please note that my k"
"nowledge may be outdated, and there may be another Jigsaw startup that I am not "
"aware of.\nJigsaw Data Corporation was a technology company founded in 2004 by Ji"
"m Fowler and Garth Moulton. The company specialized in providing crowd-sourced b"
"usiness contact data, offering a cloud-based platform for users to access and up"
"date business contacts and company information.\nThe business model of Jigsaw was"
" unique at that time, as it relied on a collaborative, crowd-sourced approach to"
" collect and maintain data. Users could access the platform for free by contribu"
"ting their own contact information, or they could purchase points to access the "
"data without contributing. This model incentivized users to add and update conta"
"ct information, ensuring the database remained fresh and accurate.\nJigsaw's prim"
"ary product was an online directory of business contacts and company information"
". The platform provided its users with access to a vast database containing mill"
"ions of business contacts across various industries. Users could search for cont"
"acts by various criteria such as name, title, company, or location. Additionally"
", Jigsaw offered a suite of data management tools that allowed users to import, "
"export, and manage their contacts efficiently.\nIn April 2010, Salesforce.com, a "
"leading cloud-based customer relationship management (CRM) company, announced th"
"e acquisition of Jigsaw Data Corporation for approximately $142 million in cash."
" The acquisition aimed to enhance Salesforce's CRM offerings by integrating Jigs"
"aw's data services into its platform, allowing users to access updated business "
"contact information within their CRM system directly.\nAfter the acquisition, Jig"
'saw\'s product was rebranded as "Data.com" and became an integral part of the Sal'
"esforce.com ecosystem. Data.com offered Salesforce customers access to the crowd"
"-sourced contact database, as well as additional data management and enrichment "
"services.\nPlease note that this information is accurate as of my knowledge cutof"
"f date in September 2021, and there may have been further developments or change"
"s to the company and its products since then."
),
},
{
"role": "user",
"content": (
"That's the correct company. Thank you. can you tell me more about its unique cro"
"wd-sourced approach to collecting and maintaining data? which other companies ha"
"ve used a similar approach?"
),
},
{
"role": "assistant",
"content": (
"Jigsaw Data Corporation's crowd-sourced approach to collecting and maintaining b"
"usiness contact data relied on its user community to contribute, update, and ver"
"ify information. This model had several key components:\n\nContribution: Users cou"
"ld create a free account on Jigsaw's platform by contributing their own business"
" contact information. They could also add new contacts to the database and gain "
"points, which could be used to access other contacts' information.\n\nVerification"
": Jigsaw encouraged users to verify the accuracy of contact information in the d"
"atabase by rewarding them with points for each correction made. This created an "
"incentive for users to keep the data up-to-date and accurate.\n\nPoints system: Ji"
"gsaw's points system played a crucial role in its crowd-sourced model. Users cou"
"ld earn points by contributing new contacts, verifying existing contacts, or ref"
"erring new users to the platform. These points could then be used to access cont"
"act information without having to contribute their own.\n\nData exchange: Jigsaw a"
"llowed users to exchange their points for contact information. Users who didn't "
"want to contribute their own data could purchase points, while those who contrib"
"uted regularly could access the database for free by using their earned points.\n"
"\nData quality: To ensure data quality, Jigsaw implemented a rating system for us"
"ers based on their contributions and accuracy. Users with higher ratings were co"
"nsidered more reliable, which motivated users to contribute accurate and up-to-d"
"ate information.\n\nSeveral other companies have used crowd-sourcing techniques in"
" different industries, although not necessarily in the same way as Jigsaw:\n\nWaze"
": Waze is a popular GPS navigation app that uses crowd-sourced data from its use"
"rs to provide real-time traffic information, road conditions, and other driving-"
"related updates.\n\nWikipedia: Wikipedia is a free online encyclopedia that relies"
" on its user community to create, edit, and maintain its content. It has become "
"one of the largest and most comprehensive sources of information on the internet"
".\n\nOpenStreetMap: OpenStreetMap is a collaborative project that allows users to "
"create and edit map data. It relies on a global community of contributors to mai"
"ntain accurate and up-to-date mapping information.\n\nMechanical Turk: Amazon's Me"
"chanical Turk is a marketplace for human intelligence tasks (HITs), which allows"
" users to complete small tasks in exchange for payment. It relies on the crowd t"
"o complete tasks that are challenging for machines but simple for humans.\n\nKaggl"
"e: Kaggle is a platform for data scientists and machine learning practitioners t"
"o participate in competitions, collaborate on projects, and learn from each othe"
"r. It uses crowd-sourcing to find innovative solutions to complex data problems."
),
},
{
"role": "user",
"content": (
"do you think that a crowd-sourced approach to data collection could work for an "
"AI startup? specifically, the startup would award users points for contributing "
"proprietary data used to train the AI model. in exchange, users could spend poin"
'ts by asking the AI questions. for example, an "AI architect" startup could give'
" users points for contributing architectural plans and CAD drawings. users could"
" then spend those points by asking the AI to design new plans. do you think this"
" idea is viable? "
),
},
{
"role": "assistant",
"content": (
"A crowd-sourced approach to data collection can indeed work for an AI startup, e"
"specially if the startup aims to develop AI models that require a vast and diver"
"se dataset to perform well. The idea you've proposed, in which users contribute "
"proprietary data to train the AI model and can later use the AI's services by sp"
"ending points, has some potential advantages:\n\nIncentivized data contribution: B"
"y rewarding users with points for contributing data, you create an incentive for"
" them to share valuable information, which can help grow and diversify your data"
"set.\n\nEncouraging user engagement: A points-based system encourages users to act"
"ively participate in the platform, which can help improve the AI model over time"
" and increase user satisfaction.\n\nCost-effective data acquisition: Crowd-sourcin"
"g can be a cost-effective way to gather large amounts of data, as it leverages t"
"he efforts of a large community rather than relying on paid data collection serv"
"ices.\n\nContinuous improvement: As users contribute more data and use the AI's se"
"rvices, the model can be iteratively improved, leading to better performance and"
" more valuable insights.\n\nHowever, there are also challenges and potential pitfa"
"lls to consider:\n\nData quality: Ensuring the quality and accuracy of the contrib"
"uted data is critical. You may need to implement verification processes, such as"
" peer review or expert validation, to maintain data quality.\n\nIntellectual prope"
"rty and privacy: Handling proprietary data comes with legal and ethical consider"
"ations. You'll need to establish clear terms of use, ensure data protection, and"
" address any intellectual property concerns when users contribute data.\n\nScalabi"
"lity: If the AI's services become popular, you may face challenges related to sc"
"aling the infrastructure and resources needed to support a growing user base.\n\nU"
"ser retention: Maintaining user engagement and interest in the platform can be c"
"hallenging, especially as users may eventually exhaust their need for the AI's s"
"ervices.\n\nMonetization: Balancing the points-based system with a sustainable rev"
"enue model might be challenging, as users may prefer to use the platform for fre"
"e by contributing data rather than paying for services.\n\nOverall, the viability "
"of this idea depends on various factors, including the nature of the AI model, t"
"he target market, the quality and relevance of the contributed data, and the abi"
"lity to address the challenges mentioned above. If the startup can successfully "
"navigate these challenges, a crowd-sourced approach to data collection could be "
"a promising strategy."
),
},
{
"role": "user",
"content": (
"thank you, that was an excellent answer. besides architecture, what are some oth"
"er areas where this crowd-sourced approach to data collection could be used in o"
"rder to improve an AI model? specifically, what are some valuable data sets that"
" are somewhat proprietary and hard to get, but users who own that data may be wi"
"lling to share it in exchange for points? "
),
},
{
"role": "assistant",
"content": (
"Thank you for your kind words. There are several areas where a crowd-sourced app"
"roach to data collection could be used to improve AI models by leveraging valuab"
"le and somewhat proprietary datasets. Here are a few examples:\n\nMedical and heal"
"th data: AI models can greatly benefit from access to diverse patient data, such"
" as electronic health records, medical imaging, and genomic data. Users (patient"
"s or healthcare professionals) might be willing to share anonymized data in exch"
"ange for points, which could then be used to access AI-driven health insights, p"
"ersonalized treatment suggestions, or early disease detection.\n\nLegal document a"
"nalysis: Law firms and legal professionals often have access to large collection"
"s of legal documents, such as contracts, court rulings, or patent filings. By sh"
"aring these documents, users could contribute to training AI models for legal do"
"cument analysis, and in return, gain access to AI-driven legal research tools or"
" contract review services.\n\nArt and creative work: Artists and designers may pos"
"sess large collections of their own artwork, sketches, or designs. Sharing this "
"data could help train AI models for artistic style transfer, generative art, or "
"design assistance. Users could then access AI-driven creative tools or personali"
"zed design suggestions.\n\nFinance and investment: Financial professionals and inv"
"estors may have access to proprietary trading algorithms, portfolio data, or mar"
"ket analysis reports. By sharing this data, they could contribute to AI models f"
"or financial analysis and predictions. In return, users could gain access to AI-"
"driven investment advice, risk assessment, or market forecasting tools.\n\nScienti"
"fic research data: Researchers in various fields might have access to valuable d"
"atasets generated through experiments or simulations. By sharing this data, they"
" can help train AI models for data analysis, pattern recognition, or predictive "
"modeling in their respective domains. Users could then access AI-driven research"
" tools or personalized research recommendations.\n\nManufacturing and production d"
"ata: Companies involved in manufacturing and production may possess proprietary "
"data on production processes, quality control, and equipment performance. Sharin"
"g this data could improve AI models for predictive maintenance, process optimiza"
"tion, and quality assurance. Users could then gain access to AI-driven optimizat"
"ion suggestions or equipment monitoring services.\n"
),
},
]
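# Presumably used to pull a JSON object out of a model response: greedily
# matches from the first "{" to the last "}" on a single line (re.DOTALL is
# not set, so the match does not span newlines).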
JSON_PATTERN: Pattern = compile(r"\{.*\}")
if __name__ == "__main__":
import sys
try:
import pyperclip
except ImportError:
pyperclip = None
ANSI_COLORS = {
"red": "\033[91m",
"green": "\033[92m",
"yellow": "\033[93m",
"blue": "\033[94m",
"magenta": "\033[95m",
"cyan": "\033[96m",
"white": "\033[97m",
"black": "\033[98m",
"end": "\033[0m",
}
def split_long_text(long_text: str, chars_per_line: int):
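        # Break long_text into fixed-width chunks and repr() each one, so the
        # result can be pasted back into source code as adjacent string
        # literals wrapped in parentheses (the style used for the message
        # contents above), e.g. split_long_text("abcdef", 3) -> "('abc'\n'def')".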
split_strings = [
repr(long_text[i : i + chars_per_line])
for i in range(0, len(long_text), chars_per_line)
]
return "(" + "\n".join(split_strings) + ")"
while True:
lines = []
print(
f"{ANSI_COLORS['cyan']}> Input long texts to compress:{ANSI_COLORS['end']}"
)
try:
for line in sys.stdin:
line = line.strip()
lines.append(line)
except KeyboardInterrupt:
pass
if not lines:
print(f"{ANSI_COLORS['red']}No input, exiting...{ANSI_COLORS['end']}")
break
# Join the lines with newline characters
long_text = "\n".join(lines)
result = split_long_text(long_text, 66)
print(f"\n\n{ANSI_COLORS['green']}{result}{ANSI_COLORS['end']}\n\n")
if pyperclip:
pyperclip.copy(result)
print(f"{ANSI_COLORS['yellow']}Copied to clipboard!{ANSI_COLORS['end']}")
| [
"As of my knowledge cutoff date in September 2021, I am not aware of a technology startup called Jigsaw that was acquired by Salesforce.com. However, there was a company called Jigsaw Data Corporation, which was acquired by Salesforce in 2010. I can provide you with information on that company, but please note that my knowledge may be outdated, and there may be another Jigsaw startup that I am not aware of.\nJigsaw Data Corporation was a technology company founded in 2004 by Jim Fowler and Garth Moulton. The company specialized in providing crowd-sourced business contact data, offering a cloud-based platform for users to access and update business contacts and company information.\nThe business model of Jigsaw was unique at that time, as it relied on a collaborative, crowd-sourced approach to collect and maintain data. Users could access the platform for free by contributing their own contact information, or they could purchase points to access the data without contributing. This model incentivized users to add and update contact information, ensuring the database remained fresh and accurate.\nJigsaw's primary product was an online directory of business contacts and company information. The platform provided its users with access to a vast database containing millions of business contacts across various industries. Users could search for contacts by various criteria such as name, title, company, or location. Additionally, Jigsaw offered a suite of data management tools that allowed users to import, export, and manage their contacts efficiently.\nIn April 2010, Salesforce.com, a leading cloud-based customer relationship management (CRM) company, announced the acquisition of Jigsaw Data Corporation for approximately $142 million in cash. The acquisition aimed to enhance Salesforce's CRM offerings by integrating Jigsaw's data services into its platform, allowing users to access updated business contact information within their CRM system directly.\nAfter the acquisition, Jigsaw's product was rebranded as \"Data.com\" and became an integral part of the Salesforce.com ecosystem. Data.com offered Salesforce customers access to the crowd-sourced contact database, as well as additional data management and enrichment services.\nPlease note that this information is accurate as of my knowledge cutoff date in September 2021, and there may have been further developments or changes to the company and its products since then.",
"You are a helpful AI assistant.",
"You are a good English teacher. Any sentence that {user} says that is surrounded by double quotation marks (\"\") is asking how you interpret that sentence. Please analyze and explain that sentence in as much detail as possible. For the rest of the sentences, please respond in a way that will help {user} learn English.",
"You are a JSON response bot that determines if the provided CONTEXT is sufficient to answer the user's question. Follow the rules below to output a response.\n- Output your next action to do in JSON form like {\"action\": YOUR_NEXT_ACTION, \"link\": LINK_TO_CLICK}.\n- \"action\" should be one of \"click\", \"finish\".\n- {\"action\": \"click\"} should be selected when you want to click on a link to read more about it.\n- {\"action\": \"finish\"} should be selected when the information already provided is sufficient to answer the user.\n- \"link\" should be a link to click. You don't have to output \"link\" if you decided to take \"action\" as \"finish\".\n- CONTEXT consists of multiple #[LINK]\\n```TITLE\\nSNIPPET\n```CONTEXT\n{{context}}\n```\n```USER'S QUESTION\n{{query}}\n```\nYOUR JSON RESPONSE:\n",
"do you think that a crowd-sourced approach to data collection could work for an AI startup? specifically, the startup would award users points for contributing proprietary data used to train the AI model. in exchange, users could spend points by asking the AI questions. for example, an \"AI architect\" startup could give users points for contributing architectural plans and CAD drawings. users could then spend those points by asking the AI to design new plans. do you think this idea is viable? ",
"Jigsaw Data Corporation's crowd-sourced approach to collecting and maintaining business contact data relied on its user community to contribute, update, and verify information. This model had several key components:\n\nContribution: Users could create a free account on Jigsaw's platform by contributing their own business contact information. They could also add new contacts to the database and gain points, which could be used to access other contacts' information.\n\nVerification: Jigsaw encouraged users to verify the accuracy of contact information in the database by rewarding them with points for each correction made. This created an incentive for users to keep the data up-to-date and accurate.\n\nPoints system: Jigsaw's points system played a crucial role in its crowd-sourced model. Users could earn points by contributing new contacts, verifying existing contacts, or referring new users to the platform. These points could then be used to access contact information without having to contribute their own.\n\nData exchange: Jigsaw allowed users to exchange their points for contact information. Users who didn't want to contribute their own data could purchase points, while those who contributed regularly could access the database for free by using their earned points.\n\nData quality: To ensure data quality, Jigsaw implemented a rating system for users based on their contributions and accuracy. Users with higher ratings were considered more reliable, which motivated users to contribute accurate and up-to-date information.\n\nSeveral other companies have used crowd-sourcing techniques in different industries, although not necessarily in the same way as Jigsaw:\n\nWaze: Waze is a popular GPS navigation app that uses crowd-sourced data from its users to provide real-time traffic information, road conditions, and other driving-related updates.\n\nWikipedia: Wikipedia is a free online encyclopedia that relies on its user community to create, edit, and maintain its content. It has become one of the largest and most comprehensive sources of information on the internet.\n\nOpenStreetMap: OpenStreetMap is a collaborative project that allows users to create and edit map data. It relies on a global community of contributors to maintain accurate and up-to-date mapping information.\n\nMechanical Turk: Amazon's Mechanical Turk is a marketplace for human intelligence tasks (HITs), which allows users to complete small tasks in exchange for payment. It relies on the crowd to complete tasks that are challenging for machines but simple for humans.\n\nKaggle: Kaggle is a platform for data scientists and machine learning practitioners to participate in competitions, collaborate on projects, and learn from each other. It uses crowd-sourcing to find innovative solutions to complex data problems.",
"thank you, that was an excellent answer. besides architecture, what are some other areas where this crowd-sourced approach to data collection could be used in order to improve an AI model? specifically, what are some valuable data sets that are somewhat proprietary and hard to get, but users who own that data may be willing to share it in exchange for points? ",
"That's the correct company. Thank you. can you tell me more about its unique crowd-sourced approach to collecting and maintaining data? which other companies have used a similar approach?",
"Write a summary of the following conversations delimited by triple backquotes.\nOrganize the key points of each message in the order of the conversation in a format like `ROLE: SUMMARY`.\n```\n{text}\n```\n\nCONVERSATION SUMMARY:\n",
"You are a Search API bot performing a vector similarity-based search for a user's question. Follow the rules below to output a response.\n- Output the query to search the web for USER'S QUESTION in JSON format like {\"query\": QUERY_TO_SEARCH}.\n- QUERY_TO_SEARCH creates a hypothetical answer to facilitate searching in the Vector database.\n- Your response must be in JSON format, starting with { and ending with }.\n- If a suitable search query does not exist, output {\"query\": NULL} - don't be afraid to output NULL!```USER'S QUESTION\n{{query}}\n```\nYOUR JSON RESPONSE:\n",
"### {role}: {content}\n",
"###{role}: {content}\n",
"f-string",
"{role}: {content}</s>",
"Write a concise summary of the following text delimited by triple backquotes. Return your response in bullet points which covers the key points of the text.\n```\n{text}\n```\n\nBULLET POINT SUMMARY:\n",
"This year, the elves invested in a gift-wrapping machine. However, it isn't programmed! An algorithm that aids it in the task must be developed. Many presents are given to the machine. Each present is a string. Each gift must be wrapped by the machine and set in a display of other wrapped gifts. To wrap a gift, you must place the wrapping paper around the string, which is represented by the * symbol. For instance: const gifts are [“cat,” “game,” and “socks”]. console.log const wrapped = wrapping(gifts) (wrapped) / [“ncatn,” “ngamen,” and “nsocksn**”] */ As you can see, the thread is wrapped in the wrapping paper. The corners are also wrapped in wrapping paper on the top and bottom to prevent any gaps.",
"{role}: {content}\n",
"Make Narrator perform as a text based adventure game with Player as Narrator's user input. Make Narrator describe the scene, scenario, actions of characters, reactions of characters to the player's actions, and potential consequences of their actions and Player's actions when relevant with visually descriptive, detailed, and long storytelling. Allow characters and Player to converse to immerse Player in a rich narrative driven story. When Player encounters a new character, Narrator will name the new character and describe their behavior and appearance. Narrator will internally determine their underlying motivations and weave it into the story where possible.",
"Context information is below.\n---\n{context}\n---\nGiven the context information and not prior knowledge, answer the question in as much detail as possible:: {question}\n",
"### {role}:\n{content}\n",
"A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.",
"The following is a friendly conversation between a {user} and an {ai}. The {ai} is talkative and provides lots of specific details from its context. If the {ai} does not know the answer to a question, it truthfully says it does not know.\n\nCurrent conversation:\n\n",
"# {role}:\n{content}\n",
"The results of a web search for the user's question are shown below, enclosed in triple dashes(---).\nYou can use this information to answer user's question.\n---\n{context}\n---\nAnswer the question in as much detail as possible: {question}\n",
"Thank you for your kind words. There are several areas where a crowd-sourced approach to data collection could be used to improve AI models by leveraging valuable and somewhat proprietary datasets. Here are a few examples:\n\nMedical and health data: AI models can greatly benefit from access to diverse patient data, such as electronic health records, medical imaging, and genomic data. Users (patients or healthcare professionals) might be willing to share anonymized data in exchange for points, which could then be used to access AI-driven health insights, personalized treatment suggestions, or early disease detection.\n\nLegal document analysis: Law firms and legal professionals often have access to large collections of legal documents, such as contracts, court rulings, or patent filings. By sharing these documents, users could contribute to training AI models for legal document analysis, and in return, gain access to AI-driven legal research tools or contract review services.\n\nArt and creative work: Artists and designers may possess large collections of their own artwork, sketches, or designs. Sharing this data could help train AI models for artistic style transfer, generative art, or design assistance. Users could then access AI-driven creative tools or personalized design suggestions.\n\nFinance and investment: Financial professionals and investors may have access to proprietary trading algorithms, portfolio data, or market analysis reports. By sharing this data, they could contribute to AI models for financial analysis and predictions. In return, users could gain access to AI-driven investment advice, risk assessment, or market forecasting tools.\n\nScientific research data: Researchers in various fields might have access to valuable datasets generated through experiments or simulations. By sharing this data, they can help train AI models for data analysis, pattern recognition, or predictive modeling in their respective domains. Users could then access AI-driven research tools or personalized research recommendations.\n\nManufacturing and production data: Companies involved in manufacturing and production may possess proprietary data on production processes, quality control, and equipment performance. Sharing this data could improve AI models for predictive maintenance, process optimization, and quality assurance. Users could then gain access to AI-driven optimization suggestions or equipment monitoring services.\n",
"You are a Web search API bot that performs a web search for a user's question. Follow the rules below to output a response.\n- Output the query to search the web for USER'S QUESTION in JSON format like {\"query\": QUERY_TO_SEARCH}.\n- QUERY_TO_SEARCH is a set of words within 10 words.\n- Your response must be in JSON format, starting with { and ending with }.\n- Output a generalized query to return sufficiently relevant results when searching the web.\n- If a suitable search query does not exist, output {\"query\": null} - don't be afraid to output null!\n\n```USER'S QUESTION\n{{query}}\n```\nYOUR JSON RESPONSE:\n",
"You are a JSON response bot that uses the context provided to determine if you can answer the user's question. Follow the rules below to output a response.\n- Output your next action to do in JSON format like {\"answerable\": TRUE_OR_FALSE}. This is important.\n- \"answerable\" should be one of true or false.\n- CONTEXT and USER's QUESTION are surrounded by triple backticks.\n```CONTEXT\n{{context}}\n```\n```USER'S QUESTION\n{{query}}\n```\nYOUR JSON RESPONSE:\n",
"jinja2",
"are you familiar with a technology startup called Jigsaw that was acquired by Salesforce.com? can you provide a detailed summary of its company history, business model, and product?",
"A crowd-sourced approach to data collection can indeed work for an AI startup, especially if the startup aims to develop AI models that require a vast and diverse dataset to perform well. The idea you've proposed, in which users contribute proprietary data to train the AI model and can later use the AI's services by spending points, has some potential advantages:\n\nIncentivized data contribution: By rewarding users with points for contributing data, you create an incentive for them to share valuable information, which can help grow and diversify your dataset.\n\nEncouraging user engagement: A points-based system encourages users to actively participate in the platform, which can help improve the AI model over time and increase user satisfaction.\n\nCost-effective data acquisition: Crowd-sourcing can be a cost-effective way to gather large amounts of data, as it leverages the efforts of a large community rather than relying on paid data collection services.\n\nContinuous improvement: As users contribute more data and use the AI's services, the model can be iteratively improved, leading to better performance and more valuable insights.\n\nHowever, there are also challenges and potential pitfalls to consider:\n\nData quality: Ensuring the quality and accuracy of the contributed data is critical. You may need to implement verification processes, such as peer review or expert validation, to maintain data quality.\n\nIntellectual property and privacy: Handling proprietary data comes with legal and ethical considerations. You'll need to establish clear terms of use, ensure data protection, and address any intellectual property concerns when users contribute data.\n\nScalability: If the AI's services become popular, you may face challenges related to scaling the infrastructure and resources needed to support a growing user base.\n\nUser retention: Maintaining user engagement and interest in the platform can be challenging, especially as users may eventually exhaust their need for the AI's services.\n\nMonetization: Balancing the points-based system with a sustainable revenue model might be challenging, as users may prefer to use the platform for free by contributing data rather than paying for services.\n\nOverall, the viability of this idea depends on various factors, including the nature of the AI model, the target market, the quality and relevance of the contributed data, and the ability to address the challenges mentioned above. If the startup can successfully navigate these challenges, a crowd-sourced approach to data collection could be a promising strategy."
] |