date_collected (stringclasses, 1 value) | repo_name (stringlengths, 6–116) | file_name (stringlengths, 2–220) | file_contents (stringlengths, 13–357k) | prompts (sequence) |
---|---|---|---|---|
2024-01-10 | minjechoi/SOCKET | experiments~zeroshot~predict.py | import os
import sys
import argparse
import json
import math
import re
import string
import collections
from getpass import getpass
import torch
from langchain.llms import OpenAI, HuggingFacePipeline
from transformers import (
AutoConfig,
pipeline
)
from langchain.chat_models import ChatOpenAI
from datasets import load_dataset
from sklearn.metrics import precision_recall_fscore_support
import pandas as pd
from langchain.agents import load_tools, initialize_agent, AgentType
from transformers.pipelines.pt_utils import KeyDataset
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.evaluation.qa import QAEvalChain
from tqdm import tqdm
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import AIMessage, HumanMessage, SystemMessage
print('PID: ',os.getpid())
parser = argparse.ArgumentParser("")
parser.add_argument("--model_type", type=str, default='huggingface')
parser.add_argument("--model_name_or_path", type=str, default='google/flan-t5-small')
parser.add_argument("--data_name_or_path", type=str, default='Blablablab/SOCKET')
parser.add_argument("--model_cache_dir", type=str, default=None)
parser.add_argument("--data_split", type=str, default='test')
parser.add_argument("--batch_size", default=32, type=int)
parser.add_argument("--max_seq_len", default=2048, type=int)
parser.add_argument("--max_new_tokens", default=100, type=int)
parser.add_argument("--tasks", type=str, default='ALL')
parser.add_argument("--result_path", default='results/')
parser.add_argument("--use_cuda", action="store_true")
parser.add_argument("--use_sockette", action="store_true")
parser.add_argument("--unit_test", action="store_true")
parser.add_argument("--debug", action="store_true")
# functions for normalizing texts
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
if not s: return []
return normalize_answer(s).split()
# functions for computing scores
def compute_exact(a_gold, a_pred):
return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
res = int(gold_toks == pred_toks)
return res, res, res
if num_same == 0:
return 0,0,0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1, precision, recall
def get_mean(li):
return sum(li)/len(li)
def get_all_f1(groundtruth, answer):
f1 = get_mean([compute_f1(g,a)[0] for a,g in zip(answer,groundtruth) ])
p = get_mean([compute_f1(g,a)[1] for a,g in zip(answer,groundtruth) ])
r = get_mean([compute_f1(g,a)[2] for a,g in zip(answer,groundtruth) ])
return p, r, f1
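# helper: yield the data in consecutive batches of batch_size (the last batch may be smaller)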
def data_iterator(data, batch_size = 64):
n_batches = math.ceil(len(data) / batch_size)
for idx in range(n_batches):
x = data[idx *batch_size:(idx+1) * batch_size]
yield x
# def get_max_seq_len(model_id):
# model_id = model_id.lower()
# if 'opt-' in model_id:
# return 2048
# elif 'bloom' in model_id:
# return 2048
# elif 'gpt' in model_id:
# return 2048
# else:
# return 2048
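# token-level truncation: encode, keep the first max_length tokens, then decode back to text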
def truncate(sen, tokenizer, max_length=512):
en_sen = tokenizer.encode(sen)
sen = tokenizer.decode(en_sen[:max_length])
return sen
args = parser.parse_args()
print(args)
# modify transformers cache
if args.model_cache_dir:
os.environ['TRANSFORMERS_CACHE'] = args.model_cache_dir
if not os.path.exists(args.result_path):
os.makedirs(args.result_path)
# load prompts
ppt_df = pd.read_csv('socket_prompts.csv')
if args.tasks in ['CLS','REG','PAIR','SPAN']:
tasks_df = ppt_df[ppt_df['type']==args.tasks]
elif args.tasks == 'ALL':
tasks_df = ppt_df
elif args.tasks in set(ppt_df['task']):
tasks_df = ppt_df[ppt_df['task']==args.tasks]
elif ',' in args.tasks:
tasks = args.tasks.split(',')
tasks_df = pd.concat([ppt_df[ppt_df['task']==task] for task in tasks],axis=0)
else:
print('task type not accepted')
quit()
print(tasks_df)
print(tasks_df.columns)
# fetch LLM
use_cuda = args.use_cuda
model_type, model_id = args.model_type, args.model_name_or_path
max_seq_len = args.max_seq_len
if use_cuda:
device = 0
dtype = torch.float16
else:
device = torch.device('cpu')
dtype = torch.float32
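# build the generation pipeline: models matching t5-/alpaca/bart- are routed to text2text-generation, all others to text-generation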
if model_type == 'huggingface':
if re.search('t5-|alpaca|bart-', model_id):
pipe_type = "text2text-generation"
else:
pipe_type = "text-generation"
print(pipe_type)
if 'llama' in model_id:
from transformers import LlamaTokenizer, LlamaConfig
tokenizer = LlamaTokenizer.from_pretrained(model_id)
config = LlamaConfig.from_pretrained(model_id)
else:
from transformers import AutoTokenizer, AutoConfig
tokenizer = AutoTokenizer.from_pretrained(model_id)
config = AutoConfig.from_pretrained(model_id)
hf_pipe = pipeline(pipe_type, model=model_id, tokenizer=tokenizer, device=device, torch_dtype=dtype)
llm = hf_pipe
elif model_type.startswith('openai'):
API_KEY = os.getenv("OPENAI_API_KEY")
if API_KEY is None:
API_KEY = getpass("Paste your OpenAI key from: https://platform.openai.com/account/api-keys\n")
assert API_KEY.startswith("sk-"), "This doesn't look like a valid OpenAI API key"
print("OpenAI API key configured")
if model_type == 'openai':
llm = OpenAI(model_name=model_id, temperature=0, openai_api_key=API_KEY)
elif model_type == 'openai_chat':
llm = ChatOpenAI(model_name=model_id, temperature=0, openai_api_key=API_KEY)
else:
print("Unsupported Model: {}".format(model_type))
data_name_or_path, data_split = args.data_name_or_path, args.data_split
# set result directory
res_path = os.path.join(args.result_path, '%s_res.tsv'%args.model_name_or_path.replace('/','-'))
# create empty dataframes to save results
res_df = pd.read_csv(res_path,sep='\t') if os.path.exists(res_path) else pd.DataFrame()
prev_tasks = set(res_df['task']) if len(res_df) else set()
tasks_df = tasks_df[~tasks_df['task'].isin(prev_tasks)]
print('%d tasks remaining'%len(tasks_df))
# for each task
for i,task_info in tqdm(tasks_df.iterrows()):
task_info = dict(task_info)
task, task_type = task_info['task'],task_info['type']
print(task_info)
# load dataset for task
dataset = load_dataset(args.data_name_or_path, task, data_split)[data_split]
if task_type == 'PAIR' or task_type == 'CLS':
ppt_template = "%s\nOptions:\n%s\nPlease only answer with the options. "%(task_info['question'], '\n'.join(eval(task_info['options'])))
else:
ppt_template = "%s\n"%(task_info['question'])
# specify instructions for alpaca or llama-2 models
if re.search('alpaca|llama', model_id):
ppt_template = "Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.\
\n\n### Instruction:\nYou will be presented with a question, please answer that correctly.\
\n\n### Input:\n%s Provide the answer without explaining your reasoning. \n\n### Response:"%ppt_template
else:
ppt_template = "Question: %sAnswer:"%ppt_template
print(ppt_template)
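# token budget for the input text: model context length minus the prompt template length minus room for generation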
ln_template = len(tokenizer.tokenize(ppt_template))
valid_ln = max_seq_len - ln_template - args.max_new_tokens
prompts = []
if task_type == 'PAIR':
pairs = [it.split('[SEP]') for it in dataset['text']]
for text_a, text_b in pairs:
en_text_a, en_text_b = tokenizer.encode(text_a), tokenizer.encode(text_b)
ln_a, ln_b = len(en_text_a), len(en_text_b)
if (ln_a+ln_b) > valid_ln:
# truncate the longer text so the pair together fits within the valid_ln token budget
if ln_a > ln_b:
en_text_a = en_text_a[:valid_ln-ln_b]
else:
en_text_b = en_text_b[:valid_ln-ln_a]
text_a, text_b = tokenizer.decode(en_text_a), tokenizer.decode(en_text_b)
prompts.append(ppt_template.replace("{text_a}", text_a).replace("{text_b}", text_b))
# dataset = dataset.add_column('prompt', [ppt_template.replace("{text_a}", it[0]).replace("{text_b}", it[1]) for it in pairs])
else:
for text in dataset['text']:
prompts.append(ppt_template.replace("{text}", truncate(text, tokenizer, valid_ln)))
# dataset = dataset.add_column('prompt', [ppt_template.replace("{text}", truncate(it, args.max_length)) for it in dataset['text']])
dataset = dataset.add_column('prompt', prompts)
print(dataset['prompt'][:1])
if task_type == 'PAIR' or task_type == 'CLS':
d_labels = [it.replace('-', ' ').lower() for it in dataset.info.features['label'].names]
labels = eval(task_info['options'])
label2id = {l:i for i,l in enumerate(labels)}
print(d_labels, labels, label2id)
# optional: use only up to first 1000 samples (SOCKETTE) for quicker evaluation
if args.unit_test:
dataset = dataset[:args.batch_size]
elif args.use_sockette:
dataset = dataset[:1000]
# iterate through batches to get prediction results
batch_size=int(args.batch_size)
data_iter = data_iterator(dataset['prompt'], batch_size)
outputs = []
for batch in tqdm(data_iter, total=math.ceil(len(dataset['prompt'])/batch_size)):
if pipe_type=='text-generation':
output = llm(batch, max_new_tokens = args.max_new_tokens, return_full_text=False, clean_up_tokenization_spaces=True)
elif pipe_type=='text2text-generation':
output = llm(batch, max_new_tokens = args.max_new_tokens)
elif pipe_type=='gpt':
sys.exit(0) # later
outputs.extend(output)
# process prediction results
dataset = pd.DataFrame(dataset)
dataset['task'] = task
outs = []
for it in outputs:
if pipe_type=='text-generation':
if 'llama' in model_id:
answer = ' '.join(it[0]['generated_text'].split()).strip()
else:
answer = it[0]['generated_text'].strip().split('\n')[0].strip()
elif pipe_type=='text2text-generation':
answer = ' '.join(it['generated_text'].split()).strip()
outs.append(answer)
dataset['generated_text'] = outs
res_df = pd.concat([res_df, dataset])
# save updated predictions
res_df.to_csv(res_path,index=False,sep='\t')
| [
"PLACEHOLDER\n",
"PLACEHOLDER\nOptions:\nPLACEHOLDER\nPlease only answer with the options. ",
"Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. \n\n### Instruction:\nYou will be presented with a question, please answer that correctly. \n\n### Input:\nPLACEHOLDER Provide the answer without explaining your reasoning. \n\n### Response:",
"[]",
"Question: PLACEHOLDERAnswer:"
] |
2024-01-10 | adityabhattad2021/langchain-playground | pdf-dist~app~chat~memories~sql_memory.py | from pydantic import BaseModel
from langchain.memory import ConversationBufferMemory
from langchain.schema import BaseChatMessageHistory
from app.web.api import (
get_messages_by_conversation_id,
add_message_to_conversation
)
class SqlMessageHistory(BaseChatMessageHistory,BaseModel):
conversation_id:str
@property
def messages(self):
return get_messages_by_conversation_id(self.conversation_id)
def add_message(self,message):
return add_message_to_conversation(
conversation_id=self.conversation_id,
role=message.type,
content=message.content
)
def clear(self):
pass
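# wire the SQL-backed message history into a LangChain ConversationBufferMemory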
def build_memory(chat_args):
return ConversationBufferMemory(
chat_memory=SqlMessageHistory(
conversation_id=chat_args.conversation_id
),
return_messages=True,
memory_key="chat_history",
output_key="answer"
) | [] |
2024-01-10 | adityabhattad2021/langchain-playground | pdf-dist~app~chat~vector_stores~pipecone.py | import os
import pinecone
from langchain.vectorstores import Pinecone
from app.chat.embeddings.openai import embeddings
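# connect to Pinecone using environment credentials and wrap the existing index as a LangChain vector store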
pinecone.init(
api_key=os.getenv("PINECONE_API_KEY"),
environment=os.getenv("PINECONE_ENV_NAME"),
)
vector_store = Pinecone.from_existing_index(
os.getenv("PINECONE_INDEX_NAME"),
embeddings
)
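# build a retriever scoped to one PDF: a metadata filter on pdf_id restricts search to that document's chunks, and k sets how many chunks to return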
def build_retriever(chat_args,k):
search_kwargs = {
"filter":{"pdf_id":chat_args.pdf_id},
"k":k,
}
return vector_store.as_retriever(
search_kwargs=search_kwargs
) | [] |
2024-01-10 | adityabhattad2021/langchain-playground | agents~tools~report.py | from langchain.tools import StructuredTool
from pydantic.v1 import BaseModel
def write_report(filename,html):
with open(filename,'w') as f:
f.write(html)
class WriteReportArgsSchema(BaseModel):
filename:str
html:str
write_report_tool = StructuredTool.from_function(
name="write_report",
description="Write an HTML file on to the disk. Use this tool whenever the users asks for a report.",
func=write_report,
args_schema=WriteReportArgsSchema
)
| [] |
2024-01-10 | adityabhattad2021/langchain-playground | pdf-dist~app~chat~create_embeddings.py | from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from app.chat.vector_stores.pipecone import vector_store
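# split the PDF into overlapping chunks, attach page/text/pdf_id metadata, and index the chunks in Pinecone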
def create_embeddings_for_pdf(pdf_id: str, pdf_path: str):
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=500,
chunk_overlap=100
)
print(pdf_path)
loader = PyPDFLoader(pdf_path)
docs = loader.load_and_split(text_splitter)
for doc in docs:
doc.metadata = {
"page":doc.metadata["page"],
"text":doc.page_content,
"pdf_id":pdf_id
}
vector_store.add_documents(docs)
print("Successfully created embeddings for the PDF!") | [] |
2024-01-10 | adityabhattad2021/langchain-playground | pdf-dist~app~chat~llms~chatopenai.py | from langchain.chat_models import ChatOpenAI
def build_llm(chat_args,model_name):
return ChatOpenAI(streaming=chat_args.streaming,model_name=model_name) | [] |
2024-01-10 | adityabhattad2021/langchain-playground | facts-gpt~redundant_filter_retriever.py | from typing import List
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.base import VectorStore
from langchain.schema import BaseRetriever, Document
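# retriever that de-duplicates results via max-marginal-relevance search; lambda_mult=0.8 weights relevance over diversity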
class RedundantFilterRetriever(BaseRetriever):
embeddings: Embeddings
vectorDB: VectorStore
def get_relevant_documents(self, query: str) -> List[Document]:
emb = self.embeddings.embed_query(query)
res = self.vectorDB.max_marginal_relevance_search_by_vector(
embedding=emb,
lambda_mult=0.8,
)
return res
async def aget_relevant_documents(self, query: str) -> List[Document]:
return [] | [] |
2024-01-10 | MostafaRizk/TS-Platform | src~helpers~rendering.py | """
2D rendering framework from OpenAI's Gym
"""
from __future__ import division
import os
import six
import sys
if "Apple" in sys.version:
if 'DYLD_FALLBACK_LIBRARY_PATH' in os.environ:
os.environ['DYLD_FALLBACK_LIBRARY_PATH'] += ':/usr/lib'
# (JDS 2016/04/15): avoid bug on Anaconda 2.3.0 / Yosemite
from helpers import error
try:
import pyglet
except ImportError as e:
raise ImportError('''
Cannot import pyglet.
HINT: you can install pyglet directly via 'pip install pyglet'.
But if you really just want to install all Gym dependencies and not have to think about it,
'pip install -e .[all]' or 'pip install gym[all]' will do it.
''')
try:
from pyglet.gl import *
except ImportError as e:
raise ImportError('''
Error occurred while running `from pyglet.gl import *`
HINT: make sure you have OpenGL install. On Ubuntu, you can run 'apt-get install python-opengl'.
If you're running on a server, you may need a virtual frame buffer; something like this should work:
'xvfb-run -s \"-screen 0 1400x900x24\" python <your_script.py>'
''')
import math
import numpy as np
RAD2DEG = 57.29577951308232
def get_display(spec):
"""Convert a display specification (such as :0) into an actual Display
object.
Pyglet only supports multiple Displays on Linux.
"""
if spec is None:
return None
elif isinstance(spec, six.string_types):
return pyglet.canvas.Display(spec)
else:
raise error.Error('Invalid display specification: {}. (Must be a string like :0 or None.)'.format(spec))
class Viewer(object):
def __init__(self, width, height, display=None):
display = get_display(display)
self.width = width
self.height = height
self.window = pyglet.window.Window(width=width, height=height, display=display)
self.window.on_close = self.window_closed_by_user
self.isopen = True
self.geoms = []
self.onetime_geoms = []
self.transform = Transform()
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
def close(self):
self.window.close()
def window_closed_by_user(self):
self.isopen = False
def set_bounds(self, left, right, bottom, top):
assert right > left and top > bottom
scalex = self.width/(right-left)
scaley = self.height/(top-bottom)
self.transform = Transform(
translation=(-left*scalex, -bottom*scaley),
scale=(scalex, scaley))
def add_geom(self, geom):
self.geoms.append(geom)
def add_onetime(self, geom):
self.onetime_geoms.append(geom)
def render(self, return_rgb_array=False):
glClearColor(1,1,1,1)
self.window.clear()
self.window.switch_to()
self.window.dispatch_events()
self.transform.enable()
for geom in self.geoms:
geom.render()
for geom in self.onetime_geoms:
geom.render()
self.transform.disable()
arr = None
if return_rgb_array:
buffer = pyglet.image.get_buffer_manager().get_color_buffer()
image_data = buffer.get_image_data()
arr = np.frombuffer(image_data.get_data(), dtype=np.uint8)
# In https://github.com/openai/gym-http-api/issues/2, we
# discovered that someone using Xmonad on Arch was having
# a window of size 598 x 398, though a 600 x 400 window
# was requested. (Guess Xmonad was preserving a pixel for
# the boundary.) So we use the buffer height/width rather
# than the requested one.
arr = arr.reshape(buffer.height, buffer.width, 4)
arr = arr[::-1,:,0:3]
self.window.flip()
self.onetime_geoms = []
return arr if return_rgb_array else self.isopen
# Convenience
def draw_circle(self, radius=10, res=30, filled=True, **attrs):
geom = make_circle(radius=radius, res=res, filled=filled)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def draw_polygon(self, v, filled=True, **attrs):
geom = make_polygon(v=v, filled=filled)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def draw_polyline(self, v, **attrs):
geom = make_polyline(v=v)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def draw_line(self, start, end, **attrs):
geom = Line(start, end)
_add_attrs(geom, attrs)
self.add_onetime(geom)
return geom
def get_array(self):
self.window.flip()
image_data = pyglet.image.get_buffer_manager().get_color_buffer().get_image_data()
self.window.flip()
arr = np.frombuffer(image_data.get_data(), dtype=np.uint8)
arr = arr.reshape(self.height, self.width, 4)
return arr[::-1,:,0:3]
def __del__(self):
self.close()
def _add_attrs(geom, attrs):
if "color" in attrs:
geom.set_color(*attrs["color"])
if "linewidth" in attrs:
geom.set_linewidth(attrs["linewidth"])
class Geom(object):
def __init__(self):
self._color=Color((0, 0, 0, 1.0))
self.attrs = [self._color]
def render(self):
for attr in reversed(self.attrs):
attr.enable()
self.render1()
for attr in self.attrs:
attr.disable()
def render1(self):
raise NotImplementedError
def add_attr(self, attr):
self.attrs.append(attr)
def set_color(self, r, g, b):
self._color.vec4 = (r, g, b, 1)
class Attr(object):
def enable(self):
raise NotImplementedError
def disable(self):
pass
class Transform(Attr):
def __init__(self, translation=(0.0, 0.0), rotation=0.0, scale=(1,1)):
self.set_translation(*translation)
self.set_rotation(rotation)
self.set_scale(*scale)
def enable(self):
glPushMatrix()
glTranslatef(self.translation[0], self.translation[1], 0) # translate to GL loc point
glRotatef(RAD2DEG * self.rotation, 0, 0, 1.0)
glScalef(self.scale[0], self.scale[1], 1)
def disable(self):
glPopMatrix()
def set_translation(self, newx, newy):
self.translation = (float(newx), float(newy))
def set_rotation(self, new):
self.rotation = float(new)
def set_scale(self, newx, newy):
self.scale = (float(newx), float(newy))
class Color(Attr):
def __init__(self, vec4):
self.vec4 = vec4
def enable(self):
glColor4f(*self.vec4)
class LineStyle(Attr):
def __init__(self, style):
self.style = style
def enable(self):
glEnable(GL_LINE_STIPPLE)
glLineStipple(1, self.style)
def disable(self):
glDisable(GL_LINE_STIPPLE)
class LineWidth(Attr):
def __init__(self, stroke):
self.stroke = stroke
def enable(self):
glLineWidth(self.stroke)
class Point(Geom):
def __init__(self):
Geom.__init__(self)
def render1(self):
glBegin(GL_POINTS) # draw point
glVertex3f(0.0, 0.0, 0.0)
glEnd()
class FilledPolygon(Geom):
def __init__(self, v):
Geom.__init__(self)
self.v = v
def render1(self):
if len(self.v) == 4 : glBegin(GL_QUADS)
elif len(self.v) > 4 : glBegin(GL_POLYGON)
else: glBegin(GL_TRIANGLES)
for p in self.v:
glVertex3f(p[0], p[1],0) # draw each vertex
glEnd()
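# approximate a circle as a regular polygon with res vertices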
def make_circle(radius=10, res=30, filled=True):
points = []
for i in range(res):
ang = 2*math.pi*i / res
points.append((math.cos(ang)*radius, math.sin(ang)*radius))
if filled:
return FilledPolygon(points)
else:
return PolyLine(points, True)
def make_polygon(v, filled=True):
if filled: return FilledPolygon(v)
else: return PolyLine(v, True)
def make_polyline(v):
return PolyLine(v, False)
def make_capsule(length, width):
l, r, t, b = 0, length, width/2, -width/2
box = make_polygon([(l,b), (l,t), (r,t), (r,b)])
circ0 = make_circle(width/2)
circ1 = make_circle(width/2)
circ1.add_attr(Transform(translation=(length, 0)))
geom = Compound([box, circ0, circ1])
return geom
class Compound(Geom):
def __init__(self, gs):
Geom.__init__(self)
self.gs = gs
for g in self.gs:
g.attrs = [a for a in g.attrs if not isinstance(a, Color)]
def render1(self):
for g in self.gs:
g.render()
class PolyLine(Geom):
def __init__(self, v, close):
Geom.__init__(self)
self.v = v
self.close = close
self.linewidth = LineWidth(1)
self.add_attr(self.linewidth)
def render1(self):
glBegin(GL_LINE_LOOP if self.close else GL_LINE_STRIP)
for p in self.v:
glVertex3f(p[0], p[1],0) # draw each vertex
glEnd()
def set_linewidth(self, x):
self.linewidth.stroke = x
class Line(Geom):
def __init__(self, start=(0.0, 0.0), end=(0.0, 0.0)):
Geom.__init__(self)
self.start = start
self.end = end
self.linewidth = LineWidth(1)
self.add_attr(self.linewidth)
def render1(self):
glBegin(GL_LINES)
glVertex2f(*self.start)
glVertex2f(*self.end)
glEnd()
class Image(Geom):
def __init__(self, fname, width, height):
Geom.__init__(self)
self.width = width
self.height = height
img = pyglet.image.load(fname)
self.img = img
self.flip = False
def render1(self):
self.img.blit(-self.width/2, -self.height/2, width=self.width, height=self.height)
# ================================================================
class SimpleImageViewer(object):
def __init__(self, display=None, maxwidth=500):
self.window = None
self.isopen = False
self.display = display
self.maxwidth = maxwidth
def imshow(self, arr):
if self.window is None:
height, width, _channels = arr.shape
if width > self.maxwidth:
scale = self.maxwidth / width
width = int(scale * width)
height = int(scale * height)
self.window = pyglet.window.Window(width=width, height=height,
display=self.display, vsync=False, resizable=True)
self.width = width
self.height = height
self.isopen = True
@self.window.event
def on_resize(width, height):
self.width = width
self.height = height
@self.window.event
def on_close():
self.isopen = False
assert len(arr.shape) == 3, "You passed in an image with the wrong number of dimensions"
image = pyglet.image.ImageData(arr.shape[1], arr.shape[0],
'RGB', arr.tobytes(), pitch=arr.shape[1]*-3)
glTexParameteri(GL_TEXTURE_2D,
GL_TEXTURE_MAG_FILTER, GL_NEAREST)
texture = image.get_texture()
texture.width = self.width
texture.height = self.height
self.window.clear()
self.window.switch_to()
self.window.dispatch_events()
texture.blit(0, 0) # draw
self.window.flip()
def close(self):
if self.isopen and sys.meta_path:
# ^^^ check sys.meta_path to avoid 'ImportError: sys.meta_path is None, Python is likely shutting down'
self.window.close()
self.isopen = False
def __del__(self):
self.close()
| [] |
2024-01-10 | MostafaRizk/TS-Platform | src~helpers~error.py | """
Error Framework from OpenAI Gym
"""
import sys
class Error(Exception):
pass
# Local errors
class Unregistered(Error):
"""Raised when the user requests an item from the registry that does
not actually exist.
"""
pass
class UnregisteredEnv(Unregistered):
"""Raised when the user requests an env from the registry that does
not actually exist.
"""
pass
class UnregisteredBenchmark(Unregistered):
"""Raised when the user requests an env from the registry that does
not actually exist.
"""
pass
class DeprecatedEnv(Error):
"""Raised when the user requests an env from the registry with an
older version number than the latest env with the same name.
"""
pass
class UnseedableEnv(Error):
"""Raised when the user tries to seed an env that does not support
seeding.
"""
pass
class DependencyNotInstalled(Error):
pass
class UnsupportedMode(Exception):
"""Raised when the user requests a rendering mode not supported by the
environment.
"""
pass
class ResetNeeded(Exception):
"""When the monitor is active, raised when the user tries to step an
environment that's already done.
"""
pass
class ResetNotAllowed(Exception):
"""When the monitor is active, raised when the user tries to step an
environment that's not yet done.
"""
pass
class InvalidAction(Exception):
"""Raised when the user performs an action not contained within the
action space
"""
pass
# API errors
class APIError(Error):
def __init__(self, message=None, http_body=None, http_status=None,
json_body=None, headers=None):
super(APIError, self).__init__(message)
if http_body and hasattr(http_body, 'decode'):
try:
http_body = http_body.decode('utf-8')
except:
http_body = ('<Could not decode body as utf-8. '
'Please report to [email protected]>')
self._message = message
self.http_body = http_body
self.http_status = http_status
self.json_body = json_body
self.headers = headers or {}
self.request_id = self.headers.get('request-id', None)
def __unicode__(self):
if self.request_id is not None:
msg = self._message or "<empty message>"
return u"Request {0}: {1}".format(self.request_id, msg)
else:
return self._message
def __str__(self):
try: # Python 2
return unicode(self).encode('utf-8')
except NameError: # Python 3
return self.__unicode__()
class APIConnectionError(APIError):
pass
class InvalidRequestError(APIError):
def __init__(self, message, param, http_body=None,
http_status=None, json_body=None, headers=None):
super(InvalidRequestError, self).__init__(
message, http_body, http_status, json_body,
headers)
self.param = param
class AuthenticationError(APIError):
pass
class RateLimitError(APIError):
pass
# Video errors
class VideoRecorderError(Error):
pass
class InvalidFrame(Error):
pass
# Wrapper errors
class DoubleWrapperError(Error):
pass
class WrapAfterConfigureError(Error):
pass
class RetriesExceededError(Error):
pass
# Vectorized environments errors
class AlreadyPendingCallError(Exception):
"""
Raised when `reset`, or `step` is called asynchronously (e.g. with
`reset_async`, or `step_async` respectively), and `reset_async`, or
`step_async` (respectively) is called again (without a complete call to
`reset_wait`, or `step_wait` respectively).
"""
def __init__(self, message, name):
super(AlreadyPendingCallError, self).__init__(message)
self.name = name
class NoAsyncCallError(Exception):
"""
Raised when an asynchronous `reset`, or `step` is not running, but
`reset_wait`, or `step_wait` (respectively) is called.
"""
def __init__(self, message, name):
super(NoAsyncCallError, self).__init__(message)
self.name = name
class ClosedEnvironmentError(Exception):
"""
Trying to call `reset`, or `step`, while the environment is closed.
"""
pass
| [] |
2024-01-10 | cplusx/layout_diffuse | test_utils.py | import os
import torch
import numpy as np
from torch.utils.data import DataLoader
from data.random_sampling import RandomNoise
from model_utils import default, get_obj_from_str
from callbacks.coco_layout.sampling_save_fig import ColorMapping, plot_bbox_without_overlap, plot_bounding_box
import cv2
def get_test_dataset(args):
sampling_args = args['sampling_args']
sampling_w_noise = default(sampling_args.get('sampling_w_noise'), False)
if sampling_w_noise:
test_dataset = RandomNoise(
sampling_args['image_size'],
sampling_args['image_size'],
sampling_args['in_channel'],
sampling_args['num_samples']
)
else:
from data import get_dataset
args['data']['val_args']['data_len'] = sampling_args['num_samples']
_, test_dataset = get_dataset(**args['data'])
test_loader = DataLoader(test_dataset, batch_size=args['data']['batch_size'], num_workers=4, shuffle=False)
return test_dataset, test_loader
def get_test_callbacks(args, expt_path):
sampling_args = args['sampling_args']
callbacks = []
callbacks_obj = sampling_args.get('callbacks')
for target in callbacks_obj:
callbacks.append(
get_obj_from_str(target)(expt_path)
)
return callbacks
def postprocess_image(batched_x, batched_bbox, class_id_to_name, image_callback=lambda x: x):
x = batched_x[0]
bbox = batched_bbox[0]
x = x.permute(1, 2, 0).detach().cpu().numpy().clip(-1, 1)
x = (x + 1) / 2
x = image_callback(x)
image_with_bbox = overlap_image_with_bbox(x, bbox, class_id_to_name)
canvas_with_bbox = overlap_image_with_bbox(np.ones_like(x), bbox, class_id_to_name)
return x, image_with_bbox, canvas_with_bbox
def overlap_image_with_bbox(image, bbox, class_id_to_name):
label_color_mapper = ColorMapping(id_class_mapping=class_id_to_name)
image_with_bbox = plot_bbox_without_overlap(
image.copy(),
bbox,
label_color_mapper
) if len(bbox) <= 10 else None
if image_with_bbox is not None:
return image_with_bbox
return plot_bounding_box(
image.copy(),
bbox,
label_color_mapper
)
def generate_completion(caption, api_key, additional_caption=''):
import openai
# check if api_key is valid
def validate_api_key(api_key):
import re
regex = "^sk-[a-zA-Z0-9]{48}$" # regex pattern for OpenAI API key
if not isinstance(api_key, str):
return None
if not re.match(regex, api_key):
return None
return api_key
openai.api_key = validate_api_key(api_key)
if openai.api_key is None:
print('WARNING: invalid OpenAI API key, using default caption')
return caption
prompt = 'Describe a scene with the following words: ' + caption + '. Use the above words to generate a prompt for drawing with a diffusion model. Use at least 30 words and at most 80 words and include all given words. The final image should look nice and be related to the given words'
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{
"role": "user",
"content": prompt
}]
)
return response.choices[0].message.content.strip() + additional_caption
def concatenate_class_labels_to_caption(objects, class_id_to_name, api_key=None, additional_caption=''):
# if want to add additional description for styles, add it to additonal_caption
caption = ''
for i in objects:
caption += class_id_to_name[i[4]+1] + ', '
caption = caption.rstrip(', ')
if api_key is not None:
caption = generate_completion(caption, api_key=api_key, additional_caption=additional_caption)
print('INFO: using openai text completion and the generated caption is: \n', caption)
else:
caption = caption + additional_caption
print('INFO: using default caption: \n', caption)
return caption
def sample_one_image(bbox_path, ddpm_model, device, class_name_to_id, class_id_to_name, api_key=None, image_size=(512, 512), additional_caption=''):
# the format of text file is: x, y, w, h, class_id
with open(bbox_path, 'r') as IN:
raw_objects = [i.strip().split(',') for i in IN]
objects = []
for i in raw_objects:
i[0] = float(i[0])
i[1] = float(i[1])
i[2] = float(i[2])
i[3] = float(i[3])
class_name = i[4].strip()
if class_name in class_name_to_id:
# skip objects that are not in COCO; these objects have a class id but do not appear in COCO
i[4] = int(class_name_to_id[class_name]) - 1
objects.append(i)
if len(objects) == 0:
return None, None, None
batch = []
image_resizer = ImageResizer()
new_h, new_w = image_resizer.get_proper_size(image_size)
batch.append(torch.randn(1, 3, new_h, new_w).to(device))
batch.append(torch.from_numpy(np.array(objects)).to(device).unsqueeze(0))
batch.append((
concatenate_class_labels_to_caption(objects, class_id_to_name, api_key, additional_caption),
))
res = ddpm_model.test_step(batch, 0) # we pass a batch but only text and layout is used when sampling
sampled_images = res['sampling']['model_output']
return postprocess_image(sampled_images, batch[1], class_id_to_name, image_callback=lambda x: image_resizer.to_original_size(x))
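# helper that pads image dimensions up to the nearest multiple of 64 (as the sampling model expects) and restores the original size afterwards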
class ImageResizer:
def __init__(self):
self.original_size = None
def to_proper_size(self, img):
# Get the new height and width that can be divided by 64
new_h, new_w = self.get_proper_size(img.shape[:2])
# Resize the image using OpenCV's resize function
resized = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_AREA)
return resized
def to_original_size(self, img):
# Resize the image to original size using OpenCV's resize function
resized = cv2.resize(img, (self.original_size[1], self.original_size[0]), interpolation=cv2.INTER_AREA)
return resized
def get_proper_size(self, size):
self.original_size = size
# Calculate the new height and width that can be divided by 64
if size[0] % 64 == 0:
new_h = size[0]
else:
new_h = size[0] + (64 - size[0] % 64)
if size[1] % 64 == 0:
new_w = size[1]
else:
new_w = size[1] + (64 - size[1] % 64)
return new_h, new_w
def parse_test_args():
import argparse
import json
parser = argparse.ArgumentParser()
parser.add_argument(
'-c', '--config', type=str,
default='config/train.json')
parser.add_argument(
'-e', '--epoch', type=int,
default=None, help='which epoch to evaluate, if None, will use the latest')
parser.add_argument(
'--openai_api_key', type=str,
default=None, help='openai api key for generating text prompt')
parser.add_argument(
'--model_path', type=str,
default=None, help='model path for generating layout diffuse, if not provided, will use the latest.ckpt')
parser.add_argument(
'--additional_caption', type=str,
default='', help='additional caption for the generated image')
''' parser configs '''
args_raw = parser.parse_args()
with open(args_raw.config, 'r') as IN:
args = json.load(IN)
args.update(vars(args_raw))
return args
def load_test_models(args):
from train_utils import get_models, get_DDPM
models = get_models(args)
diffusion_configs = args['diffusion']
ddpm_model = get_DDPM(
diffusion_configs=diffusion_configs,
log_args=args,
**models
)
return ddpm_model
def load_model_weights(ddpm_model, args):
print('INFO: loading checkpoint')
if args['model_path'] is not None:
ckpt_path = args['model_path']
else:
expt_path = os.path.join(args['expt_dir'], args['expt_name'])
if args['epoch'] is None:
ckpt_to_use = 'latest.ckpt'
else:
ckpt_to_use = f'epoch={args["epoch"]:04d}.ckpt'
ckpt_path = os.path.join(expt_path, ckpt_to_use)
print(ckpt_path)
if os.path.exists(ckpt_path):
print(f'INFO: Found checkpoint {ckpt_path}')
ckpt = torch.load(ckpt_path, map_location='cpu')['state_dict']
ddpm_model.load_state_dict(ckpt)
else:
ckpt_path = None
raise RuntimeError('Cannot do inference without pretrained checkpoint') | [
"Describe a scene with following words: PLACEHOLDER. Use the above words to generate a prompt for drawing with a diffusion model. Use at least 30 words and at most 80 words and include all given words. The final image should looks nice and be related to the given words"
] |
2024-01-10 | SepidehHosseinian/h2o-llmstudio | llm_studio~src~metrics~text_causal_language_modeling_metrics.py | import logging
from functools import partial
from typing import Any, Dict, List
import numpy as np
import openai
import pandas as pd
from joblib import Parallel, delayed
from sacrebleu import BLEU
from sacrebleu.metrics.base import Metric
from llm_studio.src.datasets.text_utils import get_texts
logger = logging.getLogger(__name__)
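# corpus score = mean of per-sentence metric scores between each prediction and its reference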
def sacrebleu_score(
cfg: Any, results: Dict, val_df: pd.DataFrame, metric: Metric
) -> float:
scores = []
for predicted_text, target_text in zip(
results["predicted_text"], results["target_text"]
):
scores.append(metric.sentence_score(predicted_text, [target_text]).score)
return np.mean(scores)
def rate_reply(question, reference_answer, assistant_answer, model):
# motivated by https://github.com/lm-sys/FastChat/tree/main/fastchat/eval
template = open("prompts/eval_template.txt", "r").read()
template = template.format(
question=question,
reference_answer=reference_answer,
assistant_answer=assistant_answer,
)
for _ in range(3):
try:
response = openai.ChatCompletion.create(
model=model,
messages=[
{
"role": "system",
"content": "You are a helpful and precise assistant "
"for checking the quality of the answer.",
},
{
"role": "user",
"content": template,
},
],
temperature=0.1,
max_tokens=1024,
)
ret = response["choices"][0]["message"]["content"]
ret = ret.split("\n")
score = ret[0]
score = score.lower().replace("score:", "").strip()
score = float(score)
return score, " ".join(ret[1:]).strip()
except Exception:
pass
logger.warning("error in api call")
return 0.0, ""
def gpt_score(
cfg: Any,
results: Dict,
val_df: pd.DataFrame,
model: str = "gpt-3.5-turbo",
raw_results: bool = False,
) -> float:
if "metrics" in results:
return np.mean(results["metrics"].detach().cpu().numpy())
prompts = get_texts(val_df, cfg, separator="")
ret = Parallel(n_jobs=len(prompts), backend="multiprocessing")(
delayed(rate_reply)(prompt, target_text, predicted_text, model)
for prompt, predicted_text, target_text in zip(
prompts, results["predicted_text"], results["target_text"]
)
)
scores = [x[0] for x in ret]
explanations = [x[1] for x in ret]
if raw_results:
return scores, explanations
return np.mean(scores)
class Metrics:
"""Metrics factory. Returns metric value and should it be maximized or minimized"""
_metrics = {
"BLEU": (partial(sacrebleu_score, metric=BLEU(effective_order=True)), "max"),
"GPT3.5": (partial(gpt_score, model="gpt-3.5-turbo"), "max"),
"GPT4": (partial(gpt_score, model="gpt-4"), "max"),
}
@classmethod
def names(cls) -> List[str]:
return sorted(cls._metrics.keys())
@classmethod
def get(cls, name: str) -> Any:
"""Access to Metrics.
Args:
name: metrics name
Returns:
A class to build the Metrics
"""
return cls._metrics.get(name)
@classmethod
def suitable_metrics(cls, cfg: Any, results: Dict, val_df: pd.DataFrame) -> Dict:
"""Access to All Suitable Metrics. For some problem types (e.g. classification)
there might be metrics (e.g. Micro Averaged F1) that are only suitable in
specific cases (multiclass not binary). There might also be additional
metrics returned, which are not possible to select as validation metrics,
e.g. threshold dependant metrics
Returns:
A dictionary of all suitable metrics for current problem setup
"""
return cls._metrics
@classmethod
def all_metrics(cls) -> Dict:
"""Access to All Metrics. There might also be additional
metrics returned, which are not possible to select as validation metrics,
e.g. threshold dependant metrics
Returns:
A dictionary of all metrics (including not suitable metrics).
"""
return cls._metrics
| [
"You are a helpful and precise assistant for checking the quality of the answer.",
"prompts/eval_template.txt"
] |
2024-01-10 | the-crypt-keeper/can-ai-code | compare.py | #!/usr/bin/env python3
import json
import os
from jinja2 import Template
import fire
import yaml
from copy import copy
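# template used to reconstruct the original task description for each test case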
task_prompt = "Write a {{language}} function {{Signature}} {{Input}} that returns {{Output}}"
def prepare(TEST_LANGUAGE, path, files):
out = {}
models = []
for idx, info in enumerate(files):
file = os.path.join(path, info['eval'])
id = info['id']
tags = os.path.basename(file).replace('.ndjson', '').split('_')
prompt = tags[3]
params = tags[5]
model = tags[6]
models.append({'prompt': prompt, 'short_name': info.get('short_name',id), 'params': params, 'model': model, 'id': id, 'idx': idx, 'passed': 0, 'total': 0})
results = [json.loads(line) for line in open(file)]
for r in results:
if r['language'] != TEST_LANGUAGE:
continue
testid = r['name']+'-'+r['language']
task = Template(task_prompt).render(**r)
if testid not in out:
out[testid] = { 'results': {}, 'task': task, 'language': r['language'] }
check_summary = f"{r['status']} correct {r['passed']}/{r['total']}"
passing_tests = ''
failing_tests = ''
for c in r['checks']:
if c['status'] == 1:
eq = "inside" if 'eq-any' in c else '=='
passing_tests += f"PASS {c['assert']} {eq} {c.get('eq',c.get('eq-any'))}\n"
else:
neq = "not inside" if 'eq-any' in c else '!='
failing_tests += f"FAIL {c['assert']} {eq} {c.get('eq',c.get('eq-any'))} got {c['got']}\n"
out[testid]['results'][id] = {
'check_summary': check_summary,
'passing_tests': passing_tests,
'failing_tests': failing_tests,
'code': r['code'],
'answer': r['answer']
}
models[idx]['passed'] += r['passed']
models[idx]['total'] += r['total']
return { 'tests': out, 'models': models }
header_prompt = """
You are going to evaluate the results of language models on a {{language}} programming challenge: {{task}}
Automated tests have been used to verify correctness of each solution produced; a detailed description of the results of each test will be provided.
For each model, you will be provided the code produced by the model and the result of all tests.
Compare and contrast the solutions each model produced. Do not repeat any of the generated code back to me. Highlight differences in solution approaches, test results, and provide a final summary of cohort performance on this challenge.
"""
model_prompt = """
---
Model: {{id}}
Test Result: {{check_summary}}
Test Details:
{{passing_tests}}{{failing_tests}}
Code:
```{{language}}
{{code}}
```
"""
footer_prompt = """
---
Analysis:"""
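# ask the analyser model to compare all solutions per test and write a natural-language summary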
def analysis(data, analyser):
from langchain.chat_models import ChatOpenAI
from langchain import LLMChain, PromptTemplate
params = json.load(open('params/precise.json'))
model_params = {
'temperature': params['temperature'],
'max_tokens': params['max_new_tokens'],
'top_p': params['top_p'],
'presence_penalty': params['repetition_penalty']
}
model = ChatOpenAI(model_name=analyser, **model_params)
chain = LLMChain(llm=model, prompt=PromptTemplate(template='{input}', input_variables=['input']))
models = {}
for idx, model_info in enumerate(data['models']):
models[model_info['id']] = model_info
out = data['tests']
for testid in out.keys():
print(f"----- {testid} -----")
prompt = Template(header_prompt).render(**out[testid])
for idx in out[testid]['results'].keys():
model_info = models[idx]
print(model_info, " ", out[testid]['results'][idx]['check_summary'])
prompt += Template(model_prompt).render(**out[testid]['results'][idx], id=model_info['id'])
prompt += Template(footer_prompt).render(**out[testid])
out[testid]['summary'] = chain.run(input=prompt)
print()
print(out[testid]['summary'])
print()
return data
def main(config: str, path: str = "results/", analyser: str = "", language: str = "javascript,python"):
cfg = yaml.safe_load(open(config))
for lang in language.split(','):
cfg['language'] = lang
print('Comparing results for', lang)
data = prepare(cfg['language'], path, cfg['models'])
data['config'] = copy(cfg)
data['config']['title'] += f" ({lang})"
data['analyser'] = analyser
if analyser != "":
analysis(data, analyser)
outfile = config.replace('.yaml', f'-{lang}.json')
with open(outfile, 'w') as f:
json.dump(data, f, indent=4)
if __name__ == "__main__":
fire.Fire(main)
| [
"{input}",
"\n---\nModel: {{id}}\nTest Result: {{check_summary}}\nTest Details:\n{{passing_tests}}{{failing_tests}}\nCode:\n```{{language}}\n{{code}}\n```\n",
"\n---\nAnalysis:",
"\nYou are going to evaluate the results of language models on a {{language}} programming challenge: {{task}}\nAutomated tests have been used to verify corectness each solution produced, a detailed description of the results of each test will be provided.\nFor each model, you will be provided the code produced by the model and the result of all tests.\nCompare and contrast the solutions each model produced. Do not repeat any of the generated code back to me. Highlight differences in solution approaches, test results, and provide a final summary of cohort performance on this challenge.\n\n",
"Write a {{language}} function {{Signature}} {{Input}} that returns {{Output}}"
] |
2024-01-10 | constantine77/prompt-engineering | chatgpt-retrieval~chatgpt.py | import os
import sys
import openai
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import DirectoryLoader, TextLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.llms import OpenAI
from langchain.vectorstores import Chroma
import constants
os.environ["OPENAI_API_KEY"] = constants.APIKEY
# Enable to cache & reuse the model to disk (for repeated queries on the same data)
PERSIST = False
query = sys.argv[1]
print(query)
if PERSIST and os.path.exists("persist"):
print("Reusing index...\n")
vectorstore = Chroma(persist_directory="persist",
embedding_function=OpenAIEmbeddings())
from langchain.indexes.vectorstore import VectorStoreIndexWrapper
index = VectorStoreIndexWrapper(vectorstore=vectorstore)
else:
loader = TextLoader('data.txt')
# This code can also import folders, including various filetypes like PDFs using the DirectoryLoader.
# loader = DirectoryLoader(".", glob="*.txt")
if PERSIST:
index = VectorstoreIndexCreator(
vectorstore_kwargs={"persist_directory": "persist"}).from_loaders([loader])
else:
index = VectorstoreIndexCreator().from_loaders([loader])
chain = RetrievalQA.from_chain_type(
llm=ChatOpenAI(model="gpt-3.5-turbo"),
retriever=index.vectorstore.as_retriever(search_kwargs={"k": 1}),
)
print(chain.run(query))
| [] |
2024-01-10 | gwbcho/dpo-replication | noises~ounoise.py | import numpy as np
# Taken from OpenAI baselines - baselines/ddpg/noise.py
class ActionNoise(object):
def reset(self):
pass
class NormalActionNoise(ActionNoise):
"""
Noise generated by the normal distribution.
Class Args:
mu: mean of dist
sigma: standard deviation
"""
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
def __call__(self):
return np.random.normal(self.mu, self.sigma)
def reset(self):
pass
def __repr__(self):
return 'NormalActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
class OrnsteinUhlenbeckActionNoise(ActionNoise):
def __init__(self, mu, sigma, theta=.15, dt=1e-2, x0=None):
self.theta = theta
self.mu = mu
self.sigma = sigma
self.dt = dt
self.x0 = x0
self.reset()
def __call__(self):
x = (self.x_prev + self.theta * (self.mu - self.x_prev) * self.dt + self.sigma
* np.sqrt(self.dt) * np.random.normal(size=self.mu.shape))
self.x_prev = x
return x
def reset(self):
self.x_prev = self.x0 if self.x0 is not None else np.zeros_like(self.mu)
def __repr__(self):
return 'OrnsteinUhlenbeckActionNoise(mu={}, sigma={})'.format(self.mu, self.sigma)
| [] |
2024-01-10 | JulienWakim/GP-Visit-Conversation-Capture | src~text_correction~text_correction.py | #Correcting input transcript
from openai import OpenAI
import os
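# strip the leading timestamp from each line and turn bracketed speaker tags like [SPEAKER_00] into "SPEAKER_00:" labels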
def process_string(input_string):
# Split the input string into lines
lines = input_string.split('\n')
# Process each line
for i in range(len(lines)):
# Remove the timestamp
words = lines[i].split(' ', 1)
if len(words) > 1:
lines[i] = words[1]
# Remove opening brackets
lines[i] = lines[i].replace('[', '')
# Replace closing brackets with a colon
lines[i] = lines[i].replace(']', ':')
# Join the processed lines back into a string
result_string = '\n'.join(lines)
return result_string
def combine_lines(text):
lines = text.split("\n")
combined_lines = []
current_speaker = ""
current_text = ""
for line in lines:
if not line.strip():
continue
speaker, sentence = line.split(":", 1)
if speaker == current_speaker:
current_text += " " + sentence.strip()
else:
if current_text:
combined_lines.append(f"{current_speaker}: {current_text}")
current_speaker = speaker
current_text = sentence.strip()
# Add the last speaker's text
if current_text:
combined_lines.append(f"{current_speaker}: {current_text}")
return "\n".join(combined_lines)
def correctText(fixed_output):
fixed_output = process_string(fixed_output)
fixed_output = combine_lines(fixed_output)
client = OpenAI(
api_key="",
)
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "Correct any grammar and spelling mistakes in the user dialogue transcript."},
{"role": "user", "content": fixed_output}
]
)
fixed_output = completion.choices[0].message.content
completion = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": "Which speaker is the doctor? Answer in one word."},
{"role": "user", "content": fixed_output}
]
)
doctor = completion.choices[0].message.content
if "SPEAKER_00" in doctor:
fixed_output = fixed_output.replace("SPEAKER_00", "Doctor")
fixed_output = fixed_output.replace("SPEAKER_01", "Patient")
# docGender = sp0
# patGender = sp1
else:
fixed_output = fixed_output.replace("SPEAKER_01", "Doctor")
fixed_output = fixed_output.replace("SPEAKER_00", "Patient")
# docGender = sp1
# patGender = sp0
print(fixed_output)
# Combine title and conversation
full_text = 'Conversation Script' + "\n\n" + fixed_output
# File name
file_name = "conversation_script.txt"
# Writing to file
with open(os.path.join('outputs',file_name), "w") as file:
file.write(full_text)
print(f"The conversation script has been saved as '{file_name}'.")
| [
"Which speaker is the doctor? Answer in one word.",
"Correct any grammar and spelling mistakes in the user dialogue transcript."
] |
2024-01-10 | JulienWakim/GP-Visit-Conversation-Capture | src~doctor_notes~note_generator.py | from openai import OpenAI
def generateNotes(fixed_output):
client = OpenAI(
# defaults to os.environ.get("OPENAI_API_KEY")
api_key='',
)
messages = []
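# replay the transcript as a chat history: doctor lines become assistant turns, everything else becomes user turns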
for line in fixed_output.strip().split('\n'):
speaker, content = line.split(':', 1)
role = "assistant" if "Doctor" in speaker else "user"
messages.append({"role": role, "content": content.strip()})
messages.append(
{"role": "user", "content": "Please summarize the conversation in the form of doctor's notes."}
)
print(messages)
chat_completion = client.chat.completions.create(
messages=messages,
model="gpt-3.5-turbo",
)
print(chat_completion.choices[0].message.content)
# summary_response = client.chat.completions.create(
# model="gpt-3.5-turbo",
# messages=messages
# )
# print(summary_response.choices[0].message['content'])
| [
"Please summarize the conversation in the form of doctor's notes."
] |
2024-01-10 | d68864767/CodeCompanion---AI-Powered-Coding-Assistant | documentation_generator.py | ```python
# Import necessary libraries
import openai
from openai.api_resources.completion import Completion
class DocumentationGenerator:
def __init__(self):
# Initialize OpenAI API with the secret key
openai.api_key = 'your-openai-api-key'
def generate(self, data):
# Extract the language and code snippet from the data
language = data.get('language')
code_snippet = data.get('code_snippet')
# Define the prompt for the OpenAI API
prompt = f"Generate comments and documentation for the following {language} code:\n{code_snippet}"
# Call the OpenAI API to generate the documentation
response = Completion.create(engine="text-davinci-002", prompt=prompt, max_tokens=300)
# Extract the generated documentation from the response
documentation = response.choices[0].text.strip()
# Return the generated documentation
return {'documentation': documentation}
```
| [
"Generate comments and documentation for the following PLACEHOLDER code:\nPLACEHOLDER"
] |
2024-01-10 | d68864767/CodeCompanion---AI-Powered-Coding-Assistant | code_reviewer.py | ```python
# Import necessary libraries
import openai
from openai.api_resources.completion import Completion
class CodeReviewer:
def __init__(self):
# Initialize OpenAI API with the secret key
openai.api_key = 'your-openai-api-key'
def review(self, data):
# Extract the language and code snippet from the data
language = data.get('language')
code_snippet = data.get('code_snippet')
# Define the prompt for the OpenAI API
prompt = f"Review the following {language} code for quality, adherence to coding standards, and potential refactoring opportunities:\n{code_snippet}"
# Call the OpenAI API to generate the review
response = Completion.create(engine="text-davinci-002", prompt=prompt, max_tokens=500)
# Extract the review from the response
review = response.choices[0].text.strip()
# Return the review
return {'review': review}
```
| [
"Review the following PLACEHOLDER code for quality, adherence to coding standards, and potential refactoring opportunities:\nPLACEHOLDER"
] |
2024-01-10 | d68864767/CodeCompanion---AI-Powered-Coding-Assistant | learning_helper.py | ```python
# Import necessary libraries
import openai
from openai.api_resources.completion import Completion
class LearningHelper:
def __init__(self):
# Initialize OpenAI API with the secret key
openai.api_key = 'your-openai-api-key'
def learn(self, data):
# Extract the language and topic from the data
language = data.get('language')
topic = data.get('topic')
# Define the prompt for the OpenAI API
prompt = f"Explain the topic '{topic}' in {language} programming language with examples and best practices."
# Call the OpenAI API to generate the explanation
response = Completion.create(engine="text-davinci-002", prompt=prompt, max_tokens=500)
# Extract the generated explanation from the response
explanation = response.choices[0].text.strip()
# Return the explanation
return {'explanation': explanation}
```
| [
"Explain the topic 'PLACEHOLDER' in PLACEHOLDER programming language with examples and best practices."
] |
2024-01-10 | d68864767/CodeCompanion---AI-Powered-Coding-Assistant | debugger.py | ```python
# Import necessary libraries
import openai
from openai.api_resources.completion import Completion
class Debugger:
def __init__(self):
# Initialize OpenAI API with the secret key
openai.api_key = 'your-openai-api-key'
def debug(self, data):
# Extract the language and error message from the data
language = data.get('language')
error_message = data.get('error_message')
# Define the prompt for the OpenAI API
prompt = f"I am a {language} program and I am getting the following error:\n{error_message}\nWhat could be causing this error and how can I fix it?"
# Call the OpenAI API to generate the debugging advice
response = Completion.create(engine="text-davinci-002", prompt=prompt, max_tokens=200)
# Extract the debugging advice from the response
debugging_advice = response.choices[0].text.strip()
# Return the debugging advice
return {'advice': debugging_advice}
```
| [
"I am a PLACEHOLDER program and I am getting the following error:\nPLACEHOLDER\nWhat could be causing this error and how can I fix it?"
] |
2024-01-10 | d68864767/CodeCompanion---AI-Powered-Coding-Assistant | workflow_customizer.py | ```python
# Import necessary libraries
import openai
from openai.api_resources.completion import Completion
class WorkflowCustomizer:
def __init__(self):
# Initialize OpenAI API with the secret key
openai.api_key = 'your-openai-api-key'
def customize(self, data):
# Extract the user preferences from the data
user_preferences = data.get('preferences')
# Define the prompt for the OpenAI API
prompt = f"Given the following user preferences for a coding workflow:\n{user_preferences}\nGenerate a customized workflow."
# Call the OpenAI API to generate the customized workflow
response = Completion.create(engine="text-davinci-002", prompt=prompt, max_tokens=200)
# Extract the customized workflow from the response
customized_workflow = response.choices[0].text.strip()
# Return the customized workflow
return {'workflow': customized_workflow}
```
| [
"Given the following user preferences for a coding workflow:\nPLACEHOLDER\nGenerate a customized workflow."
] |
2024-01-10 | d68864767/CodeCompanion---AI-Powered-Coding-Assistant | code_generator.py | ```python
# Import necessary libraries
import openai
from openai.api_resources.completion import Completion
class CodeGenerator:
def __init__(self):
# Initialize OpenAI API with the secret key
openai.api_key = 'your-openai-api-key'
def generate(self, data):
# Extract the language and description from the data
language = data.get('language')
description = data.get('description')
# Define the prompt for the OpenAI API
prompt = f"Translate the following English text to {language} code:\n{description}"
# Call the OpenAI API to generate the code
response = Completion.create(engine="text-davinci-002", prompt=prompt, max_tokens=100)
# Extract the generated code from the response
generated_code = response.choices[0].text.strip()
# Return the generated code
return {'code': generated_code}
```
| [
"Translate the following English text to PLACEHOLDER code:\nPLACEHOLDER"
] |
2024-01-10 | EagleW/Contextualized-Literature-based-Discovery | idea-sentence~models~GPT3.5Retr~fewshot.py | import os
from tqdm import tqdm
import openai
import json
from time import time
import random
from eval import Evaluator
from transformers import GPT2TokenizerFast
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
openai.organization = "" # type your own organization
openai.api_key = "" # type your own api key
evaluator = Evaluator()
f = open('e2t.json','r')
e2t = json.load(f)
prompt_ = 'Consider the following context: '
relation_template = [
'In that context, which %s can be used for %s, and why?\n',
'In that context, which %s do we use %s, and why?\n'
]
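# Question templates for the two relation directions: forward examples are
# formatted with the first template, backward examples with the second (each
# filled with the answer entity's type and the query entity).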
start_time = time()
forward_ = []
backward_ = []
quesid2ans_forward = {}
quesid2ans_backward = {}
filename = 'local_dataset/test.json'
with open(filename, 'r') as f:
for line in tqdm(f):
cur_data = json.loads(line)
input_ = cur_data['input']
entity = cur_data['entity']
output = cur_data['output']
context = cur_data['context']
retrieve = cur_data['retrieve']
type_e = e2t[entity].lower()
type_o = e2t[output].lower()
cc = prompt_ + cur_data['context']
prompt = [retrieve,cc]
if cur_data['forward']:
prompt.append(relation_template[0] % (type_o, entity))
else:
prompt.append(relation_template[1] % (type_o, entity))
prompt = '\n'.join(prompt)
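        # Legacy Completions call: with n=1 and best_of=10 the API samples ten
        # completions server-side and returns only the highest log-probability
        # one; stop=["."] truncates generation at the first period.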
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=1,
top_p=1,
n=1,
max_tokens=100,
best_of=10,
frequency_penalty=0,
presence_penalty=0,
stop=["."]
)
choices = response['choices']
pred = choices[0]['text'].strip()
if len(pred) > 0:
if pred[-1] != '.':
pred += '.'
else:
pred = ''
ref = cur_data['rel_sent']
tmp = {
'input': prompt,
'pred': pred,
'ground_truth': []
}
qid = cur_data["src_ids"]
quesid2ans_tmp = {qid: (pred, ref)}
if cur_data['forward']:
tttmp = {
'tail': ref,
'sentence': json.dumps([cur_data['id'], cur_data['year'], cur_data['rel_sent']])
}
tmp['ground_truth'].append(tttmp)
forward_.append(tmp)
quesid2ans_forward.update(quesid2ans_tmp)
else:
tttmp = {
'head': ref,
'sentence': json.dumps([cur_data['id'], cur_data['year'], cur_data['rel_sent']])
}
tmp['ground_truth'].append(tttmp)
backward_.append(tmp)
quesid2ans_backward.update(quesid2ans_tmp)
output_ = 'GPT3.5Retr_checkpoint'
os.makedirs(output_, exist_ok=True)
with open('{}/eval_forward.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write(json.dumps(forward_, ensure_ascii=False, indent=4))
with open('{}/eval_backward.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write(json.dumps(backward_, ensure_ascii=False, indent=4))
forward_metrics = evaluator.evaluate(quesid2ans_forward)
backward_metrics = evaluator.evaluate(quesid2ans_backward)
quesid2ans_total = quesid2ans_forward.copy()
quesid2ans_total.update(quesid2ans_backward)
metrics = evaluator.evaluate(quesid2ans_total)
with open('{}/metrics.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write('forward metrics: {}\n'.format(json.dumps(forward_metrics)))
writer.write('backward metrics: {}\n'.format(json.dumps(backward_metrics)))
writer.write('average metrics: {}\n'.format(json.dumps(metrics)))
print('Evaluation takes {} seconds'.format(round(time() - start_time, 3))) | [
"['In that context, which %s can be used for %s, and why?\\n', 'In that context, which %s do we use %s, and why?\\n']",
"[PLACEHOLDER, PLACEHOLDER]",
"Consider the following context: ",
"\n"
] |
2024-01-10 | EagleW/Contextualized-Literature-based-Discovery | idea-node~models~GPT3.5RND~fewshot.py | import os
from tqdm import tqdm
import openai
import json
from time import time
import random
from transformers import GPT2TokenizerFast
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
openai.organization = "" # type your own organization
openai.api_key = "" # type your own api key
examples = [[],[]]
prompts = [
'Suggest a %s that can be used for a natural language processing %s.',
'Suggest a %s for a natural language processing %s.']
relation_template1 = [
'Q: Which %s can be used for %s ? A: %s is done by using %s.',
'Q: Which %s do we use %s? A: We use %s for %s.'
]
relation_template = [
'Q: Which %s can be used for %s ? A: %s is done by using ',
'Q: Which %s do we use %s? A: We use %s for '
]
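# relation_template1 includes the gold answer and is used to format few-shot
# demonstrations from the training set; relation_template stops mid-sentence
# so the model completes it with the answer entity at test time.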
f = open('e2t.json','r')
e2t = json.load(f)
start_time = time()
forward_ = []
backward_ = []
mrrf, hit1f, hit3f, hit5f, hit10f = 0, 0, 0, 0, 0
mrrb, hit1b, hit3b, hit5b, hit10b = 0, 0, 0, 0, 0
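# Running sums for ranking metrics (MRR and hit@k), tracked separately for
# forward (tail prediction) and backward (head prediction) queries; a
# prediction's rank is its index among the returned choices plus one.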
count_f = 0
count_b = 0
filename = '../Dual_Encoder/local_dataset/train.json'
with open(filename, 'r') as f:
for line in tqdm(f):
cur_data = json.loads(line)
input_ = cur_data['input']
entity = cur_data['entity']
output = cur_data['output']
type_e = e2t[entity].lower()
type_o = e2t[output].lower()
context = 'Context: ' + cur_data['context']
prompt = []
if cur_data['forward']:
prompt.append(context)
prompt.append(relation_template1[0] % (type_o, entity, entity, output))
prompt = '\n'.join(prompt)
examples[0].append(prompt)
else:
prompt.append(context)
prompt.append(relation_template1[1] % (type_o, entity, entity, output))
prompt = '\n'.join(prompt)
examples[1].append(prompt)
print('Finish processing samples')
filename = '../Dual_Encoder/local_dataset/test.json'
with open(filename, 'r') as f:
for line in tqdm(f):
cur_data = json.loads(line)
input_ = cur_data['input']
entity = cur_data['entity']
output = cur_data['output']
type_e = e2t[entity].lower()
type_o = e2t[output].lower()
context = 'Context: ' + cur_data['context']
prompt = []
if cur_data['forward']:
prompt.append(prompts[0] % (type_o, type_e))
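            # Draw 5 random training demonstrations, resampling until they fit
            # in 2000 GPT-2 tokens so the full prompt stays inside the model's
            # context window (the exact budget is the authors' choice; a
            # 4097-token limit for text-davinci-003 is assumed here).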
txts = "\n".join(random.sample(examples[0], k=5))
while len(tokenizer(txts)['input_ids']) > 2000:
txts = "\n".join(random.sample(examples[0], k=5))
prompt.append(txts)
prompt.append(context)
prompt.append(relation_template[0] % (type_o, entity, entity))
else:
prompt.append(prompts[1] % (type_o, type_e))
txts = " ".join(random.sample(examples[1], k=5))
while len(tokenizer(txts)['input_ids']) > 2000:
txts = " ".join(random.sample(examples[1], k=5))
prompt.append(txts)
prompt.append(context)
prompt.append(relation_template[1] % (type_o, entity, entity))
prompt = '\n'.join(prompt)
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=1,
top_p=1,
n=15,
best_of=15,
frequency_penalty=0,
presence_penalty=0,
stop=[".", '\n']
)
choices = response['choices']
topk_score_info = []
ttmp = []
tcount, tmrr, thit1, thit3, thit5, thit10 = 0, 0, 0, 0, 0, 0
cc = 0
for tmp in choices:
txt = tmp['text'].strip()
idx = tmp['index']
if len(txt) != 0:
ttmp.append((txt, idx))
cc += 1
if cc > 9:
break
ttmp.sort(key = lambda x: x[1])
for x,_ in ttmp:
topk_score_info.append(x)
if txt == output:
tmrr += 1/(idx + 1)
thit1 += 1 if idx + 1 <= 1 else 0
thit3 += 1 if idx + 1 <= 3 else 0
thit5 += 1 if idx + 1 <= 5 else 0
thit10 += 1 if idx + 1 <= 10 else 0
if len(topk_score_info) < 10:
for _ in range(10-len(ttmp)):
topk_score_info.append('')
tcount += 1
tmp = {
'input': prompt,
'pred': topk_score_info[0],
'ground_truth': [],
'topk_score_info': json.dumps(topk_score_info)
}
if cur_data['forward']:
mrrf += tmrr
count_f += tcount
hit1f += thit1
hit3f += thit3
hit5f += thit5
hit10f += thit10
tttmp = {
'tail': output,
'sentence': json.dumps([cur_data['id'], cur_data['year'], cur_data['rel_sent']])
}
tmp['ground_truth'].append(tttmp)
forward_.append(tmp)
else:
mrrb += tmrr
count_b += tcount
hit1b += thit1
hit3b += thit3
hit5b += thit5
hit10b += thit10
tttmp = {
'head': output,
'sentence': json.dumps([cur_data['id'], cur_data['year'], cur_data['rel_sent']])
}
tmp['ground_truth'].append(tttmp)
backward_.append(tmp)
tmp['hit1'] = thit1
tmp['hit5'] = thit5
tmp['hit10'] = thit10
output_ = 'GPT3.5RND_checkpoint'
os.makedirs(output_, exist_ok=True)
with open('{}/eval_forward.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write(json.dumps(forward_, ensure_ascii=False, indent=4))
with open('{}/eval_backward.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write(json.dumps(backward_, ensure_ascii=False, indent=4))
forward_metrics = {'mrr': mrrf, 'hit@1': hit1f, 'hit@3': hit3f, 'hit@5': hit5f, 'hit@10': hit10f}
forward_metrics = {k: round(v / count_f, 4) for k, v in forward_metrics.items()}
backward_metrics = {'mrr': mrrb, 'hit@1': hit1b, 'hit@3': hit3b, 'hit@5': hit5b, 'hit@10': hit10b}
backward_metrics = {k: round(v / count_b, 4) for k, v in backward_metrics.items()}
metrics = {k: round((forward_metrics[k] * count_f + backward_metrics[k] * count_b) / (count_b + count_f), 4) for k in forward_metrics}
with open('{}/metrics.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write('forward metrics: {}\n'.format(json.dumps(forward_metrics)))
writer.write('backward metrics: {}\n'.format(json.dumps(backward_metrics)))
writer.write('average metrics: {}\n'.format(json.dumps(metrics)))
print('Evaluation takes {} seconds'.format(round(time() - start_time, 3)))
| [
"\n",
"['Q: Which %s can be used for %s ? A: %s is done by using ', 'Q: Which %s do we use %s? A: We use %s for ']",
"['Q: Which %s can be used for %s ? A: %s is done by using %s.', 'Q: Which %s do we use %s? A: We use %s for %s.']",
"['Suggest a %s that can be used for a natural language processing %s.', 'Suggest a %s for a natural language processing %s.']",
"[]"
] |
2024-01-10 | EagleW/Contextualized-Literature-based-Discovery | idea-node~models~GPT3.5Retr~fewshot.py | import os
from tqdm import tqdm
import openai
import json
from time import time
openai.organization = "" # type your own organization
openai.api_key = "" # type your own api key
prompts = [
'Suggest a %s that can be used for a natural language processing %s.',
'Suggest a %s for a natural language processing %s.']
relation_template = [
'Q: Which %s can be used for %s ? A: %s is done by using ',
'Q: Which %s do we use %s? A: We use %s for '
]
f = open('e2t.json','r')
e2t = json.load(f)
filename = 'local_dataset/test.json'
start_time = time()
forward_ = []
backward_ = []
mrrf, hit1f, hit3f, hit5f, hit10f = 0, 0, 0, 0, 0
mrrb, hit1b, hit3b, hit5b, hit10b = 0, 0, 0, 0, 0
count_f = 0
count_b = 0
with open(filename, 'r') as f:
for line in tqdm(f):
cur_data = json.loads(line)
input_ = cur_data['input']
entity = cur_data['entity']
output = cur_data['output']
type_e = e2t[entity].lower()
type_o = e2t[output].lower()
context = 'Context: ' + cur_data['context']
prompt = []
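        # Prompt layout: task instruction, retrieved neighbor text, the paper
        # context, then a cloze-style question for the model to complete.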
if cur_data['forward']:
prompt.append(prompts[0] % (type_o, type_e))
prompt.append(cur_data['retrieve'])
prompt.append(context)
prompt.append(relation_template[0] % (type_o, entity, entity))
else:
prompt.append(prompts[1] % (type_o, type_e))
prompt.append(cur_data['retrieve'])
prompt.append(context)
prompt.append(relation_template[1] % (type_o, entity, entity))
prompt = '\n'.join(prompt)
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=1,
top_p=1,
n=15,
best_of=15,
frequency_penalty=0,
presence_penalty=0,
stop=[".", '\n']
)
choices = response['choices']
topk_score_info = []
ttmp = []
tcount, tmrr, thit1, thit3, thit5, thit10 = 0, 0, 0, 0, 0, 0
cc = 0
for tmp in choices:
txt = tmp['text'].strip()
idx = tmp['index']
if len(txt) != 0:
ttmp.append((txt, idx))
cc += 1
if cc > 9:
break
ttmp.sort(key = lambda x: x[1])
        for x, idx in ttmp:
            topk_score_info.append(x)
            if x == output:
tmrr += 1/(idx + 1)
thit1 += 1 if idx + 1 <= 1 else 0
thit3 += 1 if idx + 1 <= 3 else 0
thit5 += 1 if idx + 1 <= 5 else 0
thit10 += 1 if idx + 1 <= 10 else 0
if len(topk_score_info) < 10:
for _ in range(10-len(ttmp)):
topk_score_info.append('')
tcount += 1
tmp = {
'input': prompt,
'pred': topk_score_info[0],
'ground_truth': [],
'topk_score_info': json.dumps(topk_score_info)
}
if cur_data['forward']:
mrrf += tmrr
count_f += tcount
hit1f += thit1
hit3f += thit3
hit5f += thit5
hit10f += thit10
tttmp = {
'tail': output,
'sentence': json.dumps([cur_data['id'], cur_data['year'], cur_data['rel_sent']])
}
tmp['ground_truth'].append(tttmp)
forward_.append(tmp)
else:
mrrb += tmrr
count_b += tcount
hit1b += thit1
hit3b += thit3
hit5b += thit5
hit10b += thit10
tttmp = {
'head': output,
'sentence': json.dumps([cur_data['id'], cur_data['year'], cur_data['rel_sent']])
}
tmp['ground_truth'].append(tttmp)
backward_.append(tmp)
tmp['hit1'] = thit1
tmp['hit5'] = thit5
tmp['hit10'] = thit10
output_ = 'GPT3.5Retr_checkpoint'
os.makedirs(output_, exist_ok=True)
with open('{}/eval_forward.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write(json.dumps(forward_, ensure_ascii=False, indent=4))
with open('{}/eval_backward.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write(json.dumps(backward_, ensure_ascii=False, indent=4))
forward_metrics = {'mrr': mrrf, 'hit@1': hit1f, 'hit@3': hit3f, 'hit@5': hit5f, 'hit@10': hit10f}
forward_metrics = {k: round(v / count_f, 4) for k, v in forward_metrics.items()}
backward_metrics = {'mrr': mrrb, 'hit@1': hit1b, 'hit@3': hit3b, 'hit@5': hit5b, 'hit@10': hit10b}
backward_metrics = {k: round(v / count_b, 4) for k, v in backward_metrics.items()}
metrics = {k: round((forward_metrics[k] * count_f + backward_metrics[k] * count_b) / (count_b + count_f), 4) for k in forward_metrics}
with open('{}/metrics.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write('forward metrics: {}\n'.format(json.dumps(forward_metrics)))
writer.write('backward metrics: {}\n'.format(json.dumps(backward_metrics)))
writer.write('average metrics: {}\n'.format(json.dumps(metrics)))
print('Evaluation takes {} seconds'.format(round(time() - start_time, 3)))
| [
"['Suggest a %s that can be used for a natural language processing %s.', 'Suggest a %s for a natural language processing %s.']",
"\n",
"[]",
"['Q: Which %s can be used for %s ? A: %s is done by using ', 'Q: Which %s do we use %s? A: We use %s for ']"
] |
2024-01-10 | EagleW/Contextualized-Literature-based-Discovery | idea-node~models~GPT3.5RND%2BNBR~fewshot.py | import os
from tqdm import tqdm
import openai
import json
from pprint import pprint
from time import time
import random
from transformers import GPT2TokenizerFast
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
openai.organization = "" # type your own organization
openai.api_key = "" # type your own api key
f = open('e2t.json','r')
e2t = json.load(f)
prompts = [
'Suggest a %s that can be used for a natural language processing %s.',
'Suggest a %s for a natural language processing %s.']
relation_template1 = [
'Q: Which %s can be used for %s? A: %s is done by using %s.',
'Q: Which %s do we use %s? A: We use %s for %s.'
]
relation_template = [
'Q: Which %s can be used for %s ? A: %s is done by using ',
'Q: Which %s do we use %s? A: We use %s for '
]
dir_path = '../T5+CL+NBR/local_%s_dataset/%s.json'
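# Each fname selects a different neighbor-augmented dataset variant (our
# reading: 'ct', 'sn', and 'kg' correspond to citation-, semantic-, and
# knowledge-graph-based neighborhoods); the same few-shot pipeline runs once
# per variant.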
for fname in ['ct', 'sn', 'kg']:
start_time = time()
forward_ = []
backward_ = []
mrrf, hit1f, hit3f, hit5f, hit10f = 0, 0, 0, 0, 0
mrrb, hit1b, hit3b, hit5b, hit10b = 0, 0, 0, 0, 0
count_f = 0
count_b = 0
examples = [[],[]]
in_name = dir_path % (fname, 'train')
with open(in_name, 'r') as file_j:
for idxn, line in tqdm(enumerate(file_j), "Encoding"):
pdf_dict = json.loads(line)
input_ = pdf_dict['input'].lower()
if '| retrieve:' in input_:
input_t, other = input_.split(' | retrieve: ')
if ' | context: ' in other:
retrieve, context = other.split(' | context: ')
else:
retrieve, context = other.split('| context: ')
cc1 = 'Consider the following context: ' + context + ' The retrieval results are: ' + retrieve
elif ' | context:' in input_:
retrieve = ''
input_t, context = input_.split('| context: ')
cc1 = 'Consider the following context: ' + context
type_ = input_t.split()[-1]
entity = pdf_dict['entity']
output = pdf_dict['output']
rel_sent = pdf_dict['rel_sent']
type_e = e2t[entity].lower()
type_o = e2t[output].lower()
prompt = []
if pdf_dict['forward']:
prompt.append(cc1)
prompt.append(relation_template1[0] % (type_o, entity, entity, output))
prompt = '\n'.join(prompt)
examples[0].append(prompt)
else:
prompt.append(cc1)
prompt.append(relation_template1[1] % (type_o, entity, entity, output))
prompt = '\n'.join(prompt)
examples[1].append(prompt)
    print('Finish processing samples')
filename = dir_path % (fname, 'test')
with open(filename, 'r') as f:
for line in tqdm(f):
cur_data = json.loads(line)
input_ = cur_data['input'].lower()
if '| retrieve:' in input_:
input_t, other = input_.split(' | retrieve: ')
if ' | context: ' in other:
retrieve, context = other.split(' | context: ')
else:
retrieve, context = other.split('| context: ')
cc = 'Consider the following context: ' + context + ' The retrieval results are: ' + retrieve
elif ' | context:' in input_:
retrieve = ''
input_t, context = input_.split('| context: ')
cc = 'Consider the following context: ' + context
entity = cur_data['entity']
output = cur_data['output']
type_e = e2t[entity].lower()
type_o = e2t[output].lower()
if cur_data['forward']:
pt1 = prompts[0] % (type_o, type_e)
txts = "\n".join(random.sample(examples[0], k=5))
while len(tokenizer(txts)['input_ids']) > 1600:
txts = "\n".join(random.sample(examples[0], k=5))
prompt = [pt1, txts,cc]
prompt.append(relation_template[0] % (type_o, entity, entity))
else:
pt2 = prompts[1] % (type_o, type_e)
txts = " ".join(random.sample(examples[1], k=5))
while len(tokenizer(txts)['input_ids']) > 1600:
txts = " ".join(random.sample(examples[1], k=5))
prompt = [pt2,txts,cc]
prompt.append(relation_template[1] % (type_o, entity, entity))
prompt = '\n'.join(prompt)
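            # Fallback: if the assembled prompt still exceeds 2048 GPT-2
            # tokens, rebuild it with a tighter 1024-token budget for the
            # sampled demonstrations.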
if len(tokenizer(prompt)['input_ids']) > 2048:
if cur_data['forward']:
pt1 = prompts[0] % (type_o, type_e)
txts = "\n".join(random.sample(examples[0], k=5))
while len(tokenizer(txts)['input_ids']) > 1024:
txts = "\n".join(random.sample(examples[0], k=5))
prompt = [pt1, txts, cc]
prompt.append(relation_template[0] % (type_o, entity, entity))
else:
pt2 = prompts[1] % (type_o, type_e)
txts = " ".join(random.sample(examples[1], k=5))
while len(tokenizer(txts)['input_ids']) > 1024:
txts = " ".join(random.sample(examples[1], k=5))
prompt = [pt2, txts, cc]
prompt.append(relation_template[1] % (type_o, entity, entity))
prompt = '\n'.join(prompt)
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=1,
top_p=1,
n=15,
best_of=15,
frequency_penalty=0,
presence_penalty=0,
stop=[".", '\n']
)
choices = response['choices']
topk_score_info = []
ttmp = []
tcount, tmrr, thit1, thit3, thit5, thit10 = 0, 0, 0, 0, 0, 0
cc = 0
for tmp in choices:
txt = tmp['text'].strip()
idx = tmp['index']
if len(txt) != 0:
ttmp.append((txt, idx))
cc += 1
if cc > 9:
break
ttmp.sort(key = lambda x: x[1])
            for x, idx in ttmp:
                topk_score_info.append(x)
                if x == output:
tmrr += 1/(idx + 1)
thit1 += 1 if idx + 1 <= 1 else 0
thit3 += 1 if idx + 1 <= 3 else 0
thit5 += 1 if idx + 1 <= 5 else 0
thit10 += 1 if idx + 1 <= 10 else 0
if len(topk_score_info) < 10:
for _ in range(10-len(ttmp)):
topk_score_info.append('')
tcount += 1
tmp = {
'input': prompt,
'pred': topk_score_info[0],
'ground_truth': [],
'topk_score_info': json.dumps(topk_score_info)
}
if cur_data['forward']:
mrrf += tmrr
count_f += tcount
hit1f += thit1
hit3f += thit3
hit5f += thit5
hit10f += thit10
tttmp = {
'tail': output,
'sentence': json.dumps([cur_data['id'], cur_data['year'], cur_data['rel_sent']])
}
tmp['ground_truth'].append(tttmp)
forward_.append(tmp)
else:
mrrb += tmrr
count_b += tcount
hit1b += thit1
hit3b += thit3
hit5b += thit5
hit10b += thit10
tttmp = {
'head': output,
'sentence': json.dumps([cur_data['id'], cur_data['year'], cur_data['rel_sent']])
}
tmp['ground_truth'].append(tttmp)
backward_.append(tmp)
tmp['hit1'] = thit1
tmp['hit5'] = thit5
tmp['hit10'] = thit10
output_ = 'GPT3.5Rnd+%s_checkpoint' % fname
os.makedirs(output_, exist_ok=True)
with open('{}/eval_forward.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write(json.dumps(forward_, ensure_ascii=False, indent=4))
with open('{}/eval_backward.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write(json.dumps(backward_, ensure_ascii=False, indent=4))
forward_metrics = {'mrr': mrrf, 'hit@1': hit1f, 'hit@3': hit3f, 'hit@5': hit5f, 'hit@10': hit10f}
forward_metrics = {k: round(v / count_f, 4) for k, v in forward_metrics.items()}
backward_metrics = {'mrr': mrrb, 'hit@1': hit1b, 'hit@3': hit3b, 'hit@5': hit5b, 'hit@10': hit10b}
backward_metrics = {k: round(v / count_b, 4) for k, v in backward_metrics.items()}
metrics = {k: round((forward_metrics[k] * count_f + backward_metrics[k] * count_b) / (count_b + count_f), 4) for k in forward_metrics}
with open('{}/metrics.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write('forward metrics: {}\n'.format(json.dumps(forward_metrics)))
writer.write('backward metrics: {}\n'.format(json.dumps(backward_metrics)))
writer.write('average metrics: {}\n'.format(json.dumps(metrics)))
print('Evaluation takes {} seconds'.format(round(time() - start_time, 3)))
| [
"['Q: Which %s can be used for %s? A: %s is done by using %s.', 'Q: Which %s do we use %s? A: We use %s for %s.']",
"\n",
"[PLACEHOLDER, PLACEHOLDER, PLACEHOLDER]",
"['Q: Which %s can be used for %s ? A: %s is done by using ', 'Q: Which %s do we use %s? A: We use %s for ']",
"['Suggest a %s that can be used for a natural language processing %s.', 'Suggest a %s for a natural language processing %s.']",
"[]"
] |
2024-01-10 | EagleW/Contextualized-Literature-based-Discovery | idea-sentence~models~GPT3.5RND~fewshot.py | import os
from tqdm import tqdm
import openai
import json
from time import time
import random
from eval import Evaluator
from transformers import GPT2TokenizerFast
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
openai.organization = "" # type your own organization
openai.api_key = "" # type your own api key
evaluator = Evaluator()
f = open('e2t.json','r')
e2t = json.load(f)
prompt_ = 'Consider the following context: '
relation_template = [
'In that context, which %s can be used for %s, and why?\n',
'In that context, which %s do we use %s, and why?\n'
]
relation_template1 = [
'In that context, which %s can be used for %s, and why?\n%s',
'In that context, which %s do we use %s, and why?\n%s'
]
start_time = time()
forward_ = []
backward_ = []
quesid2ans_forward = {}
quesid2ans_backward = {}
examples = [[],[]]
in_name = '../T5/local_context_dataset/train.json'
with open(in_name, 'r') as file_j:
for idxn, line in tqdm(enumerate(file_j), "Encoding"):
pdf_dict = json.loads(line)
input_t, cc = pdf_dict['input'].split('| context: ')
type_ = input_t.split()[-1]
context = input_t + ' context: ' + cc
entity = pdf_dict['entity']
output = pdf_dict['output']
rel_sent = pdf_dict['rel_sent']
type_e = e2t[entity].lower()
type_o = e2t[output].lower()
prompt = []
cc1 = 'Consider the following context: ' + cc
if pdf_dict['forward']:
prompt.append(cc1)
prompt.append(relation_template1[0] % (type_o, entity, rel_sent))
prompt = '\n'.join(prompt)
examples[0].append(prompt)
else:
prompt.append(cc1)
prompt.append(relation_template1[1] % (type_o, entity, rel_sent))
prompt = '\n'.join(prompt)
examples[1].append(prompt)
print('Finish processing samples')
filename = 'local_gpt3_dataset/test.json'
with open(filename, 'r') as f:
for line in tqdm(f):
cur_data = json.loads(line)
input_ = cur_data['input']
entity = cur_data['entity']
output = cur_data['output']
context = cur_data['context']
retrieve = cur_data['retrieve']
type_e = e2t[entity].lower()
type_o = e2t[output].lower()
cc = prompt_ + cur_data['context']
if cur_data['forward']:
txts = "\n".join(random.sample(examples[0], k=5))
while len(tokenizer(txts)['input_ids']) > 2000:
txts = "\n".join(random.sample(examples[0], k=5))
prompt = [txts,cc]
prompt.append(relation_template[0] % (type_o, entity))
else:
txts = " ".join(random.sample(examples[1], k=5))
while len(tokenizer(txts)['input_ids']) > 2000:
txts = " ".join(random.sample(examples[1], k=5))
prompt = [txts,cc]
prompt.append(relation_template[1] % (type_o, entity))
prompt = '\n'.join(prompt)
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=1,
top_p=1,
n=1,
max_tokens=100,
best_of=10,
frequency_penalty=0,
presence_penalty=0,
stop=["."]
)
choices = response['choices']
pred = choices[0]['text'].strip()
        if len(pred) > 0:
            if pred[-1] != '.':
                pred += '.'
        else:
            pred = ''
ref = cur_data['rel_sent']
tmp = {
'input': prompt,
'pred': pred,
'ground_truth': []
}
qid = cur_data["src_ids"]
quesid2ans_tmp = {qid: (pred, ref)}
if cur_data['forward']:
tttmp = {
'tail': ref,
'sentence': json.dumps([cur_data['id'], cur_data['year'], cur_data['rel_sent']])
}
tmp['ground_truth'].append(tttmp)
forward_.append(tmp)
quesid2ans_forward.update(quesid2ans_tmp)
else:
tttmp = {
'head': ref,
'sentence': json.dumps([cur_data['id'], cur_data['year'], cur_data['rel_sent']])
}
tmp['ground_truth'].append(tttmp)
backward_.append(tmp)
quesid2ans_backward.update(quesid2ans_tmp)
output_ = 'GPT3.5RND_checkpoint'
os.makedirs(output_, exist_ok=True)
with open('{}/eval_forward.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write(json.dumps(forward_, ensure_ascii=False, indent=4))
with open('{}/eval_backward.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write(json.dumps(backward_, ensure_ascii=False, indent=4))
forward_metrics = evaluator.evaluate(quesid2ans_forward)
backward_metrics = evaluator.evaluate(quesid2ans_backward)
quesid2ans_total = quesid2ans_forward.copy()
quesid2ans_total.update(quesid2ans_backward)
metrics = evaluator.evaluate(quesid2ans_total)
with open('{}/metrics.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write('forward metrics: {}\n'.format(json.dumps(forward_metrics)))
writer.write('backward metrics: {}\n'.format(json.dumps(backward_metrics)))
writer.write('average metrics: {}\n'.format(json.dumps(metrics)))
print('Evaluation takes {} seconds'.format(round(time() - start_time, 3))) | [
"\n",
"[]",
"[PLACEHOLDER, PLACEHOLDER]",
"['In that context, which %s can be used for %s, and why?\\n%s', 'In that context, which %s do we use %s, and why?\\n%s']",
"['In that context, which %s can be used for %s, and why?\\n', 'In that context, which %s do we use %s, and why?\\n']",
"Consider the following context: "
] |
2024-01-10 | EagleW/Contextualized-Literature-based-Discovery | idea-sentence~models~GPT3.5RND%2BNBR~fewshot.py | import os
from tqdm import tqdm
import openai
import json
from time import time
import random
from eval import Evaluator
from transformers import GPT2TokenizerFast
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
openai.organization = "" # type your own organization
openai.api_key = "" # type your own api key
evaluator = Evaluator()
f = open('e2t.json','r')
e2t = json.load(f)
relation_template = [
'In that context, which %s can be used for %s, and why?\n',
'In that context, which %s do we use %s, and why?\n'
]
relation_template1 = [
'In that context, which %s can be used for %s, and why?\n%s',
'In that context, which %s do we use %s, and why?\n%s'
]
dir_path = '../T5+CL+NBR/local_%s_dataset/%s.json'
for fname in ['ct', 'sn', 'kg']:
start_time = time()
forward_ = []
backward_ = []
quesid2ans_forward = {}
quesid2ans_backward = {}
examples = [[],[]]
in_name = dir_path % (fname, 'train')
print(in_name)
with open(in_name, 'r') as file_j:
for idxn, line in tqdm(enumerate(file_j), "Encoding"):
pdf_dict = json.loads(line)
input_ = pdf_dict['input'].lower()
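            # T5-style inputs pack fields as '... | retrieve: ... | context: ...';
            # split them back out and rephrase as a natural-language prompt,
            # appending the retrieval results when present.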
if '| retrieve:' in input_:
input_t, other = input_.split(' | retrieve: ')
retrieve, context = other.split(' | context: ')
cc1 = 'Consider the following context: ' + context + ' The retrieval results are: ' + retrieve
elif ' | context:' in input_:
retrieve = ''
input_t, context = pdf_dict['input'].split('| context: ')
cc1 = 'Consider the following context: ' + context
type_ = input_t.split()[-1]
entity = pdf_dict['entity']
output = pdf_dict['output']
rel_sent = pdf_dict['rel_sent']
type_e = e2t[entity].lower()
type_o = e2t[output].lower()
prompt = []
if pdf_dict['forward']:
prompt.append(cc1)
prompt.append(relation_template1[0] % (type_o, entity, rel_sent))
prompt = '\n'.join(prompt)
examples[0].append(prompt)
else:
prompt.append(cc1)
prompt.append(relation_template1[1] % (type_o, entity, rel_sent))
prompt = '\n'.join(prompt)
examples[1].append(prompt)
    print('Finish processing samples')
filename = dir_path % (fname, 'test')
with open(filename, 'r') as f:
for line in tqdm(f):
cur_data = json.loads(line)
input_ = cur_data['input'].lower()
entity = cur_data['entity']
output = cur_data['output'].lower()
if '| retrieve:' in input_:
input_t, other = input_.split(' | retrieve: ')
retrieve, context = other.split(' | context: ')
cc = 'Consider the following context: ' + context + ' The retrieval results are: ' + retrieve
elif ' | context:' in input_:
retrieve = ''
input_t, context = input_.split('| context: ')
cc = 'Consider the following context: ' + context
type_e = e2t[entity].lower()
type_o = e2t[output].lower()
if cur_data['forward']:
txts = "\n".join(random.sample(examples[0], k=5))
while len(tokenizer(txts)['input_ids']) > 1600:
txts = "\n".join(random.sample(examples[0], k=5))
prompt = [txts,cc]
prompt.append(relation_template[0] % (type_o, entity))
else:
txts = " ".join(random.sample(examples[1], k=5))
while len(tokenizer(txts)['input_ids']) > 1600:
txts = " ".join(random.sample(examples[1], k=5))
prompt = [txts,cc]
prompt.append(relation_template[1] % (type_o, entity))
prompt = '\n'.join(prompt)
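            # If the assembled prompt still exceeds 2048 GPT-2 tokens, resample
            # the demonstrations under a halved 1024-token budget.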
if len(tokenizer(prompt)['input_ids']) > 2048:
if cur_data['forward']:
txts = "\n".join(random.sample(examples[0], k=5))
while len(tokenizer(txts)['input_ids']) > 1024:
txts = "\n".join(random.sample(examples[0], k=5))
prompt = [txts,cc]
prompt.append(relation_template[0] % (type_o, entity))
else:
txts = " ".join(random.sample(examples[1], k=5))
while len(tokenizer(txts)['input_ids']) > 1024:
txts = " ".join(random.sample(examples[1], k=5))
prompt = [txts,cc]
prompt.append(relation_template[1] % (type_o, entity))
prompt = '\n'.join(prompt)
response = openai.Completion.create(
model="text-davinci-003",
prompt=prompt,
temperature=1,
top_p=1,
n=1,
max_tokens=100,
best_of=10,
frequency_penalty=0,
presence_penalty=0,
stop=["."]
)
choices = response['choices']
pred = choices[0]['text'].strip()
if len(pred) > 0:
if pred[-1] != '.':
pred += '.'
else:
pred = ' '
ref = cur_data['rel_sent']
tmp = {
'input': prompt,
'pred': pred,
'ground_truth': []
}
qid = cur_data["src_ids"]
quesid2ans_tmp = {qid: (pred, ref)}
if cur_data['forward']:
tttmp = {
'tail': ref,
'sentence': json.dumps([cur_data['id'], cur_data['year'], cur_data['rel_sent']])
}
tmp['ground_truth'].append(tttmp)
forward_.append(tmp)
quesid2ans_forward.update(quesid2ans_tmp)
else:
tttmp = {
'head': ref,
'sentence': json.dumps([cur_data['id'], cur_data['year'], cur_data['rel_sent']])
}
tmp['ground_truth'].append(tttmp)
backward_.append(tmp)
quesid2ans_backward.update(quesid2ans_tmp)
output_ = 'GPT3.5Rnd+%s_checkpoint' % fname
os.makedirs(output_, exist_ok=True)
with open('{}/eval_forward.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write(json.dumps(forward_, ensure_ascii=False, indent=4))
with open('{}/eval_backward.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write(json.dumps(backward_, ensure_ascii=False, indent=4))
forward_metrics = evaluator.evaluate(quesid2ans_forward)
backward_metrics = evaluator.evaluate(quesid2ans_backward)
quesid2ans_total = quesid2ans_forward.copy()
quesid2ans_total.update(quesid2ans_backward)
metrics = evaluator.evaluate(quesid2ans_total)
with open('{}/metrics.json'.format(output_), 'w', encoding='utf-8') as writer:
writer.write('forward metrics: {}\n'.format(json.dumps(forward_metrics)))
writer.write('backward metrics: {}\n'.format(json.dumps(backward_metrics)))
writer.write('average metrics: {}\n'.format(json.dumps(metrics)))
print('Evaluation takes {} seconds'.format(round(time() - start_time, 3))) | [
"\n",
"[]",
"[PLACEHOLDER, PLACEHOLDER]",
"['In that context, which %s can be used for %s, and why?\\n%s', 'In that context, which %s do we use %s, and why?\\n%s']",
"['In that context, which %s can be used for %s, and why?\\n', 'In that context, which %s do we use %s, and why?\\n']"
] |
2024-01-10 | lehenbauer/karls_chatgpt_helpers | karls_chatgpt_helpers~gptshell~__main__.py | #!/usr/bin/env python3
import argparse
import code
import json
import os
import readline
import requests
import subprocess
import tempfile
import time
import openai
import karls_chatgpt_helpers
PROMPT = "gpt> "
def run_editor():
editor = os.environ.get('EDITOR', 'vi')
with tempfile.NamedTemporaryFile(mode='w+', delete=True) as tmpfile:
tmpfile.close() # close the file so that the editor can open it
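        # With delete=True the file is removed as soon as it is closed, so the
        # editor creates a fresh file at this path; the exists() check below
        # covers the case where the user quit without saving.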
# open the file in the user's preferred editor
os.system(f'{editor} {tmpfile.name}')
if os.path.exists(tmpfile.name):
# read the contents of the temporary file
with open(tmpfile.name, 'r') as f:
contents = f.read()
# delete the temporary file
os.unlink(tmpfile.name)
return contents.strip()
else:
return ''
def show_history(history):
for row in history:
print(row)
def converse(g):
# Prompt the user for input in a loop
while True:
try:
role = 'user'
print()
line = input(PROMPT)
print()
if line.startswith('%'):
if line.startswith('%list'):
print(g.history)
continue
elif line.startswith('%edit') or line.startswith('%sysedit'):
role = 'system' if line.startswith('%sysedit') else 'user'
# call the run_editor function to edit the input
line = run_editor()
# no continue here, we want to fall through and send the
# user's input to the chat API
elif line.startswith('%load'):
# Code for %load command
filename = line.split()[1]
g.load(filename)
continue
elif line.startswith('%save'):
# Code for %save command
filename = line.split()[1]
g.save(filename)
continue
elif line.startswith('%yload'):
                    # Code for %yload command
filename = line.split()[1]
g.load_yaml(filename)
continue
elif line.startswith('%ysave'):
# Code for %ysave command
filename = line.split()[1]
g.save_yaml(filename)
continue
elif line.startswith('%jload'):
# Code for %jload command
filename = line.split()[1]
g.load_json(filename)
continue
elif line.startswith('%jsave'):
# Code for %jsave command
filename = line.split()[1]
g.save_json(filename)
continue
elif line.startswith('%history'):
# Code for %history command
show_history(g.history)
continue
elif line.startswith('%!'):
# Code for shell escape
command = line[2:].strip()
subprocess.run(command, shell=True)
continue
elif line.startswith('%interact'):
# Code for interactive Python interpreter
print("Entering Python interpreter interactively... Send EOF to resume, exit() or quit() to exit.")
code.interact(local=locals())
continue
elif line.startswith('%exit'):
# Code for exit command
break
else:
print("Unrecognized % command, % commands are %list, %edit, %sysedit, %load, %save, %jload, %jsave, %yload, %ysave, %!, %interact and %exit")
continue
if line.startswith('s:'):
role = 'system'
line = line[2:]
if line == '':
continue
# Send the user input to ChatGPT
g.streaming_chat(line, role=role)
except EOFError:
break
except KeyboardInterrupt:
print('interrupt')
continue
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--system-file', help='the system prompt file to use')
parser.add_argument('-i', '--load', help='load a session from a file')
parser.add_argument('-w', '--save', help='save the session to a session file')
parser.add_argument('-m', '--model', type=str, default="gpt-3.5-turbo", help='Model used for response generation')
parser.add_argument('-t', '--temperature', type=float, default=0.7, help='Temperature for response generation')
args = parser.parse_args()
karls_chatgpt_helpers.openai_api_key_set_or_die()
g = karls_chatgpt_helpers.GPTChatSession(
model=args.model,
temperature=args.temperature,
debug=False
)
if args.system_file:
with open(args.system_file, 'r') as f:
system_prompt = f.read()
g.streaming_chat(system_prompt, role='system')
if args.load:
g.load(args.load)
converse(g)
if args.save:
g.save(args.save)
| [
"gpt> "
] |
2024-01-10 | davidscanlan/Medical-Coding-Automation | server~run_generation.py | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/Transformer-XL/XLNet)
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
from tqdm import trange
import torch
import torch.nn.functional as F
import numpy as np
from pytorch_transformers import GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig
from pytorch_transformers import GPT2LMHeadModel, GPT2Tokenizer
from pytorch_transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
from pytorch_transformers import XLNetLMHeadModel, XLNetTokenizer
from pytorch_transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (
GPT2Config, OpenAIGPTConfig, XLNetConfig, TransfoXLConfig)), ())
MODEL_CLASSES = {
'gpt2': (GPT2LMHeadModel, GPT2Tokenizer),
'openai-gpt': (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
'xlnet': (XLNetLMHeadModel, XLNetTokenizer),
'transfo-xl': (TransfoXLLMHeadModel, TransfoXLTokenizer),
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """ In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[
0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(
F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[...,
1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
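# Usage sketch: top_k_top_p_filtering(logits, top_k=50, top_p=0.95) first
# restricts sampling to the 50 highest-probability tokens, then masks any token
# outside the smallest set whose cumulative probability reaches 0.95; masked
# logits become -inf, so softmax assigns them zero mass.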
def sample_sequence(model, length, context, num_samples=1, temperature=1, top_k=0, top_p=0.0, is_xlnet=False, device='cpu'):
context = torch.tensor(context, dtype=torch.long, device=device)
context = context.unsqueeze(0).repeat(num_samples, 1)
generated = context
with torch.no_grad():
for _ in trange(length):
inputs = {'input_ids': generated}
if is_xlnet:
# XLNet is a direct (predict same token, not next token) and bi-directional model by default
# => need one additional dummy token in the input (will be masked), attention mask and target mapping (see model docstring)
input_ids = torch.cat((generated, torch.zeros(
(1, 1), dtype=torch.long, device=device)), dim=1)
perm_mask = torch.zeros(
(1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float, device=device)
# Previous tokens don't see last token
perm_mask[:, :, -1] = 1.0
target_mapping = torch.zeros(
(1, 1, input_ids.shape[1]), dtype=torch.float, device=device)
target_mapping[0, 0, -1] = 1.0 # predict last token
inputs = {'input_ids': input_ids, 'perm_mask': perm_mask,
'target_mapping': target_mapping}
# Note: we could also use 'past' with GPT-2/Transfo-XL/XLNet (cached hidden-states)
outputs = model(**inputs)
next_token_logits = outputs[0][0, -1, :] / temperature
filtered_logits = top_k_top_p_filtering(
next_token_logits, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(
F.softmax(filtered_logits, dim=-1), num_samples=1)
generated = torch.cat((generated, next_token.unsqueeze(0)), dim=1)
return generated
def generate_text(
padding_text=None,
model_type='gpt2',
model_name_or_path='gpt2',
prompt='',
length=3,
temperature=1.0,
top_k=0,
top_p=0.9,
no_cuda=True,
seed=42,
):
device = torch.device(
"cuda" if torch.cuda.is_available() and not no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
np.random.seed(seed)
torch.manual_seed(seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(seed)
model_type = model_type.lower()
model_class, tokenizer_class = MODEL_CLASSES[model_type]
tokenizer = tokenizer_class.from_pretrained(model_name_or_path)
model = model_class.from_pretrained(model_name_or_path)
model.to(device)
model.eval()
if length < 0 and model.config.max_position_embeddings > 0:
length = model.config.max_position_embeddings
elif 0 < model.config.max_position_embeddings < length:
# No generation bigger than model size
length = model.config.max_position_embeddings
elif length < 0:
length = MAX_LENGTH # avoid infinite loop
while True:
raw_text = prompt if prompt else input("Model prompt >>> ")
if model_type in ["transfo-xl", "xlnet"]:
# Models with memory likes to have a long prompt for short inputs.
raw_text = (
padding_text if padding_text else PADDING_TEXT) + raw_text
context_tokens = tokenizer.encode(raw_text)
out = sample_sequence(
model=model,
context=context_tokens,
length=length,
temperature=temperature,
top_k=top_k,
top_p=top_p,
device=device,
is_xlnet=bool(model_type == "xlnet"),
)
out = out[0, len(context_tokens):].tolist()
text = tokenizer.decode(out, clean_up_tokenization_spaces=True)
print(text)
if prompt:
break
return text
| [] |
2024-01-10 | iloukou/langflow | tests~test_graph.py | import json
from typing import Type, Union
import pytest
from langchain.agents import AgentExecutor
from langchain.llms.fake import FakeListLLM
from langflow.graph import Edge, Graph, Node
from langflow.graph.nodes import (
AgentNode,
ChainNode,
FileToolNode,
LLMNode,
PromptNode,
ToolkitNode,
ToolNode,
WrapperNode,
)
from langflow.interface.run import get_result_and_thought_using_graph
from langflow.utils.payload import build_json, get_root_node
# Test cases for the graph module
# now we have three types of graph:
# BASIC_EXAMPLE_PATH, COMPLEX_EXAMPLE_PATH, OPENAPI_EXAMPLE_PATH
def get_graph(_type="basic"):
"""Get a graph from a json file"""
if _type == "basic":
path = pytest.BASIC_EXAMPLE_PATH
elif _type == "complex":
path = pytest.COMPLEX_EXAMPLE_PATH
elif _type == "openapi":
path = pytest.OPENAPI_EXAMPLE_PATH
with open(path, "r") as f:
flow_graph = json.load(f)
data_graph = flow_graph["data"]
nodes = data_graph["nodes"]
edges = data_graph["edges"]
return Graph(nodes, edges)
@pytest.fixture
def basic_graph():
return get_graph()
@pytest.fixture
def complex_graph():
return get_graph("complex")
@pytest.fixture
def openapi_graph():
return get_graph("openapi")
def get_node_by_type(graph, node_type: Type[Node]) -> Union[Node, None]:
"""Get a node by type"""
return next((node for node in graph.nodes if isinstance(node, node_type)), None)
def test_graph_structure(basic_graph):
assert isinstance(basic_graph, Graph)
assert len(basic_graph.nodes) > 0
assert len(basic_graph.edges) > 0
for node in basic_graph.nodes:
assert isinstance(node, Node)
for edge in basic_graph.edges:
assert isinstance(edge, Edge)
assert edge.source in basic_graph.nodes
assert edge.target in basic_graph.nodes
def test_circular_dependencies(basic_graph):
assert isinstance(basic_graph, Graph)
def check_circular(node, visited):
visited.add(node)
neighbors = basic_graph.get_nodes_with_target(node)
for neighbor in neighbors:
if neighbor in visited:
return True
if check_circular(neighbor, visited.copy()):
return True
return False
for node in basic_graph.nodes:
assert not check_circular(node, set())
def test_invalid_node_types():
graph_data = {
"nodes": [
{
"id": "1",
"data": {
"node": {
"base_classes": ["BaseClass"],
"template": {
"_type": "InvalidNodeType",
},
},
},
},
],
"edges": [],
}
with pytest.raises(Exception):
Graph(graph_data["nodes"], graph_data["edges"])
def test_get_nodes_with_target(basic_graph):
"""Test getting connected nodes"""
assert isinstance(basic_graph, Graph)
# Get root node
root = get_root_node(basic_graph)
assert root is not None
connected_nodes = basic_graph.get_nodes_with_target(root)
assert connected_nodes is not None
def test_get_node_neighbors_basic(basic_graph):
"""Test getting node neighbors"""
assert isinstance(basic_graph, Graph)
# Get root node
root = get_root_node(basic_graph)
assert root is not None
neighbors = basic_graph.get_node_neighbors(root)
assert neighbors is not None
assert isinstance(neighbors, dict)
# Root Node is an Agent, it requires an LLMChain and tools
# We need to check if there is a Chain in the one of the neighbors'
# data attribute in the type key
assert any(
"Chain" in neighbor.data["type"] for neighbor, val in neighbors.items() if val
)
# assert Serper Search is in the neighbors
assert any(
"Serper" in neighbor.data["type"] for neighbor, val in neighbors.items() if val
)
# Now on to the Chain's neighbors
chain = next(
neighbor
for neighbor, val in neighbors.items()
if "Chain" in neighbor.data["type"] and val
)
chain_neighbors = basic_graph.get_node_neighbors(chain)
assert chain_neighbors is not None
assert isinstance(chain_neighbors, dict)
# Check if there is a LLM in the chain's neighbors
assert any(
"OpenAI" in neighbor.data["type"]
for neighbor, val in chain_neighbors.items()
if val
)
# Chain should have a Prompt as a neighbor
assert any(
"Prompt" in neighbor.data["type"]
for neighbor, val in chain_neighbors.items()
if val
)
def test_get_node_neighbors_complex(complex_graph):
"""Test getting node neighbors"""
assert isinstance(complex_graph, Graph)
# Get root node
root = get_root_node(complex_graph)
assert root is not None
neighbors = complex_graph.get_nodes_with_target(root)
assert neighbors is not None
# Neighbors should be a list of nodes
assert isinstance(neighbors, list)
# Root Node is an Agent, it requires an LLMChain and tools
# We need to check if there is a Chain in the one of the neighbors'
assert any("Chain" in neighbor.data["type"] for neighbor in neighbors)
# assert Tool is in the neighbors
assert any("Tool" in neighbor.data["type"] for neighbor in neighbors)
# Now on to the Chain's neighbors
chain = next(neighbor for neighbor in neighbors if "Chain" in neighbor.data["type"])
chain_neighbors = complex_graph.get_nodes_with_target(chain)
assert chain_neighbors is not None
# Check if there is a LLM in the chain's neighbors
assert any("OpenAI" in neighbor.data["type"] for neighbor in chain_neighbors)
# Chain should have a Prompt as a neighbor
assert any("Prompt" in neighbor.data["type"] for neighbor in chain_neighbors)
# Now on to the Tool's neighbors
tool = next(neighbor for neighbor in neighbors if "Tool" in neighbor.data["type"])
tool_neighbors = complex_graph.get_nodes_with_target(tool)
assert tool_neighbors is not None
# Check if there is an Agent in the tool's neighbors
assert any("Agent" in neighbor.data["type"] for neighbor in tool_neighbors)
# This Agent has a Tool that has a PythonFunction as func
agent = next(
neighbor for neighbor in tool_neighbors if "Agent" in neighbor.data["type"]
)
agent_neighbors = complex_graph.get_nodes_with_target(agent)
assert agent_neighbors is not None
# Check if there is a Tool in the agent's neighbors
assert any("Tool" in neighbor.data["type"] for neighbor in agent_neighbors)
# This Tool has a PythonFunction as func
tool = next(
neighbor for neighbor in agent_neighbors if "Tool" in neighbor.data["type"]
)
tool_neighbors = complex_graph.get_nodes_with_target(tool)
assert tool_neighbors is not None
# Check if there is a PythonFunction in the tool's neighbors
assert any("PythonFunction" in neighbor.data["type"] for neighbor in tool_neighbors)
def test_get_node(basic_graph):
"""Test getting a single node"""
node_id = basic_graph.nodes[0].id
node = basic_graph.get_node(node_id)
assert isinstance(node, Node)
assert node.id == node_id
def test_build_nodes(basic_graph):
"""Test building nodes"""
assert len(basic_graph.nodes) == len(basic_graph._nodes)
for node in basic_graph.nodes:
assert isinstance(node, Node)
def test_build_edges(basic_graph):
"""Test building edges"""
assert len(basic_graph.edges) == len(basic_graph._edges)
for edge in basic_graph.edges:
assert isinstance(edge, Edge)
assert isinstance(edge.source, Node)
assert isinstance(edge.target, Node)
def test_get_root_node(basic_graph, complex_graph):
"""Test getting root node"""
assert isinstance(basic_graph, Graph)
root = get_root_node(basic_graph)
assert root is not None
assert isinstance(root, Node)
assert root.data["type"] == "ZeroShotAgent"
# For complex example, the root node is a ZeroShotAgent too
assert isinstance(complex_graph, Graph)
root = get_root_node(complex_graph)
assert root is not None
assert isinstance(root, Node)
assert root.data["type"] == "ZeroShotAgent"
def test_build_json(basic_graph):
"""Test building JSON from graph"""
assert isinstance(basic_graph, Graph)
root = get_root_node(basic_graph)
json_data = build_json(root, basic_graph)
assert isinstance(json_data, dict)
assert json_data["_type"] == "zero-shot-react-description"
assert isinstance(json_data["llm_chain"], dict)
assert json_data["llm_chain"]["_type"] == "llm_chain"
assert json_data["llm_chain"]["memory"] is None
assert json_data["llm_chain"]["verbose"] is False
assert isinstance(json_data["llm_chain"]["prompt"], dict)
assert isinstance(json_data["llm_chain"]["llm"], dict)
assert json_data["llm_chain"]["output_key"] == "text"
assert isinstance(json_data["allowed_tools"], list)
assert all(isinstance(tool, dict) for tool in json_data["allowed_tools"])
assert isinstance(json_data["return_values"], list)
assert all(isinstance(val, str) for val in json_data["return_values"])
def test_validate_edges(basic_graph):
"""Test validating edges"""
assert isinstance(basic_graph, Graph)
# all edges should be valid
assert all(edge.valid for edge in basic_graph.edges)
def test_matched_type(basic_graph):
"""Test matched type attribute in Edge"""
assert isinstance(basic_graph, Graph)
# all edges should be valid
assert all(edge.valid for edge in basic_graph.edges)
# all edges should have a matched_type attribute
assert all(hasattr(edge, "matched_type") for edge in basic_graph.edges)
# The matched_type attribute should be in the source_types attr
assert all(edge.matched_type in edge.source_types for edge in basic_graph.edges)
def test_build_params(basic_graph):
"""Test building params"""
assert isinstance(basic_graph, Graph)
# all edges should be valid
assert all(edge.valid for edge in basic_graph.edges)
# all edges should have a matched_type attribute
assert all(hasattr(edge, "matched_type") for edge in basic_graph.edges)
# The matched_type attribute should be in the source_types attr
assert all(edge.matched_type in edge.source_types for edge in basic_graph.edges)
# Get the root node
root = get_root_node(basic_graph)
# Root node is a ZeroShotAgent
# which requires an llm_chain, allowed_tools and return_values
assert isinstance(root.params, dict)
assert "llm_chain" in root.params
assert "allowed_tools" in root.params
assert "return_values" in root.params
# The llm_chain should be a Node
assert isinstance(root.params["llm_chain"], Node)
# The allowed_tools should be a list of Nodes
assert isinstance(root.params["allowed_tools"], list)
assert all(isinstance(tool, Node) for tool in root.params["allowed_tools"])
# The return_values is of type str so it should be a list of strings
assert isinstance(root.params["return_values"], list)
assert all(isinstance(val, str) for val in root.params["return_values"])
# The llm_chain should have a prompt and llm
llm_chain_node = root.params["llm_chain"]
assert isinstance(llm_chain_node.params, dict)
assert "prompt" in llm_chain_node.params
assert "llm" in llm_chain_node.params
# The prompt should be a Node
assert isinstance(llm_chain_node.params["prompt"], Node)
# The llm should be a Node
assert isinstance(llm_chain_node.params["llm"], Node)
    # The prompt should have format_instructions, suffix, prefix
prompt_node = llm_chain_node.params["prompt"]
assert isinstance(prompt_node.params, dict)
assert "format_instructions" in prompt_node.params
assert "suffix" in prompt_node.params
assert "prefix" in prompt_node.params
# All of them should be of type str
assert isinstance(prompt_node.params["format_instructions"], str)
assert isinstance(prompt_node.params["suffix"], str)
assert isinstance(prompt_node.params["prefix"], str)
# The llm should have a model
llm_node = llm_chain_node.params["llm"]
assert isinstance(llm_node.params, dict)
assert "model_name" in llm_node.params
# The model should be a str
assert isinstance(llm_node.params["model_name"], str)
def test_build(basic_graph, complex_graph, openapi_graph):
"""Test Node's build method"""
assert_agent_was_built(basic_graph)
assert_agent_was_built(complex_graph)
assert_agent_was_built(openapi_graph)
def assert_agent_was_built(graph):
"""Assert that the agent was built"""
assert isinstance(graph, Graph)
# Now we test the build method
# Build the Agent
result = graph.build()
# The agent should be a AgentExecutor
assert isinstance(result, AgentExecutor)
def test_agent_node_build(basic_graph):
agent_node = get_node_by_type(basic_graph, AgentNode)
assert agent_node is not None
built_object = agent_node.build()
assert built_object is not None
def test_tool_node_build(basic_graph):
tool_node = get_node_by_type(basic_graph, ToolNode)
assert tool_node is not None
built_object = tool_node.build()
assert built_object is not None
# Add any further assertions specific to the ToolNode's build() method
def test_chain_node_build(complex_graph):
chain_node = get_node_by_type(complex_graph, ChainNode)
assert chain_node is not None
built_object = chain_node.build()
assert built_object is not None
# Add any further assertions specific to the ChainNode's build() method
def test_prompt_node_build(complex_graph):
prompt_node = get_node_by_type(complex_graph, PromptNode)
assert prompt_node is not None
built_object = prompt_node.build()
assert built_object is not None
# Add any further assertions specific to the PromptNode's build() method
def test_llm_node_build(basic_graph):
llm_node = get_node_by_type(basic_graph, LLMNode)
assert llm_node is not None
built_object = llm_node.build()
assert built_object is not None
# Add any further assertions specific to the LLMNode's build() method
def test_toolkit_node_build(openapi_graph):
toolkit_node = get_node_by_type(openapi_graph, ToolkitNode)
assert toolkit_node is not None
built_object = toolkit_node.build()
assert built_object is not None
# Add any further assertions specific to the ToolkitNode's build() method
def test_file_tool_node_build(openapi_graph):
file_tool_node = get_node_by_type(openapi_graph, FileToolNode)
assert file_tool_node is not None
built_object = file_tool_node.build()
assert built_object is not None
# Add any further assertions specific to the FileToolNode's build() method
def test_wrapper_node_build(openapi_graph):
wrapper_node = get_node_by_type(openapi_graph, WrapperNode)
assert wrapper_node is not None
built_object = wrapper_node.build()
assert built_object is not None
# Add any further assertions specific to the WrapperNode's build() method
def test_get_result_and_thought(basic_graph):
"""Test the get_result_and_thought method"""
responses = [
"Final Answer: I am a response",
]
message = "Hello"
# Find the node that is an LLMNode and change the
# _built_object to a FakeListLLM
llm_node = get_node_by_type(basic_graph, LLMNode)
assert llm_node is not None
llm_node._built_object = FakeListLLM(responses=responses)
llm_node._built = True
langchain_object = basic_graph.build()
# assert all nodes are built
assert all(node._built for node in basic_graph.nodes)
# now build again and check if FakeListLLM was used
# Get the result and thought
result, thought = get_result_and_thought_using_graph(langchain_object, message)
# The result should be a str
assert isinstance(result, str)
    # The thought should be a str
assert isinstance(thought, str)
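# Minimal sketch of the `get_node_by_type` helper used by the tests above; its
# definition is not visible in this fragment, so this assumes Graph exposes a
# `nodes` list, consistent with how the helper is called here.
def get_node_by_type(graph, node_type):
    """Return the first node in `graph.nodes` that is an instance of `node_type`, or None."""
    return next((node for node in graph.nodes if isinstance(node, node_type)), None)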
| [] |
2024-01-10 | iloukou/langflow | src~backend~langflow~interface~loading.py | import json
from typing import Any, Callable, Dict, Optional
from langchain.agents import ZeroShotAgent
from langchain.agents import agent as agent_module
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_toolkits.base import BaseToolkit
from langchain.agents.load_tools import (
_BASE_TOOLS,
_EXTRA_LLM_TOOLS,
_EXTRA_OPTIONAL_TOOLS,
_LLM_TOOLS,
)
from langchain.agents.loading import load_agent_from_config
from langchain.agents.tools import Tool
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains.loading import load_chain_from_config
from langchain.llms.base import BaseLLM
from langchain.llms.loading import load_llm_from_config
from langflow.interface.agents.custom import CUSTOM_AGENTS
from langflow.interface.importing.utils import import_by_type
from langflow.interface.toolkits.base import toolkits_creator
from langflow.interface.types import get_type_list
from langflow.utils import util, validate
def instantiate_class(node_type: str, base_type: str, params: Dict) -> Any:
"""Instantiate class from module type and key, and params"""
if node_type in CUSTOM_AGENTS:
if custom_agent := CUSTOM_AGENTS.get(node_type):
return custom_agent.initialize(**params) # type: ignore
class_object = import_by_type(_type=base_type, name=node_type)
if base_type == "agents":
# We need to initialize it differently
return load_agent_executor(class_object, params)
elif node_type == "ZeroShotPrompt":
if "tools" not in params:
params["tools"] = []
return ZeroShotAgent.create_prompt(**params)
elif node_type == "PythonFunction":
# If the node_type is "PythonFunction"
# we need to get the function from the params
# which will be a str containing a python function
# and then we need to compile it and return the function
# as the instance
function_string = params["code"]
if isinstance(function_string, str):
return validate.eval_function(function_string)
raise ValueError("Function should be a string")
elif base_type == "toolkits":
loaded_toolkit = class_object(**params)
# Check if node_type has a loader
if toolkits_creator.has_create_function(node_type):
return load_toolkits_executor(node_type, loaded_toolkit, params)
return loaded_toolkit
elif base_type == "embeddings":
params.pop("model")
return class_object(**params)
elif base_type == "vectorstores":
return class_object.from_documents(**params)
elif base_type == "documentloaders":
return class_object(**params).load()
elif base_type == "textsplitters":
documents = params.pop("documents")
text_splitter = class_object(**params)
return text_splitter.split_documents(documents)
else:
return class_object(**params)
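# Usage sketch (the base_type value here is a guess, since the set of valid
# base types lives in import_by_type): a "PythonFunction" node is dispatched on
# node_type and compiles the user-submitted source into a callable.
#
#     add_fn = instantiate_class(
#         node_type="PythonFunction",
#         base_type="functions",
#         params={"code": "def add(a, b):\n    return a + b"},
#     )
#     add_fn(1, 2)  # -> 3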
def load_flow_from_json(path: str):
    """Load flow from json file"""
    # This is done to avoid circular imports
    from langflow.graph import Graph
with open(path, "r") as f:
flow_graph = json.load(f)
data_graph = flow_graph["data"]
nodes = data_graph["nodes"]
# Substitute ZeroShotPrompt with PromptTemplate
# nodes = replace_zero_shot_prompt_with_prompt_template(nodes)
# Add input variables
# nodes = payload.extract_input_variables(nodes)
# Nodes, edges and root node
edges = data_graph["edges"]
graph = Graph(nodes, edges)
return graph.build()
def replace_zero_shot_prompt_with_prompt_template(nodes):
"""Replace ZeroShotPrompt with PromptTemplate"""
for node in nodes:
if node["data"]["type"] == "ZeroShotPrompt":
# Build Prompt Template
tools = [
tool
for tool in nodes
if tool["type"] != "chatOutputNode"
and "Tool" in tool["data"]["node"]["base_classes"]
]
node["data"] = build_prompt_template(prompt=node["data"], tools=tools)
break
return nodes
def load_langchain_type_from_config(config: Dict[str, Any]):
"""Load langchain type from config"""
# Get type list
type_list = get_type_list()
if config["_type"] in type_list["agents"]:
config = util.update_verbose(config, new_value=False)
return load_agent_executor_from_config(config, verbose=True)
elif config["_type"] in type_list["chains"]:
config = util.update_verbose(config, new_value=False)
return load_chain_from_config(config, verbose=True)
elif config["_type"] in type_list["llms"]:
config = util.update_verbose(config, new_value=True)
return load_llm_from_config(config)
else:
raise ValueError("Type should be either agent, chain or llm")
def load_agent_executor_from_config(
config: dict,
llm: Optional[BaseLLM] = None,
tools: Optional[list[Tool]] = None,
callback_manager: Optional[BaseCallbackManager] = None,
**kwargs: Any,
):
tools = load_tools_from_config(config["allowed_tools"])
config["allowed_tools"] = [tool.name for tool in tools] if tools else []
agent_obj = load_agent_from_config(config, llm, tools, **kwargs)
return AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
callback_manager=callback_manager,
**kwargs,
)
def load_agent_executor(agent_class: type[agent_module.Agent], params, **kwargs):
"""Load agent executor from agent class, tools and chain"""
allowed_tools = params["allowed_tools"]
llm_chain = params["llm_chain"]
tool_names = [tool.name for tool in allowed_tools]
agent = agent_class(allowed_tools=tool_names, llm_chain=llm_chain)
return AgentExecutor.from_agent_and_tools(
agent=agent,
tools=allowed_tools,
**kwargs,
)
def load_toolkits_executor(node_type: str, toolkit: BaseToolkit, params: dict):
create_function: Callable = toolkits_creator.get_create_function(node_type)
if llm := params.get("llm"):
return create_function(llm=llm, toolkit=toolkit)
def load_tools_from_config(tool_list: list[dict]) -> list:
"""Load tools based on a config list.
Args:
config: config list.
Returns:
List of tools.
"""
tools = []
for tool in tool_list:
tool_type = tool.pop("_type")
llm_config = tool.pop("llm", None)
llm = load_llm_from_config(llm_config) if llm_config else None
kwargs = tool
if tool_type in _BASE_TOOLS:
tools.append(_BASE_TOOLS[tool_type]())
elif tool_type in _LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {tool_type} requires an LLM to be provided")
tools.append(_LLM_TOOLS[tool_type](llm))
elif tool_type in _EXTRA_LLM_TOOLS:
if llm is None:
raise ValueError(f"Tool {tool_type} requires an LLM to be provided")
_get_llm_tool_func, extra_keys = _EXTRA_LLM_TOOLS[tool_type]
if missing_keys := set(extra_keys).difference(kwargs):
raise ValueError(
f"Tool {tool_type} requires some parameters that were not "
f"provided: {missing_keys}"
)
tools.append(_get_llm_tool_func(llm=llm, **kwargs))
elif tool_type in _EXTRA_OPTIONAL_TOOLS:
_get_tool_func, extra_keys = _EXTRA_OPTIONAL_TOOLS[tool_type]
kwargs = {k: value for k, value in kwargs.items() if value}
tools.append(_get_tool_func(**kwargs))
else:
raise ValueError(f"Got unknown tool {tool_type}")
return tools
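# Example config list (the tool names are assumptions based on langchain's
# load_tools registry; the nested "llm" config is only needed for _LLM_TOOLS
# and _EXTRA_LLM_TOOLS entries):
#
#     tools = load_tools_from_config([
#         {"_type": "requests"},                              # a _BASE_TOOLS entry
#         {"_type": "llm-math", "llm": {"_type": "openai"}},  # an _LLM_TOOLS entry
#     ])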
def build_prompt_template(prompt, tools):
"""Build PromptTemplate from ZeroShotPrompt"""
prefix = prompt["node"]["template"]["prefix"]["value"]
suffix = prompt["node"]["template"]["suffix"]["value"]
format_instructions = prompt["node"]["template"]["format_instructions"]["value"]
tool_strings = "\n".join(
[
f"{tool['data']['node']['name']}: {tool['data']['node']['description']}"
for tool in tools
]
)
tool_names = ", ".join([tool["data"]["node"]["name"] for tool in tools])
format_instructions = format_instructions.format(tool_names=tool_names)
value = "\n\n".join([prefix, tool_strings, format_instructions, suffix])
prompt["type"] = "PromptTemplate"
prompt["node"] = {
"template": {
"_type": "prompt",
"input_variables": {
"type": "str",
"required": True,
"placeholder": "",
"list": True,
"show": False,
"multiline": False,
},
"output_parser": {
"type": "BaseOutputParser",
"required": False,
"placeholder": "",
"list": False,
"show": False,
"multline": False,
"value": None,
},
"template": {
"type": "str",
"required": True,
"placeholder": "",
"list": False,
"show": True,
"multiline": True,
"value": value,
},
"template_format": {
"type": "str",
"required": False,
"placeholder": "",
"list": False,
"show": False,
"multline": False,
"value": "f-string",
},
"validate_template": {
"type": "bool",
"required": False,
"placeholder": "",
"list": False,
"show": False,
"multline": False,
"value": True,
},
},
"description": "Schema to represent a prompt for an LLM.",
"base_classes": ["BasePromptTemplate"],
}
return prompt
| [] |
2024-01-10 | iloukou/langflow | src~backend~langflow~template~nodes.py | from typing import Optional
from langchain.agents import loading
from langchain.agents.mrkl import prompt
from langflow.template.base import FrontendNode, Template, TemplateField
from langflow.template.constants import DEFAULT_PROMPT, HUMAN_PROMPT, SYSTEM_PROMPT
from langflow.utils.constants import DEFAULT_PYTHON_FUNCTION
NON_CHAT_AGENTS = {
agent_type: agent_class
for agent_type, agent_class in loading.AGENT_TO_CLASS.items()
if "chat" not in agent_type.value
}
class BasePromptFrontendNode(FrontendNode):
name: str
template: Template
description: str
base_classes: list[str]
def to_dict(self):
return super().to_dict()
class ZeroShotPromptNode(BasePromptFrontendNode):
name: str = "ZeroShotPrompt"
template: Template = Template(
type_name="zero_shot",
fields=[
TemplateField(
field_type="str",
required=False,
placeholder="",
is_list=False,
show=True,
multiline=True,
value=prompt.PREFIX,
name="prefix",
),
TemplateField(
field_type="str",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=True,
value=prompt.SUFFIX,
name="suffix",
),
TemplateField(
field_type="str",
required=False,
placeholder="",
is_list=False,
show=True,
multiline=True,
value=prompt.FORMAT_INSTRUCTIONS,
name="format_instructions",
),
],
)
description: str = "Prompt template for Zero Shot Agent."
base_classes: list[str] = ["BasePromptTemplate"]
def to_dict(self):
return super().to_dict()
class PromptTemplateNode(FrontendNode):
name: str = "PromptTemplate"
template: Template
description: str
base_classes: list[str] = ["BasePromptTemplate"]
def to_dict(self):
return super().to_dict()
class PythonFunctionNode(FrontendNode):
name: str = "PythonFunction"
template: Template = Template(
type_name="python_function",
fields=[
TemplateField(
field_type="code",
required=True,
placeholder="",
is_list=False,
show=True,
value=DEFAULT_PYTHON_FUNCTION,
name="code",
)
],
)
description: str = "Python function to be executed."
base_classes: list[str] = ["function"]
def to_dict(self):
return super().to_dict()
class ToolNode(FrontendNode):
name: str = "Tool"
template: Template = Template(
type_name="tool",
fields=[
TemplateField(
field_type="str",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=True,
value="",
name="name",
),
TemplateField(
field_type="str",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=True,
value="",
name="description",
),
TemplateField(
field_type="str",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=True,
value="",
name="func",
),
],
)
description: str = "Tool to be used in the flow."
base_classes: list[str] = ["BaseTool"]
def to_dict(self):
return super().to_dict()
class JsonAgentNode(FrontendNode):
name: str = "JsonAgent"
template: Template = Template(
type_name="json_agent",
fields=[
TemplateField(
field_type="BaseToolkit",
required=True,
show=True,
name="toolkit",
),
TemplateField(
field_type="BaseLanguageModel",
required=True,
show=True,
name="llm",
),
],
)
description: str = """Construct a json agent from an LLM and tools."""
base_classes: list[str] = ["AgentExecutor"]
def to_dict(self):
return super().to_dict()
class InitializeAgentNode(FrontendNode):
name: str = "initialize_agent"
template: Template = Template(
type_name="initailize_agent",
fields=[
TemplateField(
field_type="str",
required=True,
is_list=True,
show=True,
multiline=False,
options=list(NON_CHAT_AGENTS.keys()),
value=list(NON_CHAT_AGENTS.keys())[0],
name="agent",
),
TemplateField(
field_type="BaseChatMemory",
required=False,
show=True,
name="memory",
),
TemplateField(
field_type="Tool",
required=False,
show=True,
name="tools",
is_list=True,
),
TemplateField(
field_type="BaseLanguageModel",
required=True,
show=True,
name="llm",
),
],
)
description: str = """Construct a json agent from an LLM and tools."""
base_classes: list[str] = ["AgentExecutor"]
def to_dict(self):
return super().to_dict()
@staticmethod
def format_field(field: TemplateField, name: Optional[str] = None) -> None:
# do nothing and don't return anything
pass
class CSVAgentNode(FrontendNode):
name: str = "CSVAgent"
template: Template = Template(
type_name="csv_agent",
fields=[
TemplateField(
field_type="file",
required=True,
show=True,
name="path",
value="",
suffixes=[".csv"],
fileTypes=["csv"],
),
TemplateField(
field_type="BaseLanguageModel",
required=True,
show=True,
name="llm",
),
],
)
description: str = """Construct a json agent from a CSV and tools."""
base_classes: list[str] = ["AgentExecutor"]
def to_dict(self):
return super().to_dict()
class VectorStoreAgentNode(FrontendNode):
name: str = "VectorStoreAgent"
template: Template = Template(
type_name="vectorstore_agent",
fields=[
TemplateField(
field_type="VectorStoreInfo",
required=True,
show=True,
name="vectorstoreinfo",
display_name="Vector Store Info",
),
TemplateField(
field_type="BaseLanguageModel",
required=True,
show=True,
name="llm",
display_name="LLM",
),
],
)
description: str = """Construct an agent from a Vector Store."""
base_classes: list[str] = ["AgentExecutor"]
def to_dict(self):
return super().to_dict()
class VectorStoreRouterAgentNode(FrontendNode):
name: str = "VectorStoreRouterAgent"
template: Template = Template(
type_name="vectorstorerouter_agent",
fields=[
TemplateField(
field_type="VectorStoreRouterToolkit",
required=True,
show=True,
name="vectorstoreroutertoolkit",
display_name="Vector Store Router Toolkit",
),
TemplateField(
field_type="BaseLanguageModel",
required=True,
show=True,
name="llm",
display_name="LLM",
),
],
)
description: str = """Construct an agent from a Vector Store Router."""
base_classes: list[str] = ["AgentExecutor"]
def to_dict(self):
return super().to_dict()
class PromptFrontendNode(FrontendNode):
@staticmethod
def format_field(field: TemplateField, name: Optional[str] = None) -> None:
# if field.field_type == "StringPromptTemplate"
# change it to str
PROMPT_FIELDS = [
"template",
"suffix",
"prefix",
"examples",
]
if field.field_type == "StringPromptTemplate" and "Message" in str(name):
field.field_type = "prompt"
field.multiline = True
field.value = HUMAN_PROMPT if "Human" in field.name else SYSTEM_PROMPT
if field.name == "template" and field.value == "":
field.value = DEFAULT_PROMPT
if field.name in PROMPT_FIELDS:
field.field_type = "prompt"
if (
"Union" in field.field_type
and "BaseMessagePromptTemplate" in field.field_type
):
field.field_type = "BaseMessagePromptTemplate"
# All prompt fields should be password=False
field.password = False
class MemoryFrontendNode(FrontendNode):
@staticmethod
def format_field(field: TemplateField, name: Optional[str] = None) -> None:
FrontendNode.format_field(field, name)
if not isinstance(field.value, str):
field.value = None
if field.name == "k":
field.required = True
field.show = True
field.field_type = "int"
field.value = 10
field.display_name = "Memory Size"
class ChainFrontendNode(FrontendNode):
@staticmethod
def format_field(field: TemplateField, name: Optional[str] = None) -> None:
FrontendNode.format_field(field, name)
if "key" in field.name:
field.password = False
field.show = False
class LLMFrontendNode(FrontendNode):
@staticmethod
def format_field(field: TemplateField, name: Optional[str] = None) -> None:
display_names_dict = {
"huggingfacehub_api_token": "HuggingFace Hub API Token",
}
FrontendNode.format_field(field, name)
SHOW_FIELDS = ["repo_id", "task", "model_kwargs"]
if field.name in SHOW_FIELDS:
field.show = True
if "api" in field.name and ("key" in field.name or "token" in field.name):
field.password = True
field.show = True
field.required = True
if field.name == "task":
field.required = True
field.show = True
field.is_list = True
field.options = ["text-generation", "text2text-generation"]
if display_name := display_names_dict.get(field.name):
field.display_name = display_name
if field.name == "model_kwargs":
field.field_type = "code"
| [
"BaseLanguageModel",
"vectorstoreroutertoolkit",
"initailize_agent",
"VectorStoreInfo",
"name",
"format_instructions",
"json_agent",
"description",
"func",
"Vector Store Info",
"agent",
"python_function",
"BaseChatMemory",
"vectorstorerouter_agent",
"BaseToolkit",
"['template', 'suffix', 'prefix', 'examples']",
"Vector Store Router Toolkit",
"csv_agent",
"vectorstoreinfo",
"VectorStoreRouterToolkit",
"vectorstore_agent"
] |
2024-01-10 | iloukou/langflow | src~backend~langflow~interface~tools~constants.py | from langchain.agents import Tool
from langchain.agents.load_tools import (
_BASE_TOOLS,
_EXTRA_LLM_TOOLS,
_EXTRA_OPTIONAL_TOOLS,
_LLM_TOOLS,
)
from langchain.tools.json.tool import JsonSpec
from langflow.interface.tools.custom import PythonFunction
FILE_TOOLS = {"JsonSpec": JsonSpec}
CUSTOM_TOOLS = {"Tool": Tool, "PythonFunction": PythonFunction}
ALL_TOOLS_NAMES = {
**_BASE_TOOLS,
**_LLM_TOOLS, # type: ignore
**{k: v[0] for k, v in _EXTRA_LLM_TOOLS.items()}, # type: ignore
**{k: v[0] for k, v in _EXTRA_OPTIONAL_TOOLS.items()},
**CUSTOM_TOOLS,
**FILE_TOOLS, # type: ignore
}
| [] |
2024-01-10 | AI-Jie01/scikit-llm | skllm~models~gpt_zero_shot_clf.py | from typing import Optional, Union, List, Any
import numpy as np
import pandas as pd
from collections import Counter
import random
from tqdm import tqdm
from abc import ABC, abstractmethod
from sklearn.base import BaseEstimator, ClassifierMixin
from skllm.openai.prompts import get_zero_shot_prompt_slc, get_zero_shot_prompt_mlc
from skllm.openai.chatgpt import (
construct_message,
get_chat_completion,
extract_json_key,
)
from skllm.config import SKLLMConfig as _Config
from skllm.utils import to_numpy as _to_numpy
from skllm.openai.mixin import OpenAIMixin as _OAIMixin
class _BaseZeroShotGPTClassifier(ABC, BaseEstimator, ClassifierMixin, _OAIMixin):
def __init__(
self,
openai_key: Optional[str] = None,
openai_org: Optional[str] = None,
openai_model: str = "gpt-3.5-turbo",
):
self._set_keys(openai_key, openai_org)
self.openai_model = openai_model
def _to_np(self, X):
return _to_numpy(X)
def fit(
self,
X: Optional[Union[np.ndarray, pd.Series, List[str]]],
y: Union[np.ndarray, pd.Series, List[str], List[List[str]]],
):
X = self._to_np(X)
self.classes_, self.probabilities_ = self._get_unique_targets(y)
return self
def predict(self, X: Union[np.ndarray, pd.Series, List[str]]):
X = self._to_np(X)
predictions = []
for i in tqdm(range(len(X))):
predictions.append(self._predict_single(X[i]))
return predictions
@abstractmethod
def _extract_labels(self, y: Any) -> List[str]:
pass
def _get_unique_targets(self, y):
labels = self._extract_labels(y)
counts = Counter(labels)
total = sum(counts.values())
classes, probs = [], []
for l, c in counts.items():
classes.append(l)
probs.append(c / total)
return classes, probs
def _get_chat_completion(self, x):
prompt = self._get_prompt(x)
msgs = []
msgs.append(construct_message("system", "You are a text classification model."))
msgs.append(construct_message("user", prompt))
completion = get_chat_completion(
msgs, self._get_openai_key(), self._get_openai_org(), self.openai_model
)
return completion
class ZeroShotGPTClassifier(_BaseZeroShotGPTClassifier):
def __init__(
self,
openai_key: Optional[str] = None,
openai_org: Optional[str] = None,
openai_model: str = "gpt-3.5-turbo",
):
super().__init__(openai_key, openai_org, openai_model)
def _extract_labels(self, y: Any) -> List[str]:
if isinstance(y, (pd.Series, np.ndarray)):
labels = y.tolist()
else:
labels = y
return labels
def _get_prompt(self, x) -> str:
return get_zero_shot_prompt_slc(x, self.classes_)
def _predict_single(self, x):
completion = self._get_chat_completion(x)
try:
label = str(
extract_json_key(completion.choices[0].message["content"], "label")
)
except Exception as e:
label = ""
if label not in self.classes_:
label = random.choices(self.classes_, self.probabilities_)[0]
return label
def fit(
self,
X: Optional[Union[np.ndarray, pd.Series, List[str]]],
y: Union[np.ndarray, pd.Series, List[str]],
):
y = self._to_np(y)
return super().fit(X, y)
class MultiLabelZeroShotGPTClassifier(_BaseZeroShotGPTClassifier):
def __init__(
self,
openai_key: Optional[str] = None,
openai_org: Optional[str] = None,
openai_model: str = "gpt-3.5-turbo",
max_labels: int = 3,
):
super().__init__(openai_key, openai_org, openai_model)
if max_labels < 2:
raise ValueError("max_labels should be at least 2")
self.max_labels = max_labels
def _extract_labels(self, y) -> List[str]:
labels = []
for l in y:
for j in l:
labels.append(j)
return labels
def _get_prompt(self, x) -> str:
return get_zero_shot_prompt_mlc(x, self.classes_, self.max_labels)
def _predict_single(self, x):
completion = self._get_chat_completion(x)
try:
labels = extract_json_key(completion.choices[0].message["content"], "label")
if not isinstance(labels, list):
raise RuntimeError("Invalid labels type, expected list")
except Exception as e:
labels = []
labels = list(filter(lambda l: l in self.classes_, labels))
if len(labels) > self.max_labels:
            labels = labels[: self.max_labels]  # truncate to at most max_labels
elif len(labels) < 1:
labels = [random.choices(self.classes_, self.probabilities_)[0]]
return labels
def fit(
self,
X: Optional[Union[np.ndarray, pd.Series, List[str]]],
y: List[List[str]],
):
return super().fit(X, y)
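# Minimal usage sketch (texts and labels are illustrative; a valid OpenAI key
# must be passed in or configured beforehand via SKLLMConfig):
#
#     clf = ZeroShotGPTClassifier(openai_model="gpt-3.5-turbo")
#     clf.fit(["loved it", "broke after a day"], ["positive", "negative"])
#     clf.predict(["would buy again"])  # -> e.g. ["positive"]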
| [] |
2024-01-10 | smaldd14/temporal-qa-chatbot | embed-docs.py | import importlib
import openai
import pinecone
import os
from dotenv import load_dotenv
from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
load_dotenv()
openai.api_key = os.environ["OPENAI_API_KEY"]
pinecone.init(
api_key = os.environ["PINECONE_API_KEY"],
environment = os.environ["PINECONE_ENV"]
)
index_name = os.environ["PINECONE_INDEX_NAME"]
# First, check if our index already exists. If it doesn't, we create it
if index_name not in pinecone.list_indexes():
print("Index does not exist, creating it")
# we create a new index
pinecone.create_index(
name=index_name,
metric='cosine',
dimension=1536
)
embeddings = OpenAIEmbeddings()
# Import the chunk-docs module
chunk_docs = importlib.import_module('chunk-docs')
# Get the markdown files
markdown_files = chunk_docs.get_markdown_files()
# Split the markdown files
split_docs = chunk_docs.split_markdown_files(markdown_files)
# split_docs is a list of lists of langchain docs, loop thru them and add them to the index
for doc in split_docs:
vector_store = Pinecone.from_documents(doc, embeddings, index_name=index_name)
| [] |
2024-01-10 | smaldd14/temporal-qa-chatbot | common.py | import streamlit as st
import os
from langchain.chat_models import ChatOpenAI
import pinecone
from dotenv import load_dotenv
load_dotenv()
DEFAULT_SELECT_VALUE = "Select repository"
MODEL_NAME = "gpt-3.5-turbo-16k"
def initialize_session():
if not "initialized" in st.session_state:
st.session_state["initialized"] = True
st.session_state["repo_name"] = DEFAULT_SELECT_VALUE
st.session_state["user_name"] = ""
st.session_state["repo_url"] = ""
st.session_state["visitied_list"] = []
st.session_state["messages"] = []
st.session_state["chat_memory"] = None
st.session_state["llm"] = ChatOpenAI(
temperature = 0.0
)
pinecone.init(
api_key = os.environ["PINECONE_API_KEY"],
environment = os.environ["PINECONE_ENV"]
)
st.session_state["index_name"] = os.environ["PINECONE_INDEX_NAME"]
def handling_user_change():
st.session_state["repo_name"] = DEFAULT_SELECT_VALUE
st.session_state["repo_url"] = "" | [] |
2024-01-10 | smaldd14/temporal-qa-chatbot | chunk-docs.py | # Here we want to utilize Langchain's MarkdownHeaderTextSplitter to split the markdown text into chunks
from langchain.text_splitter import MarkdownHeaderTextSplitter
import os
headers_to_split_on = [
("#", "Header 1"),
("##", "Header 2")
]
# Get each markdown file from the text directory
def get_markdown_files():
markdown_files = []
for root, dirs, files in os.walk("text"):
for file in files:
if file.endswith(".md"):
markdown_files.append(os.path.join(root, file))
return markdown_files
# Split the markdown files into chunks
def split_markdown_files(markdown_files):
markdown_splitter = MarkdownHeaderTextSplitter(headers_to_split_on=headers_to_split_on)
split_docs = []
for file in markdown_files:
# Read the markdown file
with open(file, "r") as f:
markdown_text = f.read()
md_header_splits = markdown_splitter.split_text(markdown_text)
split_docs.append(md_header_splits)
return split_docs
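# Example driver, mirroring how embed-docs.py consumes this module: collect the
# markdown files under text/ and report how many header-based chunks each yields.
if __name__ == "__main__":
    files = get_markdown_files()
    docs = split_markdown_files(files)
    for path, chunks in zip(files, docs):
        print(f"{path}: {len(chunks)} chunks")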
| [] |
2024-01-10 | ramnathv/vectorsearch-applications | weaviate_interface.py | from weaviate import Client, AuthApiKey
from dataclasses import dataclass
from openai import OpenAI
from sentence_transformers import SentenceTransformer
from typing import List, Union, Callable
from torch import cuda
from tqdm import tqdm
import time
class WeaviateClient(Client):
'''
A python native Weaviate Client class that encapsulates Weaviate functionalities
in one object. Several convenience methods are added for ease of use.
Args
----
api_key: str
The API key for the Weaviate Cloud Service (WCS) instance.
https://console.weaviate.cloud/dashboard
endpoint: str
The url endpoint for the Weaviate Cloud Service instance.
model_name_or_path: str='sentence-transformers/all-MiniLM-L6-v2'
The name or path of the SentenceTransformer model to use for vector search.
Will also support OpenAI text-embedding-ada-002 model. This param enables
the use of most leading models on MTEB Leaderboard:
https://huggingface.co/spaces/mteb/leaderboard
openai_api_key: str=None
The API key for the OpenAI API. Only required if using OpenAI text-embedding-ada-002 model.
'''
def __init__(self,
api_key: str,
endpoint: str,
model_name_or_path: str='sentence-transformers/all-MiniLM-L6-v2',
openai_api_key: str=None,
**kwargs
):
auth_config = AuthApiKey(api_key=api_key)
super().__init__(auth_client_secret=auth_config,
url=endpoint,
**kwargs)
self.model_name_or_path = model_name_or_path
self.openai_model = False
if self.model_name_or_path == 'text-embedding-ada-002':
if not openai_api_key:
raise ValueError(f'OpenAI API key must be provided to use this model: {self.model_name_or_path}')
self.model = OpenAI(api_key=openai_api_key)
self.openai_model = True
else:
self.model = SentenceTransformer(self.model_name_or_path) if self.model_name_or_path else None
self.display_properties = ['title', 'video_id', 'length', 'thumbnail_url', 'views', 'episode_url', \
'doc_id', 'guest', 'content'] # 'playlist_id', 'channel_id', 'author'
def show_classes(self) -> Union[List[dict], str]:
'''
Shows all available classes (indexes) on the Weaviate instance.
'''
classes = self.cluster.get_nodes_status()[0]['shards']
if classes:
return [d['class'] for d in classes]
else:
return "No classes found on cluster."
def show_class_info(self) -> Union[List[dict], str]:
'''
Shows all information related to the classes (indexes) on the Weaviate instance.
'''
classes = self.cluster.get_nodes_status()[0]['shards']
if classes:
return [d for d in classes]
else:
return "No classes found on cluster."
def show_class_properties(self, class_name: str) -> Union[dict, str]:
'''
Shows all properties of a class (index) on the Weaviate instance.
'''
classes = self.schema.get()
if classes:
all_classes = classes['classes']
for d in all_classes:
if d['class'] == class_name:
return d['properties']
return f'Class "{class_name}" not found on host'
return f'No Classes found on host'
def show_class_config(self, class_name: str) -> Union[dict, str]:
'''
Shows all configuration of a class (index) on the Weaviate instance.
'''
classes = self.schema.get()
if classes:
all_classes = classes['classes']
for d in all_classes:
if d['class'] == class_name:
return d
return f'Class "{class_name}" not found on host'
return f'No Classes found on host'
def delete_class(self, class_name: str) -> str:
'''
Deletes a class (index) on the Weaviate instance, if it exists.
'''
        available = self._check_class_availability(class_name)
if isinstance(available, bool):
if available:
self.schema.delete_class(class_name)
                not_deleted = self._check_class_availability(class_name)
if isinstance(not_deleted, bool):
if not_deleted:
return f'Class "{class_name}" was not deleted. Try again.'
else:
return f'Class "{class_name}" deleted'
return f'Class "{class_name}" deleted and there are no longer any classes on host'
return f'Class "{class_name}" not found on host'
return available
    def _check_class_availability(self, class_name: str) -> Union[bool, str]:
'''
Checks if a class (index) exists on the Weaviate instance.
'''
classes = self.schema.get()
if classes:
all_classes = classes['classes']
for d in all_classes:
if d['class'] == class_name:
return True
return False
else:
return f'No Classes found on host'
def format_response(self,
response: dict,
class_name: str
) -> List[dict]:
'''
Formats json response from Weaviate into a list of dictionaries.
Expands _additional fields if present into top-level dictionary.
'''
if response.get('errors'):
return response['errors'][0]['message']
results = []
hits = response['data']['Get'][class_name]
for d in hits:
temp = {k:v for k,v in d.items() if k != '_additional'}
if d.get('_additional'):
for key in d['_additional']:
temp[key] = d['_additional'][key]
results.append(temp)
return results
def keyword_search(self,
request: str,
class_name: str,
properties: List[str]=['content'],
limit: int=10,
where_filter: dict=None,
display_properties: List[str]=None,
return_raw: bool=False) -> Union[dict, List[dict]]:
'''
Executes Keyword (BM25) search.
Args
----
query: str
User query.
class_name: str
Class (index) to search.
properties: List[str]
List of properties to search across.
limit: int=10
Number of results to return.
display_properties: List[str]=None
List of properties to return in response.
If None, returns all properties.
return_raw: bool=False
If True, returns raw response from Weaviate.
'''
display_properties = display_properties if display_properties else self.display_properties
response = (self.query
.get(class_name, display_properties)
.with_bm25(query=request, properties=properties)
.with_additional(['score', "id"])
.with_limit(limit)
)
response = response.with_where(where_filter).do() if where_filter else response.do()
if return_raw:
return response
else:
return self.format_response(response, class_name)
def vector_search(self,
request: str,
class_name: str,
limit: int=10,
where_filter: dict=None,
display_properties: List[str]=None,
return_raw: bool=False,
device: str='cuda:0' if cuda.is_available() else 'cpu'
) -> Union[dict, List[dict]]:
'''
Executes vector search using embedding model defined on instantiation
of WeaviateClient instance.
Args
----
query: str
User query.
class_name: str
Class (index) to search.
limit: int=10
Number of results to return.
display_properties: List[str]=None
List of properties to return in response.
If None, returns all properties.
return_raw: bool=False
If True, returns raw response from Weaviate.
'''
display_properties = display_properties if display_properties else self.display_properties
query_vector = self._create_query_vector(request, device=device)
response = (
self.query
.get(class_name, display_properties)
.with_near_vector({"vector": query_vector})
.with_limit(limit)
.with_additional(['distance'])
)
response = response.with_where(where_filter).do() if where_filter else response.do()
if return_raw:
return response
else:
return self.format_response(response, class_name)
def _create_query_vector(self, query: str, device: str) -> List[float]:
'''
Creates embedding vector from text query.
'''
return self.get_openai_embedding(query) if self.openai_model else self.model.encode(query, device=device).tolist()
def get_openai_embedding(self, query: str) -> List[float]:
'''
Gets embedding from OpenAI API for query.
'''
embedding = self.model.embeddings.create(input=query, model='text-embedding-ada-002').model_dump()
if embedding:
return embedding['data'][0]['embedding']
else:
raise ValueError(f'No embedding found for query: {query}')
def hybrid_search(self,
request: str,
class_name: str,
properties: List[str]=['content'],
alpha: float=0.5,
limit: int=10,
where_filter: dict=None,
display_properties: List[str]=None,
return_raw: bool=False,
device: str='cuda:0' if cuda.is_available() else 'cpu'
) -> Union[dict, List[dict]]:
'''
Executes Hybrid (BM25 + Vector) search.
Args
----
query: str
User query.
class_name: str
Class (index) to search.
properties: List[str]
List of properties to search across (using BM25)
alpha: float=0.5
Weighting factor for BM25 and Vector search.
alpha can be any number from 0 to 1, defaulting to 0.5:
alpha = 0 executes a pure keyword search method (BM25)
alpha = 0.5 weighs the BM25 and vector methods evenly
alpha = 1 executes a pure vector search method
limit: int=10
Number of results to return.
display_properties: List[str]=None
List of properties to return in response.
If None, returns all properties.
return_raw: bool=False
If True, returns raw response from Weaviate.
'''
display_properties = display_properties if display_properties else self.display_properties
query_vector = self._create_query_vector(request, device=device)
response = (
self.query
.get(class_name, display_properties)
.with_hybrid(query=request,
alpha=alpha,
vector=query_vector,
properties=properties,
fusion_type='relativeScoreFusion') #hard coded option for now
.with_additional(["score", "explainScore"])
.with_limit(limit)
)
response = response.with_where(where_filter).do() if where_filter else response.do()
if return_raw:
return response
else:
return self.format_response(response, class_name)
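# Usage sketch (endpoint, API key and class name are placeholders): the
# where_filter arguments above take the dict produced by WhereFilter.todict()
# (WhereFilter is defined at the bottom of this module).
#
#     client = WeaviateClient(api_key="<WCS_API_KEY>", endpoint="<WCS_URL>")
#     guest_filter = WhereFilter(path=["guest"], operator="Equal", valueText="Jane Doe").todict()
#     hits = client.hybrid_search("episodes about startups", class_name="<ClassName>",
#                                 where_filter=guest_filter, limit=5)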
class WeaviateIndexer:
def __init__(self,
client: WeaviateClient,
batch_size: int=150,
num_workers: int=4,
dynamic: bool=True,
creation_time: int=5,
timeout_retries: int=3,
connection_error_retries: int=3,
callback: Callable=None,
):
'''
Class designed to batch index documents into Weaviate. Instantiating
this class will automatically configure the Weaviate batch client.
'''
self._client = client
self._callback = callback if callback else self._default_callback
self._client.batch.configure(batch_size=batch_size,
num_workers=num_workers,
dynamic=dynamic,
creation_time=creation_time,
timeout_retries=timeout_retries,
connection_error_retries=connection_error_retries,
callback=self._callback
)
def _default_callback(self, results: dict):
"""
Check batch results for errors.
Parameters
----------
results : dict
The Weaviate batch creation return value.
"""
if results is not None:
for result in results:
if "result" in result and "errors" in result["result"]:
if "error" in result["result"]["errors"]:
print(result["result"])
def batch_index_data(self,
data: List[dict],
class_name: str,
vector_property: str='content_embedding'
) -> None:
'''
Batch function for fast indexing of data onto Weaviate cluster.
This method assumes that self._client.batch is already configured.
'''
start = time.perf_counter()
with self._client.batch as batch:
for d in tqdm(data):
#define single document
properties = {k:v for k,v in d.items() if k != vector_property}
try:
#add data object to batch
batch.add_data_object(
data_object=properties,
class_name=class_name,
vector=d[vector_property]
)
except Exception as e:
print(e)
continue
end = time.perf_counter() - start
print(f'Batch job completed in {round(end/60, 2)} minutes.')
class_info = self._client.show_class_info()
for i, c in enumerate(class_info):
if c['class'] == class_name:
print(class_info[i])
self._client.batch.shutdown()
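# Usage sketch (class name and data shape are illustrative): each dict in
# `data` holds the document properties plus its precomputed vector stored
# under the `vector_property` key.
#
#     indexer = WeaviateIndexer(client)
#     indexer.batch_index_data(data, class_name="<ClassName>", vector_property="content_embedding")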
@dataclass
class WhereFilter:
'''
Simplified interface for constructing a WhereFilter object.
Args
----
path: List[str]
List of properties to filter on.
operator: str
Operator to use for filtering. Options: ['And', 'Or', 'Equal', 'NotEqual',
'GreaterThan', 'GreaterThanEqual', 'LessThan', 'LessThanEqual', 'Like',
'WithinGeoRange', 'IsNull', 'ContainsAny', 'ContainsAll']
value[dataType]: Union[int, bool, str, float, datetime]
Value to filter on. The dataType suffix must match the data type of the
property being filtered on. At least and only one value type must be provided.
'''
path: List[str]
operator: str
valueInt: int=None
valueBoolean: bool=None
valueText: str=None
valueNumber: float=None
    valueDate: str = None  # assumed to be an RFC 3339 / ISO 8601 date string
    def __post_init__(self):
        # Validate the operator and ensure exactly one value field is set.
        operators = ['And', 'Or', 'Equal', 'NotEqual', 'GreaterThan', 'GreaterThanEqual', 'LessThan',
                     'LessThanEqual', 'Like', 'WithinGeoRange', 'IsNull', 'ContainsAny', 'ContainsAll']
        if self.operator not in operators:
            raise ValueError(f'operator must be one of: {operators}, got {self.operator}')
        values = [self.valueInt, self.valueBoolean, self.valueText, self.valueNumber, self.valueDate]
        provided = [v for v in values if v is not None]
        if not provided:
            raise ValueError('At least one value must be provided.')
        if len(provided) > 1:
            raise ValueError('At most one value can be provided.')
def todict(self):
return {k:v for k,v in self.__dict__.items() if v is not None} | [] |
2024-01-10 | Whackathon-2023/TEAM-6 | flask-server~flask_server.py | # SQL - "I need a ticket where I was the reporter and the assignee was 'John Smith'"
# Semantic ID - "Find a me a similar ticket to [ticket_id]"
# Semantic Search - "A user can't log into the wifi. Find me a ticket that is similar to this one."
# Flask is a framework for creating a web server, using Python
# Flask - framework
# Server - Something that listens for requests and sends responses
# Python - programming language
# This server will accept:
# The Question - and will reply with a markdown response
# We import modules - these are libraries of code that we can use
from datetime import date
import json
import os
from flask import Flask, request, jsonify
import numpy as np
import openai
import sqlite3
from dotenv import load_dotenv
load_dotenv()
PYTHON_EXECUTABLE = "temp/temp_file.py"
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
assert OPENAI_API_KEY, "OPENAI_API_KEY environment variable is missing from .env"
openai.api_key = OPENAI_API_KEY
# Additional information about the schema
# Tickets are considered closed only when their status is 'Resolved.'
# Timestamps difference will return seconds
# The assignee is the person who is currently assigned to the ticket
# The reporter is the person who reported the ticket
# The creator is the person who created the ticket
# Inputs the current date using the SQLite function date('now'), in memory
today = date.today()
schema = '''
{"JIRA_ITSD_FY23_FULL":[{"Column Name":"Summary","Data Type":"TEXT"},{"Column Name":"Issue_id","Data Type":"REAL"},{"Column Name":"Issue_Type","Data Type":"TEXT","Enumerations":["Service Request","Purchase","Incident","Access","Change","Problem"],"Comments":"This is an enumerated field."},{"Column Name":"Status","Data Type":"TEXT","Enumerations":["Resolved","With Support","New","Procuring","With Approver","With Customer","Approved","Configuring"],"Comments":"This is an enumerated field."},{"Column Name":"Priority","Data Type":"TEXT","Enumerations":["Low","High","Medium","Highest","Lowest","Blocker"],"Comments":"This is an enumerated field."},{"Column Name":"Assignee","Data Type":"TEXT"},{"Column Name":"Reporter","Data Type":"TEXT"},{"Column Name":"Creator","Data Type":"TEXT"},{"Column Name":"Created","Data Type":"TIMESTAMP"},{"Column Name":"Resolved","Data Type":"TIMESTAMP"},{"Column Name":"Custom_field_Business_Unit","Data Type":"TEXT","Enumerations":["Fertilisers","Shared Services","Kleenheat","Australian Vinyls","Chemicals","Decipher"],"Comments":"This is an enumerated field."},{"Column Name":"Custom_field_Category","Data Type":"TEXT","Enumerations":["User Access","Client Application","Computer","Mobile Device","Business System","Peripheral Device","Cyber Security","Server Infrastructure","Network"],"Comments":"This is an enumerated field."},{"Column Name":"Custom_field_ReporterBU","Data Type":"TEXT","Enumerations":["Company: Fertilisers, ","Company: Sodium Cyanide, ","Company: Shared Services, ","Company: Kleenheat, ","Company: Ammonia/AN, ","Company: Support Services, ","Company: Australian Vinyls, ","Company: Chemicals, ","Company: Decipher, "],"Comments":"This is an enumerated field."}{"Column Name: Time_To_Complete_Hours","Data Type":"REAL","Comments":"This is a calculated field of how long a ticket took to resolve, if it is empty the ticket has not been resolved'}]}
Tickets are considered closed only when their status is 'Resolved.', The time it took to finish a ticket can be found in the "Time_To_Complete_Hours column, this value is in hours. The assignee is the person who is currently assigned to the ticket. The reporter is the person who reported the ticket. The creator is the person who created the ticket.]}
The important fields are Summary, Status, Assignee, Reporter, Created, Custom_field_ReporterBU, Custom_field_ReporterDivision
The current date is ''' + str(today)
app = Flask(__name__)
# Loadings embeddings into memory
print("Loading embeddings into memory...")
EMBEDDINGS_FILE = "issue_description_embeddings.json"
with open(EMBEDDINGS_FILE, "r") as f:
embeddings = json.load(f)
# Converts into a numpy array
for key in embeddings:
embeddings[key] = np.array(embeddings[key])
print("Embeddings loaded.")
# Create a route - this is a URL that we can visit
@app.route('/question', methods=['POST'])
def question():
# JSON is a way of representing data
request_data = request.get_json()
print(request_data)
question = request_data['question']
function = decide_function_call(question)
print(f"Function called: {function}")
if function == None:
return jsonify({"content": "I don't know how to answer that question.", "error": "No function was called."})
elif function == "generate_sql_for_fixed_columns":
result = generate_sql_for_fixed_columns(question)
if result is None:
return jsonify({"content": "I don't know how to answer that question.", "error": "No SQL query was generated."})
query_string = result['query_string']
explanation = result['explanation']
print(f"SQL Query: {query_string}")
print(f"Explanation: {explanation}")
result = query_database(query_string) # Can return None
if result is None:
return jsonify({"content": "I don't know how to answer that question.", "error": "No results were returned from the database."})
print(f"Result: {result}")
# Turn into conversational response formatted as markdown
conversational_response = create_conversational_response(
result, question, f"SQL Query: {query_string}")
print(f"Conversational Response: {conversational_response}")
return jsonify({"content": conversational_response})
elif function == "extract_ticket_id_for_similarity_search":
# We want to perform an vector similarity search
# We first get the embedding for the ticket_id, then we perform a vector similarity search
result = extract_ticket_id_for_similarity_search(question)
if result is None:
return jsonify({"content": "I don't know how to answer that question.", "error": "No ticket ID was extracted."})
ticket_id = result['ticket_id']
embedding = embeddings[ticket_id]
most_similar = get_most_similar(ticket_id, embedding, embeddings, 3)
print(f"Most similar tickets: {most_similar}")
result = select_tickets(most_similar)
print(f"Result: {result}")
return jsonify({"content": result})
# Need to turn conversational / markdown
elif function == "extract_description_and_find_similarity":
# We want to perform an vector similarity search on the ticket description
result = extract_description_and_find_similarity(question)
if result is None:
return jsonify({"content": "I don't know how to answer that question.", "error": "No description was extracted."})
print(f"Ticket Description: {result['ticket_description']}")
ticket_description = result['ticket_description']
embedding = process_embedding(ticket_description) # Can return None
if embedding is None:
print("I don't know how to answer that question.")
return jsonify({"content": "I don't know how to answer that question."})
most_similar = get_most_similar(
ticket_description, embedding, embeddings, 2)
print(f"Most similar tickets: {most_similar}")
result = select_tickets(most_similar)
print(f"Result: {result}")
# Return the top tickets as markdown, along with a conversational response
conversational_response = create_conversational_response(
result, question, ' **Sure! I have found some similar tickets regarding [issue] for your reference**')
return jsonify({"content": conversational_response})
elif function == "no_functon_called":
result = no_functon_called(question)
return jsonify({"content": result})
elif function == "generate_visuals":
# First, we generate a explanation query for what we are going to do
explanation = explanation_query(question)
if explanation is None:
return jsonify({"content": "I don't know how to answer that question.", "error": "No explanation was generated."})
print(f"Explanation: {explanation['explanation']}")
# Then, we fetch the data using the query using `generate_sql_for_fixed_columns`
result = generate_sql_for_fixed_columns(f"{explanation['explanation']} {question}")
if result is None:
return jsonify({"content": "I don't know how to answer that question.", "error": "No SQL query was generated."})
print(f"SQL Query: {result['query_string']}")
# Then, we fetch the data using the query using `generate_sql_for_fixed_columns`
result = query_database(result['query_string'])
if result is None:
return jsonify({"content": "I don't know how to answer that question.", "error": "No results were returned from the database."})
print(f"Result: {result}")
# Then, we generate a visual using the data
visual = generate_matplotlib_visual(result, question, explanation['explanation'])
if visual is None:
return jsonify({"content": "I don't know how to answer that question.", "error": "No visual was generated."})
print(f"Visual: {visual}")
code = visual['python_code']
description = visual['description']
file_path = visual['file_path']
# Saves code to PYTHON_EXECUTABLE
with open(PYTHON_EXECUTABLE, "w") as f:
f.write(code)
# Executes code
os.system(f"python {PYTHON_EXECUTABLE}")
# eval(code)
# Uploads visual to share.sh and returns the link
url = os.popen(f"curl --upload-file {file_path} https://free.keep.sh").read().strip()
print(f"URL: {url}")
return jsonify({"content": description, "url": url+"/download"});
else:
print("I don't know how to answer that question.")
return jsonify({"content": "I don't know how to answer that question.", "error": "No function was called."})
return jsonify({"content": "I don't know how to answer that question."})
def generate_matplotlib_visual(data, question,explanation):
structure = [
{
"name": "generate_matplotlib_visual",
"description": "This function creates a visual representation of data using Matplotlib. The generated visual is saved to a specified location, and the function provides a comprehensive description of what the visual represents.",
"parameters": {
"type": "object",
"properties": {
"python_code": {
"type": "string",
"description": "This parameter should contain the complete Python code necessary for generating the visual. This includes import statements, data preparation steps, and Matplotlib commands for rendering the visual."
},
"file_path": {
"type": "string",
"description": "Indicates the absolute or relative file path where the generated visual will be saved. The path should include the filename and the extension (e.g., '/path/to/save/image.png')."
},
"description": {
"type": "string",
"description": "Provides a explanation of what the generated visual aims to represent. This should include the type of visual (e.g., bar chart, line graph), the data being visualized, and any specific insights the visual is intended to convey."
}
},
"required": ["python_code", "file_path"]
}
}
]
prompt = f"""
DATA:
```{data}```
GOAL:
The purpose of the visualisation is to {explanation}. It should be a .png file saved to the current directory.
    You are Service Genie, an IT chatbot that calls functions to help answer a user's question: `{question}`
"""
messages = [
{"role": "user", "content": prompt},
]
response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo-16k-0613",
# model="gpt-3.5-turbo-0613",
model="gpt-4-0613",
messages=messages,
functions=structure,
function_call={
"name": "generate_matplotlib_visual",
}
)
try:
text_string = response.choices[0].message.function_call.arguments
text_data = json.loads(text_string)
return text_data
except Exception as e:
print(response.choices[0].message.function_call.arguments)
print(e)
return None
def explanation_query(question):
structure = [
{
"name": "explanation_query",
"description": "Generates a detailed explanation of what the visualisation shows and why it was generated.",
"parameters": {
"type": "object",
"properties": {
"explanation": {
"type": "string",
"description": "A detailed explanation of what the visualisation shows and why it was generated."
}
},
"required": ["explanation"]
}
}
]
prompt = f"""
```
{schema}
```
GOAL:
    You are Service Genie, an IT chatbot that calls functions to help answer a user's question: `{question}`
"""
messages = [
{"role": "user", "content": prompt},
]
response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo-16k-0613",
model="gpt-3.5-turbo-0613",
# model="gpt-4-0613",
messages=messages,
functions=structure,
function_call={
"name": "explanation_query",
}
)
try:
text_string = response.choices[0].message.function_call.arguments
text_data = json.loads(text_string)
return text_data
except Exception as e:
print(response.choices[0].message.function_call.arguments)
print(e)
return None
def generate_sql_for_fixed_columns(question):
structure = [
{
"name": "generate_sql_for_fixed_columns",
"description": "Generates an SQLite query based on specific columns in the database when the user query explicitly refers to columns or states.",
"parameters": {
"type": "object",
"properties": {
"query_string": {
"type": "string",
"description": "The generated SQLite query that will fetch the desired data based on the specific columns."
},
"explanation": {
"type": "string",
"description": "A detailed explanation for why this specific SQL query was generated."
}
},
"required": ["query_string", "explanation"]
}
},
]
prompt = f"""
```
{schema}
```
GOAL:
    You are Service Genie, an IT chatbot that calls functions to help answer a user's question: `{question}`
"""
messages = [
{"role": "user", "content": prompt},
]
response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo-16k-0613",
#model="gpt-3.5-turbo-0613",
model="gpt-4-0613",
messages=messages,
functions=structure,
function_call={
"name": "generate_sql_for_fixed_columns",
}
)
try:
text_string = response.choices[0].message.function_call.arguments
text_data = json.loads(text_string)
return text_data
except Exception as e:
print(response.choices[0].message.function_call.arguments)
print(e)
return None
def extract_ticket_id_for_similarity_search(question):
structure = [
{
"name": "extract_ticket_id_for_similarity_search",
"description": "Identifies and extracts the ticket ID from the user's query to perform a similarity search using embeddings. Ticket ID: ITSD-******",
"parameters": {
"type": "object",
"properties": {
"ticket_id": {
"type": "string",
"description": "The extracted ticket ID that will be used for a similarity search."
},
},
"required": ["ticket_id"]
}
}
]
prompt = f"""
```
{schema}
```
Example Question: "Find me a ticket similar to ITSD-123456."
Function Called: extract_ticket_id_for_similarity_search
    Justification: The user's query includes an explicit ticket ID and asks for similar tickets. The task here is straightforward: extract the ticket ID and use it as a basis for a similarity search. No SQL query or natural language description is required.
    GOAL:
    You are Service Genie, an IT chatbot that calls functions to help answer a user's question: `{question}`
"""
print(prompt)
messages = [
{"role": "user", "content": prompt},
]
response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo-16k-0613",
#model="gpt-3.5-turbo-0613",
model="gpt-4-0613",
messages=messages,
functions=structure,
function_call={
"name": "extract_ticket_id_for_similarity_search",
}
)
try:
text_string = response.choices[0].message.function_call.arguments
text_data = json.loads(text_string)
return text_data
except Exception as e:
print(response.choices[0].message.function_call.arguments)
print(e)
return None
def extract_description_and_find_similarity(question):
structure = [
{
"name": "extract_description_and_find_similarity",
"description": "Processes the user's natural language query to extract the core issue description.",
"parameters": {
"type": "object",
"properties": {
"ticket_description": {
"type": "string",
"description": "The extracted issue description that forms the basis for searching similar tickets. This is a cleaned-up and normalized version of the user's query, retaining only the crucial elements that define the problem. Example: 'User can't log into the wifi on their laptop after changing their password.'"
},
},
"required": ["description_embedding"]
}
}
]
prompt = f"""
```
{schema}
```
Example Question: "A user can't log into the wifi. Find me a ticket that is similar to this problem."
Function Called: extract_description_and_find_similarity
Justification: The user describes a problem in natural language without referring to a specific ticket ID or database column. The problem description needs to be extracted, possibly cleaned up, and converted into an embedding for a similarity search.
GOAL:
    You are Service Genie, an IT chatbot that calls functions to help answer a user's question: `{question}`
"""
messages = [
{"role": "user", "content": prompt},
]
response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo-16k-0613",
#model="gpt-3.5-turbo-0613",
model="gpt-4-0613",
messages=messages,
functions=structure,
function_call={
"name": "extract_description_and_find_similarity",
}
)
try:
text_string = response.choices[0].message.function_call.arguments
text_data = json.loads(text_string)
return text_data
except Exception as e:
print(response.choices[0].message.function_call.arguments)
print(e)
return None
# Decides which function to call
def decide_function_call(question):
structure = [
{
"name": "decide_function_call",
"description": "Decides which function to call based on the user's question.",
"parameters": {
"type": "object",
"properties": {
"function_name": {
"type": "string",
"enum": [
"generate_sql_for_fixed_columns",
"extract_ticket_id_for_similarity_search",
"extract_description_and_find_similarity",
"generate_visuals"
],
"description": "The name of the function that will be called to answer the user's question."
},
}
}
}
]
prompt = f"""
```
{schema}
```
Example Question: "How many unresolved tickets are there?"
Function Called: generate_sql_for_fixed_columns
Justification: The user's question specifically refers to a known column in the database, "unresolved tickets." The query can be answered directly with an SQL operation. There's no need for similarity search or text processing; the columns needed are explicitly stated
Example Question: "Find me a ticket similar to ITSD-123456."
Function Called: extract_ticket_id_for_similarity_search
Justification: The user's query includes an explicit ticket ID and asks for similar tickets. The task here is straightforward: extract the ticket ID and use it as a basis for a similarity search. No SQL query or natural language description is required.
Example Question: "A user can't log into the wifi. Find me a ticket that is similar to this problem."
Function Called: extract_description_and_find_similarity
Justification: The user describes a problem in natural language without referring to a specific ticket ID or database column. The problem description needs to be extracted, possibly cleaned up, and converted into an embedding for a similarity search.
Example Question: "Show me a graph of how many tickets each user has answered."
Function Called: generate_visuals
Justification: The user specifically requests a visual representation of data regarding ticket distribution among users. The task here is to generate the appropriate visual (e.g., a bar graph) to fulfill the user's request.
GOAL:
You are Service Genie, an IT chatbot that calls functions to help answer a user's question: `{question}`
"""
print(prompt)
messages = [
{"role": "user", "content": prompt},
]
response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo-16k-0613",
#model="gpt-3.5-turbo-0613",
model="gpt-4-0613",
messages=messages,
functions=structure,
function_call={
"name": "decide_function_call",
}
)
try:
result = response.choices[0].message.function_call.arguments
function_name = json.loads(result)
return function_name['function_name']
except Exception as e:
print(e)
return None
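# Illustrative round trip for decide_function_call (the question and output
# below are made-up examples, not captured traffic): because function_call
# forces the model to call decide_function_call, the returned
# response.choices[0].message.function_call.arguments is a JSON string such as
#   '{"function_name": "extract_ticket_id_for_similarity_search"}'
# which json.loads() turns into the dict used to route the request.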
def create_conversational_response(result, question, additional_content):
# Turn into conversational response formatted as markdown
prompt = f"""
Result: {result}
{additional_content}
GOAL:
You are Service Genie, a friendly and knowledgeable IT chatbot. Your ultimate aim is to assist users in resolving their IT issues quickly and efficiently.
Attributes:
- Knowledgeable but not condescending
- Friendly but professional
- Quick to assist but thorough in explanations
Your task is to turn the result into a Service Genie-approved, Markdown-structured, conversational response to the user's question: `{question}`
"""
# Need to pass the query and response
print(prompt)
messages = [
{"role": "user", "content": prompt},
]
response = openai.ChatCompletion.create(
# model="gpt-3.5-turbo",
model="gpt-4-0613",
messages=messages,
)
try:
content = response['choices'][0]['message']['content']
print(f"Conversational Response: {content}")
return content
except Exception as e:
print(e)
return None
DATABASE_PATH = "database.db"
def query_database(sql_query):
# Queries our sqlite database and returns the results
conn = sqlite3.connect(DATABASE_PATH)
c = conn.cursor()
try:
c.execute(sql_query)
results = c.fetchall()
conn.close()
return results
except Exception as e:
print(e)
conn.close()
return None
# This is a function that takes in a ticket_id and returns the most similar tickets
def get_most_similar(original_ticket_id, embedding, embeddings, n):
# Initialize an empty list to store similarities
similarities = []
# Normalize the input embedding
norm_embedding = embedding / np.linalg.norm(embedding)
for issue_id, issue_embedding in embeddings.items():
# Skip the original ticket
if issue_id == original_ticket_id:
continue
# Normalize each stored embedding
norm_issue_embedding = issue_embedding / \
np.linalg.norm(issue_embedding)
# Calculate cosine similarity
similarity = np.dot(norm_embedding, norm_issue_embedding)
# Append similarity and issue_id to list
similarities.append((issue_id, similarity))
# Sort by similarity and take the top n most similar issue_ids
most_similar = sorted(similarities, key=lambda x: x[1], reverse=True)[:n]
# Return just the issue IDs
return [issue_id for issue_id, _ in most_similar]
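# Example usage (illustrative: real text-embedding-ada-002 vectors are
# 1536-dimensional; the 3-d vectors and ticket IDs below are made up):
#   embeddings = {
#       "ITSD-1": np.array([0.1, 0.2, 0.3]),
#       "ITSD-2": np.array([0.1, 0.21, 0.29]),
#       "ITSD-3": np.array([-0.5, 0.0, 0.4]),
#   }
#   get_most_similar("ITSD-1", embeddings["ITSD-1"], embeddings, n=2)
#   # -> ["ITSD-2", "ITSD-3"]: descending cosine similarity, query ticket skipped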
# Improve this by not selecting all columns
def select_tickets(ticket_ids):
results = []
for ticket_id in ticket_ids:
sql_query = f'SELECT * FROM JIRA_ITSD_FY23_FULL WHERE Issue_key = "{ticket_id}"'
results.append(query_database(sql_query))
return results
def process_embedding(text):
response = openai.Embedding.create(
input=text,
model="text-embedding-ada-002"
)
try:
embedding = response['data'][0]['embedding']
return embedding
except Exception as e:
print(e)
return None
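# Example (response shape per the OpenAI embeddings API; the input text is
# made up):
#   vec = process_embedding("User can't log into the wifi")
#   # vec is a list of 1536 floats for text-embedding-ada-002, or None on error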
def no_function_called(text):
    return "Sorry, but that question has nothing to do with service tickets. Try rephrasing your question."
if __name__ == '__main__':
app.run(port=5000)
"""
structure = [
{
"name": "question_to_query",
"description": "This takes in a user's question and returns a SQLite query that get data to answer the question.",
"parameters": {
"type": "object",
"properties": {
"sqlite_query": {
"type": "string",
"description": "A SQLite query that will return the data needed to answer the user's question."
},
"explanation": {
"type": "string",
"description": "A detailed explanation of why the query was generated."
}
},
"required": ["sqlite_query", "explanation"]
}
},
{
"name": "extract_ticket_id",
"description": "Ticket ID: ITSD-****** - Extracts the ticket ID from a user's question when the question is in the format 'Find me a ticket similar to [ticket_id]'",
"parameters": {
"type": "object",
"properties": {
"ticket_id": {
"type": "string",
"description": "The ticket ID that was extracted from the user's question. Example: ITSD-******"
},
}
}
},
{
"name": "extract_ticket_description",
"description": "Extracts the issue description from a user's query when the user is searching for tickets similar to a particular problem. The function uses natural language processing to identify the core issue from the query and disregards auxiliary words or phrases.",
"parameters": {
"type": "object",
"properties": {
"ticket_description": {
"type": "string",
"description": "The extracted issue description that forms the basis for searching similar tickets. This is a cleaned-up and normalized version of the user's query, retaining only the crucial elements that define the problem. Example: 'User can't log into the wifi on their laptop after changing their password.'"
},
},
"required": ["ticket_description"]
}
}
]
"""
| [
"\n ```\n PLACEHOLDER\n ```\n Example Question: \"How many unresolved tickets are there?\"\n Function Called: generate_sql_for_fixed_columns\n Justification: The user's question specifically refers to a known column in the database, \"unresolved tickets.\" The query can be answered directly with an SQL operation. There's no need for similarity search or text processing; the columns needed are explicitly stated\n\n Example Question: \"Find me a ticket similar to ITSD-123456.\"\n Function Called: extract_ticket_id_for_similarity_search\n Justification: The user's query includes an explicit ticket ID and asks for similar tickets. The task here is straightforward: extract the ticket ID and use it as a basis for a similarity search. No SQL query or natural language description is required.\n\n Example Question: \"A user can't log into the wifi. Find me a ticket that is similar to this problem.\"\n Function Called: extract_description_and_find_similarity\n Justification: The user describes a problem in natural language without referring to a specific ticket ID or database column. The problem description needs to be extracted, possibly cleaned up, and converted into an embedding for a similarity search.\n \n Example Question: \"Show me a graph of how many tickets each user has answered.\"\n Function Called: generate_visuals\n Justification: The user specifically requests a visual representation of data regarding ticket distribution among users. The task here is to generate the appropriate visual (e.g., a bar graph) to fulfill the user's request.\n\n GOAL:\n You are Service Genie, an IT chatbot that calls functions to help answer a users question: `PLACEHOLDER`\n ",
"\n ```\n PLACEHOLDER\n ```\n Example Question: \"Find me a ticket similar to ITSD-123456.\"\n Function Called: extract_ticket_id_for_similarity_search\n Justification: The user's query includes an explicit ticket ID and asks for similar tickets. The task here is straightforward: extract the ticket ID and use it as a basis for a similarity search. No SQL query or natural language description is required. GOAL:\n You are Service Genie, an IT chatbot tthat calls functions to help answer a users question: `PLACEHOLDER`\n ",
"\n ```\n PLACEHOLDER\n ```\n GOAL:\n You are Service Genie, an IT chatbot that calls functions to help answer a users question: `PLACEHOLDER`\n ",
"\n Result: PLACEHOLDER\n PLACEHOLDER\n\n GOAL:\n You are Service Genie, a friendly and knowledgeable IT chatbot. Your ultimate aim is to assist users in resolving their IT issues quickly and efficiently.\n\n Attributes:\n - Knowledgeable but not condescending\n - Friendly but professional\n - Quick to assist but thorough in explanations\n\n Your task is to turn the result into a Service Genie-approved, Markdown-structured, conversational response to the user's question: `PLACEHOLDER`\n ",
"\n ```\n PLACEHOLDER\n ```\n\n GOAL:\n You are Service Genie, an IT chatbot that calls functions to help answer a users question: `PLACEHOLDER`\n ",
"\n ```\n PLACEHOLDER\n ```\n Example Question: \"A user can't log into the wifi. Find me a ticket that is similar to this problem.\"\n Function Called: extract_description_and_find_similarity\n Justification: The user describes a problem in natural language without referring to a specific ticket ID or database column. The problem description needs to be extracted, possibly cleaned up, and converted into an embedding for a similarity search.\n GOAL:\n You are Service Genie, an IT chatbot tthat calls functions to help answer a users question: `PLACEHOLDER`\n ",
"\n DATA:\n ```PLACEHOLDER```\n GOAL:\n The purpose of the visualisation is to PLACEHOLDER. It should be a .png file saved to the current directory.\n You are Service Genie, an IT chatbot that calls functions to help answer a users question: `PLACEHOLDER`\n ",
"I don't know how to answer that question."
] |
2024-01-10 | ralabarta/gpt4all_embeddings | doc_learn.py | import os
from dotenv import load_dotenv
from langchain.document_loaders import TextLoader, PDFMinerLoader, CSVLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import LlamaCppEmbeddings
from constants import CHROMA_SETTINGS
load_dotenv()
def main():
llama_embeddings_model = os.environ.get('LLAMA_EMBEDDINGS_MODEL')
persist_directory = os.environ.get('PERSIST_DIRECTORY')
model_n_ctx = os.environ.get('MODEL_N_CTX')
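    # Assumed .env contents for the lookups above (the paths are examples only):
    #   LLAMA_EMBEDDINGS_MODEL=./models/ggml-model-q4_0.bin
    #   PERSIST_DIRECTORY=db
    #   MODEL_N_CTX=1024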
    # Load each document and split it into chunks
    texts = []
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
    for root, dirs, files in os.walk("source_documents"):
        for file in files:
            if file.endswith(".txt"):
                loader = TextLoader(os.path.join(root, file), encoding="utf8")
            elif file.endswith(".pdf"):
                loader = PDFMinerLoader(os.path.join(root, file))
            elif file.endswith(".csv"):
                loader = CSVLoader(os.path.join(root, file))
            else:
                # Skip unsupported file types instead of reusing a stale loader
                continue
            documents = loader.load()
            texts.extend(text_splitter.split_documents(documents))
# Create embeddings
llama = LlamaCppEmbeddings(model_path=llama_embeddings_model, n_ctx=model_n_ctx)
# Create and store locally vectorstore
db = Chroma.from_documents(texts, llama, persist_directory=persist_directory, client_settings=CHROMA_SETTINGS)
db.persist()
db = None
if __name__ == "__main__":
main()
| [] |
2024-01-10 | xocelyk/select-icl-prompts | few_shot.py | import numpy as np
import pandas as pd
import openai
from utils import get_response, create_prompt, parse_response
from dotenv import load_dotenv
import os
from config import load_config
from data_loader import split_data
import multiprocessing
config = load_config()
prompts = config['prompts']
# probably should put this in the config file
TIMEOUT = 10
def get_response_with_timeout(prompt, temperature):
    return get_response(prompt, temperature, timeout=TIMEOUT)
def get_few_shot_prompt(icl_data, test_data):
system_content = prompts['SYSTEM_CONTENT_2']
assistant_content_1 = prompts['ASSISTANT_CONTENT_1']
user_content_2 = prompts['USER_CONTENT_2']
messages = [{"role": "system", "content": system_content}, {"role": "assistant", "content": assistant_content_1}, {"role": "user", "content": user_content_2}]
prompt = create_prompt(icl_data=icl_data, test_data=test_data, messages=messages, train_mode=True, test_mode=True)
return prompt
def few_shot_one_example(icl_data, test_validation_data):
assert 'Label' in test_validation_data.keys(), 'Few Shot One Example takes one example at a time'
test_icl_data_keys = list(icl_data.keys())
np.random.shuffle(test_icl_data_keys)
icl_data = {key: icl_data[key] for key in test_icl_data_keys}
test_label = test_validation_data['Label']
prompt = get_few_shot_prompt(icl_data, test_validation_data)
print(prompt[-1]['content'])
response_text = get_response(prompt, temperature=0, timeout=TIMEOUT)[0]
response = parse_response(response_text)
if response == test_label:
correct = 1
else:
if response == -1:
correct = -1
else:
correct = 0
return {'response': response, 'correct': correct, 'label': test_label, 'text': response_text}
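# Illustrative call (the data shapes are assumptions inferred from the code
# above: icl_data maps ids to labelled in-context examples, and the test
# example must carry a 'Label' key):
#   out = few_shot_one_example(icl_data, {"text": "...", "Label": 1})
#   # out["correct"] is 1 on a match, 0 on a mismatch, and -1 when the model's
#   # answer could not be parsed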
| [] |
2024-01-10 | BUEC500C1/news-analyzer-lukasrosario | api~extensions.py | import flask_praetorian
import flask_sqlalchemy
import boto3
import openai
import os
from newsapi import NewsApiClient
guard = flask_praetorian.Praetorian()
db = flask_sqlalchemy.SQLAlchemy()
storage_client = boto3.client(
"s3",
region_name="us-east-2",
aws_access_key_id=os.getenv("AWS_ACCESS_KEY"),
aws_secret_access_key=os.getenv("AWS_SECRET_KEY"),
)
openai.api_key = os.getenv("OPENAI_API_KEY")
news_client = NewsApiClient(api_key=os.getenv("NEWS_API_KEY"))
| [] |
2024-01-10 | bryansparks/quivr | backend~core~models~settings.py | from langchain.embeddings.openai import OpenAIEmbeddings
from models.databases.supabase.supabase import SupabaseDB
from pydantic import BaseSettings
from supabase.client import Client, create_client
from vectorstore.supabase import SupabaseVectorStore
class BrainRateLimiting(BaseSettings):
max_brain_size: int = 52428800
max_brain_per_user: int = 5
class BrainSettings(BaseSettings):
openai_api_key: str
anthropic_api_key: str
supabase_url: str
supabase_service_key: str
pg_database_url: str
resend_api_key: str = "null"
resend_email_address: str = "[email protected]"
class LLMSettings(BaseSettings):
private: bool = False
model_path: str = "./local_models/ggml-gpt4all-j-v1.3-groovy.bin"
def get_supabase_client() -> Client:
settings = BrainSettings() # pyright: ignore reportPrivateUsage=none
supabase_client: Client = create_client(
settings.supabase_url, settings.supabase_service_key
)
return supabase_client
def get_supabase_db() -> SupabaseDB:
supabase_client = get_supabase_client()
return SupabaseDB(supabase_client)
def get_embeddings() -> OpenAIEmbeddings:
settings = BrainSettings() # pyright: ignore reportPrivateUsage=none
embeddings = OpenAIEmbeddings(
openai_api_key=settings.openai_api_key
) # pyright: ignore reportPrivateUsage=none
return embeddings
def get_documents_vector_store() -> SupabaseVectorStore:
settings = BrainSettings() # pyright: ignore reportPrivateUsage=none
embeddings = get_embeddings()
supabase_client: Client = create_client(
settings.supabase_url, settings.supabase_service_key
)
documents_vector_store = SupabaseVectorStore(
supabase_client, embeddings, table_name="vectors"
)
return documents_vector_store
| [] |
2024-01-10 | bryansparks/quivr | backend~core~models~files.py | import os
import tempfile
from typing import Any, Optional
from uuid import UUID
from fastapi import UploadFile
from langchain.text_splitter import RecursiveCharacterTextSplitter
from logger import get_logger
from models.brains import Brain
from models.databases.supabase.supabase import SupabaseDB
from models.settings import get_supabase_db
from pydantic import BaseModel
from utils.file import compute_sha1_from_file
logger = get_logger(__name__)
class File(BaseModel):
id: Optional[UUID] = None
file: Optional[UploadFile]
file_name: Optional[str] = ""
file_size: Optional[int] = None
file_sha1: Optional[str] = ""
vectors_ids: Optional[list] = []
file_extension: Optional[str] = ""
content: Optional[Any] = None
chunk_size: int = 500
chunk_overlap: int = 0
documents: Optional[Any] = None
    @property
def supabase_db(self) -> SupabaseDB:
return get_supabase_db()
def __init__(self, **kwargs):
super().__init__(**kwargs)
if self.file:
self.file_name = self.file.filename
self.file_size = (
self.file.file._file.tell() # pyright: ignore reportPrivateUsage=none
)
self.file_extension = os.path.splitext(
self.file.filename # pyright: ignore reportPrivateUsage=none
)[-1].lower()
async def compute_file_sha1(self):
"""
Compute the sha1 of the file using a temporary file
"""
with tempfile.NamedTemporaryFile(
delete=False,
suffix=self.file.filename, # pyright: ignore reportPrivateUsage=none
) as tmp_file:
await self.file.seek(0) # pyright: ignore reportPrivateUsage=none
self.content = (
await self.file.read() # pyright: ignore reportPrivateUsage=none
)
tmp_file.write(self.content)
tmp_file.flush()
self.file_sha1 = compute_sha1_from_file(tmp_file.name)
os.remove(tmp_file.name)
def compute_documents(self, loader_class):
"""
Compute the documents from the file
Args:
loader_class (class): The class of the loader to use to load the file
"""
logger.info(f"Computing documents from file {self.file_name}")
documents = []
with tempfile.NamedTemporaryFile(
delete=False,
suffix=self.file.filename, # pyright: ignore reportPrivateUsage=none
) as tmp_file:
tmp_file.write(self.content) # pyright: ignore reportPrivateUsage=none
tmp_file.flush()
loader = loader_class(tmp_file.name)
documents = loader.load()
print("documents", documents)
os.remove(tmp_file.name)
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap
)
self.documents = text_splitter.split_documents(documents)
print(self.documents)
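    # Example (hypothetical usage; any LangChain loader class with a
    # one-argument constructor and a .load() method fits this interface):
    #   from langchain.document_loaders import TextLoader
    #   file.compute_documents(TextLoader)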
def set_file_vectors_ids(self):
"""
Set the vectors_ids property with the ids of the vectors
that are associated with the file in the vectors table
"""
self.vectors_ids = self.supabase_db.get_vectors_by_file_sha1(
self.file_sha1
).data
def file_already_exists(self):
"""
Check if file already exists in vectors table
"""
self.set_file_vectors_ids()
print("file_sha1", self.file_sha1)
print("vectors_ids", self.vectors_ids)
print(
"len(vectors_ids)",
len(self.vectors_ids), # pyright: ignore reportPrivateUsage=none
)
# if the file does not exist in vectors then no need to go check in brains_vectors
if len(self.vectors_ids) == 0: # pyright: ignore reportPrivateUsage=none
return False
return True
def file_already_exists_in_brain(self, brain_id):
"""
Check if file already exists in a brain
Args:
brain_id (str): Brain id
"""
response = self.supabase_db.get_brain_vectors_by_brain_id_and_file_sha1(
brain_id, self.file_sha1
)
print("response.data", response.data)
if len(response.data) == 0:
return False
return True
def file_is_empty(self):
"""
Check if file is empty by checking if the file pointer is at the beginning of the file
"""
return (
self.file.file._file.tell() < 1 # pyright: ignore reportPrivateUsage=none
)
def link_file_to_brain(self, brain: Brain):
self.set_file_vectors_ids()
if self.vectors_ids is None:
return
for vector_id in self.vectors_ids: # pyright: ignore reportPrivateUsage=none
brain.create_brain_vector(vector_id["id"], self.file_sha1)
print(f"Successfully linked file {self.file_sha1} to brain {brain.id}")
| [] |
2024-01-10 | bryansparks/quivr | backend~core~routes~chat_routes.py | import os
import time
from typing import List
from uuid import UUID
from logger import get_logger
from auth import AuthBearer, get_current_user
from fastapi import APIRouter, Depends, HTTPException, Query, Request
from fastapi.responses import StreamingResponse
from llm.openai import OpenAIBrainPicking
from models.brains import Brain
from models.chat import Chat, ChatHistory
from models.chats import ChatQuestion
from models.databases.supabase.supabase import SupabaseDB
from models.settings import LLMSettings, get_supabase_db
from models.users import User
from repository.brain.get_brain_details import get_brain_details
from repository.brain.get_default_user_brain_or_create_new import (
get_default_user_brain_or_create_new,
)
from repository.chat.create_chat import CreateChatProperties, create_chat
from repository.chat.get_chat_by_id import get_chat_by_id
from repository.chat.get_chat_history import get_chat_history
from repository.chat.get_user_chats import get_user_chats
from repository.chat.update_chat import ChatUpdatableProperties, update_chat
from repository.user_identity.get_user_identity import get_user_identity

logger = get_logger(__name__)

chat_router = APIRouter()
class NullableUUID(UUID):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v) -> UUID | None:
if v == "":
return None
try:
return UUID(v)
except ValueError:
return None
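# Example coercions performed by NullableUUID on the brain_id query parameter:
#   NullableUUID.validate("")             -> None
#   NullableUUID.validate("not-a-uuid")   -> None
#   NullableUUID.validate("00000000-0000-0000-0000-000000000000") -> a UUID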
def delete_chat_from_db(supabase_db: SupabaseDB, chat_id):
try:
supabase_db.delete_chat_history(chat_id)
except Exception as e:
print(e)
pass
try:
supabase_db.delete_chat(chat_id)
except Exception as e:
print(e)
pass
def check_user_limit(
user: User,
):
if user.user_openai_api_key is None:
date = time.strftime("%Y%m%d")
max_requests_number = int(os.getenv("MAX_REQUESTS_NUMBER", 1000))
user.increment_user_request_count(date)
if int(user.requests_count) >= int(max_requests_number):
raise HTTPException(
status_code=429, # pyright: ignore reportPrivateUsage=none
detail="You have reached the maximum number of requests for today.", # pyright: ignore reportPrivateUsage=none
)
else:
pass
# get all chats
@chat_router.get("/chat", dependencies=[Depends(AuthBearer())], tags=["Chat"])
async def get_chats(current_user: User = Depends(get_current_user)):
"""
Retrieve all chats for the current user.
- `current_user`: The current authenticated user.
- Returns a list of all chats for the user.
This endpoint retrieves all the chats associated with the current authenticated user. It returns a list of chat objects
containing the chat ID and chat name for each chat.
"""
chats = get_user_chats(current_user.id) # pyright: ignore reportPrivateUsage=none
return {"chats": chats}
# delete one chat
@chat_router.delete(
"/chat/{chat_id}", dependencies=[Depends(AuthBearer())], tags=["Chat"]
)
async def delete_chat(chat_id: UUID):
"""
Delete a specific chat by chat ID.
"""
supabase_db = get_supabase_db()
delete_chat_from_db(supabase_db=supabase_db, chat_id=chat_id)
return {"message": f"{chat_id} has been deleted."}
# update existing chat metadata
@chat_router.put(
"/chat/{chat_id}/metadata", dependencies=[Depends(AuthBearer())], tags=["Chat"]
)
async def update_chat_metadata_handler(
chat_data: ChatUpdatableProperties,
chat_id: UUID,
current_user: User = Depends(get_current_user),
) -> Chat:
"""
Update chat attributes
"""
chat = get_chat_by_id(chat_id) # pyright: ignore reportPrivateUsage=none
if str(current_user.id) != chat.user_id:
raise HTTPException(
status_code=403, # pyright: ignore reportPrivateUsage=none
detail="You should be the owner of the chat to update it.", # pyright: ignore reportPrivateUsage=none
)
return update_chat(chat_id=chat_id, chat_data=chat_data)
# create new chat
@chat_router.post("/chat", dependencies=[Depends(AuthBearer())], tags=["Chat"])
async def create_chat_handler(
chat_data: CreateChatProperties,
current_user: User = Depends(get_current_user),
):
"""
Create a new chat with initial chat messages.
"""
return create_chat(user_id=current_user.id, chat_data=chat_data)
# add new question to chat
@chat_router.post(
"/chat/{chat_id}/question",
dependencies=[
Depends(
AuthBearer(),
),
],
tags=["Chat"],
)
async def create_question_handler(
request: Request,
chat_question: ChatQuestion,
chat_id: UUID,
brain_id: NullableUUID
| UUID
| None = Query(..., description="The ID of the brain"),
current_user: User = Depends(get_current_user),
) -> ChatHistory:
"""
Add a new question to the chat.
"""
# Retrieve user's OpenAI API key
current_user.user_openai_api_key = request.headers.get("Openai-Api-Key")
brain = Brain(id=brain_id)
if not current_user.user_openai_api_key:
if brain_id:
brain_details = get_brain_details(brain_id)
if brain_details:
current_user.user_openai_api_key = brain_details.openai_api_key
if not current_user.user_openai_api_key:
user_identity = get_user_identity(current_user.id)
if user_identity is not None:
current_user.user_openai_api_key = user_identity.openai_api_key
# Retrieve chat model (temperature, max_tokens, model)
if (
not chat_question.model
or not chat_question.temperature
or not chat_question.max_tokens
):
# TODO: create ChatConfig class (pick config from brain or user or chat) and use it here
chat_question.model = chat_question.model or brain.model or "gpt-3.5-turbo-0613"
chat_question.temperature = chat_question.temperature or brain.temperature or 0
chat_question.max_tokens = chat_question.max_tokens or brain.max_tokens or 256
try:
check_user_limit(current_user)
LLMSettings()
if not brain_id:
brain_id = get_default_user_brain_or_create_new(current_user).brain_id
gpt_answer_generator = OpenAIBrainPicking(
chat_id=str(chat_id),
model=chat_question.model,
max_tokens=chat_question.max_tokens,
temperature=chat_question.temperature,
brain_id=str(brain_id),
user_openai_api_key=current_user.user_openai_api_key, # pyright: ignore reportPrivateUsage=none
)
chat_answer = gpt_answer_generator.generate_answer( # pyright: ignore reportPrivateUsage=none
chat_question.question
)
return chat_answer
except HTTPException as e:
raise e
# stream new question response from chat
@chat_router.post(
"/chat/{chat_id}/question/stream",
dependencies=[
Depends(
AuthBearer(),
),
],
tags=["Chat"],
)
async def create_stream_question_handler(
request: Request,
chat_question: ChatQuestion,
chat_id: UUID,
brain_id: NullableUUID
| UUID
| None = Query(..., description="The ID of the brain"),
current_user: User = Depends(get_current_user),
) -> StreamingResponse:
# TODO: check if the user has access to the brain
# Retrieve user's OpenAI API key
current_user.user_openai_api_key = request.headers.get("Openai-Api-Key")
brain = Brain(id=brain_id)
if not current_user.user_openai_api_key and brain_id:
brain_details = get_brain_details(brain_id)
if brain_details:
current_user.user_openai_api_key = brain_details.openai_api_key
if not current_user.user_openai_api_key:
user_identity = get_user_identity(current_user.id)
if user_identity is not None:
current_user.user_openai_api_key = user_identity.openai_api_key
# Retrieve chat model (temperature, max_tokens, model)
if (
not chat_question.model
or not chat_question.temperature
or not chat_question.max_tokens
):
# TODO: create ChatConfig class (pick config from brain or user or chat) and use it here
chat_question.model = chat_question.model or brain.model or "gpt-3.5-turbo-0613"
chat_question.temperature = chat_question.temperature or brain.temperature or 0
chat_question.max_tokens = chat_question.max_tokens or brain.max_tokens or 256
try:
logger.info(f"Streaming request for {chat_question.model}")
check_user_limit(current_user)
if not brain_id:
brain_id = get_default_user_brain_or_create_new(current_user).brain_id
gpt_answer_generator = OpenAIBrainPicking(
chat_id=str(chat_id),
model=chat_question.model,
max_tokens=chat_question.max_tokens,
temperature=chat_question.temperature,
brain_id=str(brain_id),
user_openai_api_key=current_user.user_openai_api_key, # pyright: ignore reportPrivateUsage=none
streaming=True,
)
print("streaming")
return StreamingResponse(
gpt_answer_generator.generate_stream( # pyright: ignore reportPrivateUsage=none
chat_question.question
),
media_type="text/event-stream",
)
except HTTPException as e:
raise e
# get chat history
@chat_router.get(
"/chat/{chat_id}/history", dependencies=[Depends(AuthBearer())], tags=["Chat"]
)
async def get_chat_history_handler(
chat_id: UUID,
) -> List[ChatHistory]:
# TODO: RBAC with current_user
return get_chat_history(chat_id) # pyright: ignore reportPrivateUsage=none
| [] |
2024-01-10 | c4a-ri/dialbb | sample_apps~lab_app_ja~scenario_functions.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# scenario_functions.py
# functions used in the sample Japanese app
__version__ = '0.1'
__author__ = 'Mikio Nakano'
__copyright__ = 'C4A Research Institute, Inc.'
from datetime import datetime
from typing import Dict, Any
import os
import sys
import traceback
use_openai: bool = False
openai_key: str = os.environ.get('OPENAI_KEY', "")
if openai_key:
    import openai
    use_openai = True
    openai.api_key = openai_key
    # Module-level client used by generate_with_openai_gpt() below; assumes the
    # openai>=1.0 SDK, which the APITimeoutError handling there also requires
    _openai_client = openai.OpenAI(api_key=openai_key)
# Types of ramen the system knows
known_ramens = ("豚骨ラーメン", "味噌ラーメン", "塩ラーメン", "醤油ラーメン")
def is_known_ramen(ramen: str, context: Dict[str, Any]) -> bool:
    """
    Whether the ramen type is one the system knows
    :param ramen: name of the ramen type
    :param context: dialogue context (unused)
    :return: True if known, False otherwise
    """
    return ramen in known_ramens
def is_novel_ramen(ramen: str, context: Dict[str, Any]) -> bool:
    """
    Whether the ramen type is unknown to the system
    :param ramen: name of the ramen type
    :param context: dialogue context (unused)
    :return: True if unknown, False if known
    """
    return ramen not in known_ramens
# Mapping from ramen type to its region of origin
ramen_map = {"豚骨ラーメン": "博多",
"味噌ラーメン": "札幌",
"塩ラーメン": "函館",
"醤油ラーメン": "東京"}
def get_ramen_location(ramen: str, variable: str, context: Dict[str, Any]) -> None:
    """
    Look up the region a ramen type originates from and store it in the dialogue context
    :param ramen: ramen type
    :param variable: name of the context variable that stores the region name
    :param context: dialogue context
    """
    location: str = ramen_map.get(ramen, "日本")
    context[variable] = location
def decide_greeting(greeting_variable: str, context: Dict[str, Any]) -> None:
    """
    Choose a greeting according to the time of day
    :param greeting_variable: name of the context variable that stores the greeting
    :param context: dialogue context
    """
hour: int = datetime.now().hour
if hour < 4:
context[greeting_variable] = "こんばんは"
elif hour < 10:
context[greeting_variable] = "おはようございます"
elif hour <= 19:
context[greeting_variable] = "こんにちは"
else:
context[greeting_variable] = "こんばんは"
def generate_with_openai_gpt(prompt: str):
    chat_completion = None
    while True:
        try:
            chat_completion = _openai_client.with_options(timeout=10).chat.completions.create(
                model="gpt-3.5-turbo",
                messages=[{"role": "user", "content": prompt}],
                temperature=0.7,
            )
        except openai.APITimeoutError:
            # Retry on timeouts
            continue
        except Exception:
            print("OpenAI Error: " + traceback.format_exc())
            sys.exit(1)
        if chat_completion:
            break
    generated_utterance: str = chat_completion.choices[0].message.content
    return generated_utterance
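# Illustrative call (the prompt is a made-up example): the function blocks,
# retrying on timeouts, until it can return the generated utterance text.
#   reply = generate_with_openai_gpt("ユーザに短く挨拶してください。")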
def set_impression_of_dialogue(impression_key: str, context: Dict[str, Any]) -> None:
if use_openai:
prompt = ""
for turn in context["_dialogue_history"]:
if turn["speaker"] == 'user':
prompt += f"ユーザ「{turn['utterance']}」\n"
else:
prompt += f"システム「{turn['utterance']}」\n"
prompt += "の後、システムが感想を短く言う発話を生成してください。"
generated_utterance: str = generate_with_openai_gpt(prompt)
impression = generated_utterance.replace("システム「", "").replace("「", "").replace("」", "")
else:
impression = "そうなんですね"
context[impression_key] = impression
def generate_confirmation_request(nlu_result: Dict[str, Any], context: Dict[str, Any]) -> str:
if nlu_result.get("type") == "特定のラーメンが好き" and nlu_result["slots"].get("favorite_ramen"):
return f'{nlu_result["slots"]["favorite_ramen"]}がお好きなんですか?'
else:
return "もう一度言って頂けますか?"
| [
"の後、システムが感想を短く言う発話を生成してください。",
"ユーザ「PLACEHOLDER」\n",
"システム「PLACEHOLDER」\n"
] |
2024-01-10 | lenaas/travel_chatbot | rasa~actions~actions.py | # This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/custom-actions
# This is a simple example for a custom action which utters "Hello World!"
from typing import Any, Text, Dict, List
from rasa_sdk.events import AllSlotsReset
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
import os
from dotenv import load_dotenv
import openai
import json
import numpy as np
map_time = {
"1. Quarter": ["Jan", "Feb", "Mär"],
"2. Quarter": ["Apr", "Mai", "Jun"],
"3. Quarter": ["Jul", "Aug", "Sep"],
"4. Quarter": ["Okt", "Nov", "Dez"]
}
map_climate = {
"Warm and sunny": "warm",
"Cold Weather": "kalt"
}
map_activity = {
"Relaxing on the beach": "Strandurlaub",
"Exploring a city": "Städtereise",
"Experiencing adventures": "Rundreise",
"Experiencing culture": "Kultur"
}
map_interest = {
'History': 'Geschichte',
'Nature': 'Natur',
'Culture': 'Kultur',
'Great food': 'Kulinarik',
'Party': 'Party',
'Wellness': 'Wellness',
'Adventure': 'Abenteuer'
}
map_budget = {
'Lower': 'Günstiger als Deutschland',
'Equal': 'Durchschnitt Deutschland',
'Higher': 'Teurer als Deutschland',
}
map_housing = {
'Camping': 'Camping',
'Hotel/Hostel/Vacation house': 'Ferienhaus/Hotel/Hostel',
}
map_months = {
"Jan": "Januar",
"Feb": "Februar",
"Mär": "März",
"Apr": "April",
"Mai": "Mai",
"Jun": "Juni",
"Jul": "Juli",
"Aug": "August",
"Sep": "September",
"Okt": "Oktober",
"Nov": "November",
"Dez": "Dezember"
}
class ActionGetDestinations(Action):
def name(self) -> Text:
return "action_get_destinations"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
# load input
slot_keys = ["time","climate","activity","interest_1","interest_2","budget","housing","use_GPT"]
slot_values = [tracker.get_slot(i) for i in slot_keys]
slot_dict = dict(zip(slot_keys, slot_values))
# load database
with open("./actions/dataset/dataset_destination.json") as f:
data = json.load(f)
length=len(data['Reiseziel'])
#Logik für Datenbank
def easy_count(key,input,mapper,length=length):
'''checks for each destination if the activity is available and returns a list of 1 and 0'''
return [1 if mapper[input] in data[key][str(dest)] else 0 for dest in range(length)]
def weighted_count(key, input,mapper,weight=0.5,length=length):
'''function to weight the differing interests priorities. Otherwise same count as easy_count'''
# Interest 1
int_1 = [1 if mapper[input[0]] in data[key][str(dest)] else 0 for dest in range(length)]
# Interest 2
int_2 = [round(weight,1) if mapper[input[1]] in data[key][str(dest)] else 0 for dest in range(length)]
return np.array(int_1) + np.array(int_2)
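        # Worked example (illustrative, weight=0.5): a destination matching only
        # interest_1 scores 1.0, only interest_2 scores 0.5, and both score 1.5;
        # compute_total() divides by 1.5 to normalise this back into [0, 1]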
def evaluate_climate(key, input, mapper_a,mapper_b,length=length):
        '''Score each destination's climate match and (inverted) rain probability over the chosen quarter'''
# get climate score
# iterate months of quarter
climate = []
rain = []
for month in mapper_a[input[0]]:
# get bool array with climate hit for each month
climate.append([1 if mapper_b[input[1]] in data[key][str(dest)][month]['Klima'] else 0 for dest in range(length)])
# get rain score for each month
rain.append([data[key][str(dest)][month]['Regenwahrscheinlichkeit']for dest in range(length)])
climate_sum = np.round(np.array(np.array(climate[0])+np.array(climate[1])+np.array(climate[2]))/3,1)
rain_sum = np.round(np.array(np.array(rain[0])+np.array(rain[1])+np.array(rain[2]))/3,1)
# Reverse Rain score - little rain is good
rain_sum = np.absolute(1 - rain_sum)
return climate_sum, rain_sum
def evaluate_time(key, input, mapper_a,mapper_b,length=length):
# for each month in chosen quarter
time= []
for month in mapper_a[input]:
# check if travel is recommended for that month
time.append([1 if mapper_b[month] in data[key][str(dest)] else 0 for dest in range(length)])
return np.round(np.array(np.array(time[0])+np.array(time[1])+np.array(time[2]))/3,1)
def compute_total():
activity = np.array(easy_count(key="Reiseart",input=slot_dict['activity'],mapper=map_activity))
budget = np.array(easy_count(key="Preisniveau",input=slot_dict['budget'],mapper=map_budget))
housing = np.array(easy_count(key="Unterkunft",input=slot_dict['housing'],mapper=map_housing))
interest = np.round(np.array(weighted_count(key="Interessen",input=[slot_dict['interest_1'],slot_dict['interest_2']],mapper=map_interest))/1.5,1)
climate, rain = evaluate_climate('Klima und Regenwahrscheinlichkeit',[slot_dict['time'],slot_dict['climate']],mapper_a=map_time,mapper_b=map_climate)
time = np.array(evaluate_time("Beste Monate zum Reisen", slot_dict['time'],mapper_a=map_time, mapper_b=map_months))
assert len(activity) == len(budget) == len(housing) == len(interest) == len(climate) == len(rain) == len(time), "Length of score lists not equal"
return np.round(np.array(activity + budget + housing + interest + np.array(climate) + np.array(rain) + time)/7,4)
def get_top5(score_list):
sorted = np.flip(np.argsort(score_list))
destinations = [data["Reiseziel"][str(dest)]for dest in sorted[:5]]
scores = score_list[sorted[:5]]
return destinations,scores
dest,scores = get_top5(compute_total())
output_string= f'- 1. {dest[0]} - Score: {scores[0]} \n' + f'- 2. {dest[1]} - Score: {scores[1]} \n' + f'- 3. {dest[2]} - Score: {scores[2]} \n' + f'- 4. {dest[3]} - Score: {scores[3]} \n' + f'- 5. {dest[4]} - Score: {scores[4]} \n'
dispatcher.utter_message(text="Thank you for providing all the necessary details. Based on my internal database, , I recommend considering the following travel destinations: \n"+output_string+" If none of these destinations are suitable for you, I can also do a quick internet search based on your criteria.",
buttons= [
{"payload":'/GPT{"use_GPT":"Yes"}', "title": "Yes, do it!"},
{"payload":'/GPT{"use_GPT":"No"}', "title": "No, no further help is needed"},
])
return []
class Conduct_GPT_search(Action):
def name(self) -> Text:
return "action_conduct_GPT_search"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
slot_keys = ["time","climate","activity","interest_1","interest_2","budget","housing","use_GPT"]
slot_values = [tracker.get_slot(i) for i in slot_keys]
slot_dict = dict(zip(slot_keys, slot_values))
if slot_dict["use_GPT"]=="Yes":
load_dotenv()
openai.api_key = os.getenv('OPENAI_API_KEY')
assert openai.api_key!=None, "API_KEY is not set"
prompt = f"""Imagine you are a Travel Agent. \n
I will list you some criteria and you should give me the top 5 travel destinations based on the criteria as a bulletpoint list.\n
Keep the answer short and simple. I don't want any explanations concerning the destinations. \n
Compute me a score with a range of [0-1] for each destination based on the criteria and sort the destinations by the score. \n
Return the destinations in the following format: State/City, Country, Score \n
1. Travel Time: {slot_dict["time"]}\n
2. Climate and Weather: {slot_dict["climate"]}\n
3. Activity: {slot_dict["activity"]}\n
4. Primary Interest: {slot_dict["interest_1"]}\n
5. Secondary Interest: {slot_dict["interest_2"]}\n
6. Budget: {slot_dict["budget"]} to Germany\n
7. Housing: {slot_dict["housing"]}\n
"""
output = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role":"user",
"content": prompt
}])
dispatcher.utter_message(text="I consulted ChatGPT and it recommends the following destinations: \n"+str(output["choices"][0]["message"]["content"])+ "\n I hope you find these recommendations helpful! If you have any other questions or need further assistance, feel free to ask. Enjoy your vacation planning!")
else:
dispatcher.utter_message(text="I hope you found what you were looking for. If you need further assistance, please let me know.")
return []
class Reset_Slots(Action):
def name(self) -> Text:
return "action_reset_slots"
def run(self, dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
return [AllSlotsReset()]
| [
"Imagine you are a Travel Agent. \n\n I will list you some criteria and you should give me the top 5 travel destinations based on the criteria as a bulletpoint list.\n\n Keep the answer short and simple. I dont want any explanations concerning the destinations \n\n Compute me a score with a range of [0-1] for each destination based on the criteria and sort the destinations by the score. \n\n Return the destinations in the following format: State/City, Country, Score \n\n 1. Travel Time: PLACEHOLDER\n\n 2. Climate and Weather: PLACEHOLDER\n\n 3. Activity: PLACEHOLDER\n\n 4. Primary Interest: PLACEHOLDER\n\n 5. Secondary Interest: PLACEHOLDER\n\n 6. Budget: PLACEHOLDER to Germany\n\n 7. Housing: PLACEHOLDER\n\n "
] |
2024-01-10 | vincm1/IQSheets | iqsheets_app~dashboard~routes.py | """Routes for dashboard"""
from datetime import datetime
import re
import boto3
from sqlalchemy import func
from flask import Blueprint, current_app, render_template, send_file, redirect, url_for, request
from flask_login import login_required, current_user
from iqsheets_app import db
from iqsheets_app.models import Prompt, Template
from iqsheets_app.utils.decorators import check_confirmed_mail
from iqsheets_app.openai import openai_chat
from .forms import FormelForm, SkriptForm, SqlForm, RegExForm
################
#### config ####
################
dashboard_blueprint = Blueprint('dashboard', __name__)
# initialize S3 client using boto3
s3_client = boto3.client(
's3',
aws_access_key_id=current_app.config['AWS_ACCESS_KEY'],
aws_secret_access_key=current_app.config['AWS_SECRET_ACCESS_KEY'],
region_name=current_app.config['AWS_REGION']
)
# Mapping of prompt types to their respective forms
FORM_MAP = {
"formula": FormelForm,
"skripte": SkriptForm,
"sql": SqlForm,
"regex": RegExForm,
}
CLEAN_MAP = {
"Excel - VBA": "vba",
"GSheets - Apps": "javascript",
}
################
#### helpers ####
################
def find_function(text, prompt_type):
"""
    Extract only the fenced code (formula) blocks from the OpenAI response.
    Parameters:
    text (str): The text to search for fenced code blocks.
    prompt_type (str): The language tag of the fences to extract.
    Returns:
    list: The contents of all matching fenced code blocks.
"""
pattern = r"```" + prompt_type + "(.*?)```"
# Using re.findall to find all occurrences of the pattern
matches = re.findall(pattern, text, re.DOTALL)
return matches
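# Example (illustrative): for text = "Here you go:\n```sql\nSELECT 1;\n```",
# find_function(text, "sql") returns ["\nSELECT 1;\n"], i.e. just the content
# between the fences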
def remove_pattern_from_text(text, prompt_type):
"""
Function to remove a specific pattern from a given text string.
Parameters:
text (str): The text from which the pattern will be removed.
prompt_type (str): The type of prompt to be removed from the text.
Returns:
str: The text with the specified pattern removed.
"""
start_pattern = re.escape(r"```") + prompt_type
end_pattern = re.escape(r"```")
text = re.sub(start_pattern, '', text, flags=re.DOTALL)
text = re.sub(end_pattern, '', text, flags=re.DOTALL)
return text.strip()
def process_form_data(form_data, prompt_type):
'''Function to handle prompt input form data from inserted form.
Parameters:
form data: The text from which the pattern will be removed.
Returns:
Input for Prompt DB Model
'''
form_data['prompt_type'] = prompt_type
keys = ["prompt_type", "excel_google", "vba_app", "formula_explain", "prompt"]
input_prompt = []
for key in keys:
if key in form_data:
input_prompt.append(form_data[key])
# Form user info to prompt for OpenAI
input_prompt = " ".join(input_prompt)
result = openai_chat(input_prompt)
answer = result.choices[0].message.content
category, prompt = form_data["formula_explain"], form_data["prompt"]
# Increasing the amount of prompts and total tokens when prompt is generated
current_user.num_prompts += 1
current_user.num_tokens += result.usage.total_tokens
return prompt_type, category, prompt, answer
def prompt_output_handler(prompt_result, prompt_type, form_data):
"""
Function to handle the OpenAi response for user.
Parameters:
text (str): The text from which the pattern will be removed.
prompt_type (str): The type of prompt to be removed from the text.
Returns:
str: The text with the specified pattern removed.
"""
# Extracting the part of the string from "sql" to "19"7
print(form_data)
if "vba_app" in form_data:
print(CLEAN_MAP[form_data["vba_app"]].lower())
formulas = find_function(prompt_result, CLEAN_MAP[form_data["vba_app"]].lower())
reduced_answer = remove_pattern_from_text(prompt_result, CLEAN_MAP[form_data["vba_app"]].lower())
print(prompt_result, prompt_type.lower(),formulas, reduced_answer)
else:
formulas = find_function(prompt_result, prompt_type.lower())
reduced_answer = remove_pattern_from_text(prompt_result, prompt_type.lower())
print(prompt_result, prompt_type.lower(),formulas, reduced_answer)
return formulas, reduced_answer
################
#### routes ####
################
@dashboard_blueprint.route('/dashboard', methods=['GET'])
@login_required
@check_confirmed_mail
def dashboard():
"""User Dashboard page"""
num_prompts = Prompt.query.filter_by(user_id=current_user.id).count()
favorite_prompt = Prompt.query.filter_by(user_id=current_user.id, favorite=True).count()
time_saved = num_prompts * 0.5
fav_prompt_type = db.session.query(Prompt.prompt_type,func.count(Prompt.id)).filter(
Prompt.user_id == current_user.id).group_by(Prompt.prompt_type).all()
if fav_prompt_type:
most_used = max(fav_prompt_type, key=lambda item: item[1])
most_used = most_used[0].capitalize()
else:
most_used = "/"
return render_template('dashboard/dashboard.html', num_prompts=num_prompts,
favorites=favorite_prompt, most_used=most_used,
time_saved=time_saved)
@dashboard_blueprint.route('/<prompt_type>', methods=['GET', 'POST'])
@login_required
@check_confirmed_mail
def prompter(prompt_type):
"""User Dashboard page"""
if prompt_type not in FORM_MAP:
# Redirect to the default dashboard page for invalid prompt types
return redirect(url_for('dashboard.dashboard'))
form = FORM_MAP[prompt_type]()
return render_template(f"dashboard/{prompt_type}_page.html", form=form)
@dashboard_blueprint.route('/<prompt_type>/result', methods=['GET', 'POST'])
@login_required
@check_confirmed_mail
def formel(prompt_type):
"""User Dashboard page"""
if prompt_type not in FORM_MAP:
# Redirect to the default dashboard page for invalid prompt types
return redirect(url_for('dashboard.dashboard'))
form = FORM_MAP[prompt_type]()
if request.method == 'POST' and form.validate_on_submit():
form_data = form.data
prompt_type, category, prompt, answer = process_form_data(form_data, prompt_type)
# Creating prompt instance
prompt = Prompt(user_id = current_user.id, prompt_type=prompt_type,
category=category, prompt=prompt, result=answer)
# Commiting prompt and numbers to db
db.session.add(prompt)
db.session.commit()
if prompt.category == 'Erstellen' and prompt.prompt_type != "formula":
formulas, reduced_answer = prompt_output_handler(prompt.result, prompt.prompt_type, form.data)
return render_template(f'dashboard/{prompt_type}_page.html', answer=reduced_answer, form=form, prompt_id=prompt.id, formulas=formulas)
else:
return render_template(f'dashboard/{prompt_type}_page.html', answer=prompt.result, form=form, prompt_id=prompt.id)
return render_template(f'dashboard/{prompt_type}_page.html', form=form)
@dashboard_blueprint.route('/dashboard/favorite/<int:prompt_id>', methods=['GET'])
@login_required
@check_confirmed_mail
def prompt_favorite(prompt_id):
''' handles user feedback per prompt '''
prompt = Prompt.query.filter_by(id=prompt_id).first()
prompt.favorite = True
db.session.commit()
return redirect(url_for('dashboard.favorites'))
@dashboard_blueprint.route('/dashboard/negative/<int:prompt_id>', methods=['GET'])
@login_required
@check_confirmed_mail
def negative_feedback(prompt_id):
''' handles user feedback per prompt '''
prompt = Prompt.query.filter_by(id=prompt_id).first()
prompt.feedback = False
db.session.add(prompt)
db.session.commit()
return redirect(request.referrer or '/default-page')
@dashboard_blueprint.route('/favoriten', methods=['GET', 'POST'])
@login_required
@check_confirmed_mail
def favorites():
"""User favorite Excel Formulas"""
page = request.args.get('page', 1, type=int)
favorite_formulas = Prompt.query.filter_by(user_id=current_user.id,
favorite=True).order_by(Prompt.created_at).paginate(page=page,
per_page=30)
# prompt_types = db.session.query(Prompt.prompt_type).distinct().all()
prompt_types = ["formula", "skripte", "sql", "regex"]
today = datetime.now()
if request.method == 'POST' and request.form['filter_value'] == "Alle":
page = request.args.get('page', 1, type=int)
favorite_formulas = Prompt.query.filter_by(user_id=current_user.id, favorite=True).order_by(Prompt.created_at).paginate(page=page, per_page=9)
elif request.method == 'POST':
filter_value = request.form['filter_value']
page = request.args.get('page', 1, type=int)
favorite_formulas = Prompt.query.filter_by(user_id=current_user.id, favorite=True,
prompt_type=filter_value).order_by(Prompt.created_at).paginate(page=page, per_page=30)
return render_template('dashboard/favorites.html', favorite_formulas=favorite_formulas, prompt_types=prompt_types, today=today)
@dashboard_blueprint.route('/formel_<int:favorite_id>/delete', methods=['GET'])
@login_required
@check_confirmed_mail
def delete_favorite(favorite_id):
"""Delete Formula/VBA to User favorites"""
favorite = Prompt.query.filter_by(id=favorite_id).first()
db.session.delete(favorite)
db.session.commit()
return redirect(url_for('dashboard.favorites'))
@dashboard_blueprint.route('/templates', methods=['GET', 'POST'])
@login_required
@check_confirmed_mail
def templates():
""" Route for templates """
page = request.args.get('page', 1, type=int)
templates = Template.query.order_by(Template.created_at).paginate(page=page, per_page=12)
categorys = db.session.query(Template.template_category).distinct().all()
if request.method == 'POST' and request.form['filter_value'] == "Alle":
page = request.args.get('page', 1, type=int)
templates = Template.query.order_by(Template.created_at).paginate(page=page, per_page=12)
elif request.method == 'POST':
filter_value = request.form['filter_value']
page = request.args.get('page', 1, type=int)
templates = Template.query.filter_by(template_category=filter_value).order_by(Template.created_at).paginate(page=page, per_page=12)
return render_template('dashboard/templates.html', templates=templates, categorys=categorys)
@dashboard_blueprint.route('/download', methods=['GET'])
@login_required
@check_confirmed_mail
def download():
""" Route for templates download """
filename = 'static/xlxs_templates/Calendar-Template.xlsx'
try:
return send_file(filename)
except Exception as e:
return str(e)
| [
"['formula', 'skripte', 'sql', 'regex']",
"[]",
" "
] |
2024-01-10 | vincm1/IQSheets | iqsheets_app~user~routes.py | """Routes for user"""
from datetime import datetime
import stripe
from flask import Blueprint, Markup, render_template, redirect, request, flash, url_for
from flask_login import login_user, login_required, logout_user, current_user
from werkzeug.security import generate_password_hash, check_password_hash
from flask_mail import Message
from iqsheets_app import db
from iqsheets_app.models import User
from .forms import RegistrationForm, LoginForm, EditUserForm, ChangePasswordForm, ResetPasswordRequestForm, ResetPasswordForm
from .token import generate_confirmation_token, confirm_token
from .email import send_email
from werkzeug.utils import secure_filename
from iqsheets_app.utils.decorators import check_confirmed_mail, non_oauth_required
from iqsheets_app.openai import openai_chat
from iqsheets_app.core.forms import NewsletterForm
################
#### config ####
################
user_blueprint = Blueprint('user', __name__)
stripe.api_key = "sk_test_51MpD8VHjForJHjCtVZ317uTWseSh0XxZkuguQKo9Ei3WjaQdMDpo2AbKIYPWl2LXKPW3U3h6Lu71E94Gf1NvrHKE00xPsZzRZZ"
YOUR_DOMAIN = 'http://localhost:5000'
################
#### routes ####
################
@user_blueprint.route('/register', methods=['GET', 'POST'])
def register():
""" Registering a local user """
form = RegistrationForm()
form_nl = NewsletterForm()
if form.validate_on_submit() and request.method == "POST":
user = User(email=form.email.data, password=form.password.data)
db.session.add(user)
db.session.commit()
token = generate_confirmation_token(user.email)
confirm_url = url_for('user.confirm_email', token=token, _external=True)
html = render_template('user/email/activate.html', confirm_url=confirm_url)
subject = "Bitte bestätige Deine Email für IQSheets!"
send_email(user.email, subject, html)
flash(f'Eine Bestätigungs-Email wurde an {user.email} geschickt.', 'success')
return redirect(url_for('user.login'))
return render_template('user/signup.html', form=form, form_nl=form_nl)
@user_blueprint.route('/login', methods=['GET', 'POST'])
def login():
"""Login User"""
form = LoginForm()
form_2 = ResetPasswordForm()
form_nl = NewsletterForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
# Check if user exist
if user:
# Check if user confirmed registration link
if user.is_confirmed:
# Check password of user
if user.check_password(form.password.data):
if user.is_admin:
login_user(user, remember=True)
return redirect(url_for('dashboard.dashboard'))
else:
user.check_payment()
if user.stripe_customer_id and user.stripe_sub_id is not None:
login_user(user, remember=True)
return redirect(url_for('dashboard.dashboard'))
else:
return redirect(f"https://buy.stripe.com/test_aEU7t68NY7Dm6ukbIL?prefilled_email={user.email}")
else:
flash('Prüfe deine Anmeldedaten!', 'danger')
return redirect(url_for('user.login'))
else:
flash('Bitte bestätige deine Anmeldung per Link', 'danger')
else:
flash('Prüfe deine Anmeldedaten!', 'danger')
return redirect(url_for('user.login'))
return render_template('user/login.html', form=form, form_2=form_2, form_nl=form_nl)
@user_blueprint.route('/logout')
@login_required
def logout():
"""Logout"""
logout_user()
return redirect(url_for('core.index'))
@user_blueprint.route('/confirm/<token>')
def confirm_email(token):
""" Sending confirmation Email to user after signup """
email = confirm_token(token)
user = User.query.filter_by(email=email).first_or_404()
print(user.email)
form_nl = NewsletterForm()
if user.email == email:
user.is_confirmed = True
user.confirmed_on = datetime.now()
db.session.add(user)
db.session.commit()
flash('Account bestätigt', 'success')
stripe_link = f"https://buy.stripe.com/test_aEU7t68NY7Dm6ukbIL?prefilled_email={user.email}"
# return render_template('stripe/checkout.html', user_email=email, form_nl=form_nl)
else:
flash('Der Bestätigungslink ist abgelaufen oder invalide.', 'danger')
return redirect(url_for('core.index'))
@user_blueprint.route('/unconfirmed')
@login_required
def unconfirmed():
""" Checking if user confirmed email link """
if current_user.is_confirmed:
return redirect('user.login')
flash('Bitte Account bestätigen', 'warning')
return render_template('user/unconfirmed.html')
@user_blueprint.route('/resend')
@login_required
def resend_confirmation():
""" Resending a confirmation link after signup """
token = generate_confirmation_token(current_user.email)
confirm_url = url_for('user.confirm_email', token=token, _external=True)
html = render_template('user/email/activate.html', confirm_url=confirm_url)
subject = "Bitte bestätige Deine Email für IQSheets!"
send_email(current_user.email, subject, html)
flash(f'Eine Bestätigungs-Email wurde an {current_user.email} geschickt.', 'success')
return redirect(url_for('user.unconfirmed'))
@user_blueprint.route('/einstellungen/profil', methods=['GET','POST'])
@login_required
@check_confirmed_mail
def edit_user():
"""Edit user profile"""
form = EditUserForm()
if form.validate_on_submit() and request.method == 'POST':
if form.firstname.data and form.firstname.data != current_user.firstname:
current_user.firstname = form.firstname.data
if form.lastname.data and form.lastname.data != current_user.lastname:
current_user.lastname = form.lastname.data
if form.lastname.data and form.job_description.data != current_user.job_description:
current_user.job_description = form.job_description.data
db.session.add(current_user)
db.session.commit()
flash("Profil erfolgreich bearbeitet", "success")
return render_template('user/profil.html', form=form, active_page='edit_user')
@user_blueprint.route('/einstellungen/passwort', methods=['GET', 'POST'])
@login_required
@check_confirmed_mail
@non_oauth_required
def edit_password():
"""Edit user profile"""
form = ChangePasswordForm()
if form.validate_on_submit() and request.method == 'POST':
if current_user.check_password(form.old_password.data):
if current_user.check_password(form.password.data):
flash("Neues Passwort identisch!", 'danger')
else:
current_user.password_hash = generate_password_hash(form.password.data)
db.session.add(current_user)
db.session.commit()
flash("Passwort erfolgreich geändert!", "success")
else:
flash('Altes Passwort stimmt nicht!', 'danger')
return render_template('user/change_password.html', form=form, active_page='edit_password')
@user_blueprint.route('/payments', methods=['GET'])
@login_required
@check_confirmed_mail
def user_payments():
""" User payments routes """
stripe_sub_id = current_user.stripe_sub_id
sub = stripe.Subscription.retrieve(id=stripe_sub_id)
stripe_cust_id = current_user.stripe_customer_id
created_date, days_to_due = datetime.utcfromtimestamp(sub['created']).strftime('%d.%m.%Y'), sub['days_until_due']
current_payment_start, current_payment_end = datetime.utcfromtimestamp(sub["current_period_start"]).strftime('%d.%m.%Y'), datetime.utcfromtimestamp(sub["current_period_end"]).strftime('%d.%m.%Y')
return render_template('user/payments.html', sub=sub, stripe_cust_id=stripe_cust_id,
stripe_sub_id=stripe_sub_id, created_date=created_date, days_to_due=days_to_due,
current_payment_start=current_payment_start, current_payment_end=current_payment_end)
@user_blueprint.route('/passwort_zuruecksetzen', methods=['GET', 'POST'])
def reset_password_request():
""" Sending a password request """
form = ResetPasswordRequestForm()
form_nl = NewsletterForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and user.is_confirmed:
token = generate_confirmation_token(user.email)
confirm_url = url_for('user.reset_password', token=token, _external=True)
subject = f"Passwort Reset für {user.email} IQSheets!"
html = render_template('user/email/reset_password.html', confirm_url=confirm_url, form_nl=form_nl)
send_email(user.email, subject, html)
flash('Prüfe deine Emails', 'success')
            return redirect(url_for('user.login'))
else:
flash('Kein Profil unter dieser Emailadresse', 'warning')
#return redirect(url_for('core.index'))
return render_template('user/reset_password_request.html',
title='Passwort zurücksetzen', form=form, form_nl=form_nl)
@user_blueprint.route('/reset_password/<token>', methods=['GET', 'POST'])
def reset_password(token):
""" Resetting password after clicking on tokenized mail link """
email = confirm_token(token)
    # first_or_404() would abort with a 404 and make the guard below unreachable,
    # so fetch with first() and redirect when the token does not map to a user.
    user = User.query.filter_by(email=email).first()
    if not user:
        return redirect(url_for('core.index'))
form = ResetPasswordForm()
if form.validate_on_submit():
user.password_hash = generate_password_hash(form.password.data)
db.session.add(user)
db.session.commit()
flash('Passwort zurückgesetzt', 'success')
return redirect(url_for('user.login'))
return render_template('user/reset_password.html', form=form)
| [] |
2024-01-10 | SidU/WhisperLOL | dadjoke-buddy.py | import os
import queue
import openai
import sounddevice as sd
import soundfile as sf
from termcolor import colored
import threading
# Read the OpenAI API key from an environment variable
API_KEY = os.environ.get('OPENAI_API_KEY')
# Set the number of seconds to record and send to OpenAI
RECORD_SECONDS = 5
# Set the sample rate of the audio data
SAMPLE_RATE = 32000
# Define the OpenAI Completion API parameters
model_engine = "text-davinci-003"
temperature = 0.7
max_tokens = 60
# Define the text-to-speech parameters
tts_language = 'en'
tts_slow = False
# Create an instance of the OpenAI API client
openai.api_key = API_KEY
# Define the prompt template for our main purpose, i.e. telling jokes.
prompt_template = """You are a dad-joke assistant. Reply with a funny dad-joke related to the transcription below:
{summary}"""
# Define the prompt template for summarization
summarization_template = """Summarize the following transcription of a conversation:
{transcript}
"""
# Circular buffer of last 30 / RECORD_SECONDS user utterances
transcript_queue = queue.Queue(maxsize=30 // RECORD_SECONDS)
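# With RECORD_SECONDS = 5 this queue holds the 6 most recent utterances
# (roughly 30 s of speech); the producer loop below drops the oldest entry
# once the queue fills up.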
def transcript_queue_processor():
# Keep a buffer of last 10 transcriptions that we constantly summarize for continuity
transcript_buffer = []
while True:
# Wait for the next transcript to be added to the queue
transcript_latest = transcript_queue.get()
# Add the latest transcript to the transcript buffer
transcript_buffer.append(transcript_latest)
# If the buffer is full, remove the oldest transcript
if len(transcript_buffer) > 10:
transcript_buffer.pop(0)
# Print transcript buffer
#print("Transcript buffer: " + str(transcript_buffer))
# Replace the {transcript} placeholder in the summarization template with a newline joined transcript_buffer
summarization_prompt = summarization_template.format(transcript='\n'.join(transcript_buffer))
# Send the prompt to the OpenAI Summarization API
summarization_response = openai.Completion.create(
engine=model_engine,
prompt=summarization_prompt,
temperature=temperature,
max_tokens=max_tokens
)
# Save the summary
summary = summarization_response.choices[0].text.strip()
# Print the summary
print("Summary: " + summary)
# Replace the {summary} placeholder in the prompt template
prompt = prompt_template.format(summary=summary)
# Send the prompt to the OpenAI Completion API
response = openai.Completion.create(
engine=model_engine,
prompt=prompt,
temperature=temperature,
max_tokens=max_tokens
)
response_text = response.choices[0].text.strip()
# Print the response from the OpenAI Completion API
colored_response_text = colored(response_text, 'green')
print("Suggested joke: " + colored_response_text)
# Start the thread to process the transcript queue
threading.Thread(target=transcript_queue_processor, daemon=True).start()
print("Listening... (say bye to stop)")
# Loop forever, recording and sending audio data every RECORD_SECONDS
while True:
# Record audio data from the microphone
audio_data = sd.rec(int(SAMPLE_RATE * RECORD_SECONDS), samplerate=SAMPLE_RATE, channels=1, dtype='int16')
# Wait for the recording to complete
sd.wait()
# Save the audio data to a file
file_name = 'temp.wav'
sf.write(file_name, audio_data, SAMPLE_RATE)
# Transcribe the audio file using the OpenAI API
with open(file_name, "rb") as audio_file:
transcript = openai.Audio.transcribe("whisper-1", audio_file)
    # Save the transcribed text (avoid shadowing the built-in input()).
    user_text = transcript.text.strip()

    # If the transcription is empty or blank, skip the rest of the loop
    if not user_text:
        continue

    # Print the transcribed text
    #print("Human: " + user_text)

    # Keep a circular buffer of the last 30 / RECORD_SECONDS user utterances;
    # when the queue is full, drop the oldest utterance instead of raising queue.Full.
    try:
        transcript_queue.put(user_text, block=False)
    except queue.Full:
        transcript_queue.get_nowait()
        transcript_queue.task_done()
        transcript_queue.put(user_text, block=False)

    # If the user said "bye" (with or without a trailing period), stop the program
    if user_text.lower().rstrip(".") == "bye":
        break
# Wait for queue to finish processing
transcript_queue.join() | [
"\n",
"Summarize the following transcription of a conversation:\n{transcript}\n",
"You are a dad-joke assistant. Reply with a funny dad-joke related to the transcription below:\nPLACEHOLDER",
"You are a dad-joke assistant. Reply with a funny dad-joke related to the transcription below:\n{summary}"
] |
2024-01-10 | TheUberBaum/Joyia | src~talk.py | import sounddevice as sd
import soundfile as sf
import numpy as np
import openai
import os
import requests
import re
from colorama import Fore, Style, init
import datetime
import base64
from pydub import AudioSegment
from pydub.playback import play
import webbrowser
url_pattern = r'https?://\S+'
init()
def open_file(filepath):
with open(filepath, 'r', encoding='utf-8') as infile:
return infile.read()
api_key = open_file('openaiapikey2.txt')
elapikey = open_file('elabapikey.txt')
conversation1 = []
chatbot1 = open_file('chatbot1.txt')
def chatgpt(api_key, conversation, chatbot, user_input, temperature=0.9, frequency_penalty=0.2, presence_penalty=0):
openai.api_key = api_key
conversation.append({"role": "user","content": user_input})
messages_input = conversation.copy()
prompt = [{"role": "system", "content": chatbot}]
messages_input.insert(0, prompt[0])
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
temperature=temperature,
frequency_penalty=frequency_penalty,
presence_penalty=presence_penalty,
messages=messages_input)
chat_response = completion['choices'][0]['message']['content']
conversation.append({"role": "assistant", "content": chat_response})
return chat_response
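# Minimal usage sketch for chatgpt() (illustrative only; the interactive voice
# loop below is the real entry point):
#   history = []
#   reply = chatgpt(api_key, history, chatbot1, "Hello there!")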
def text_to_speech(text, voice_id, api_key):
url = f'https://api.elevenlabs.io/v1/text-to-speech/{voice_id}'
headers = {
'Accept': 'audio/mpeg',
'xi-api-key': api_key,
'Content-Type': 'application/json'
}
data = {
'text': text,
'model_id': 'eleven_monolingual_v1',
'voice_settings': {
'stability': 0.6,
'similarity_boost': 0.85
}
}
response = requests.post(url, headers=headers, json=data)
if response.status_code == 200:
with open('output.mp3', 'wb') as f:
f.write(response.content)
audio = AudioSegment.from_mp3('output.mp3')
play(audio)
else:
print('Error:', response.text)
def print_colored(agent, text):
agent_colors = {
"Joyia:": Fore.YELLOW,
}
color = agent_colors.get(agent, "")
print(color + f"{agent}: {text}" + Style.RESET_ALL, end="")
voice_id1 = 'ThT5KcBeYPX3keUQqHPh'
def record_and_transcribe(duration=8, fs=44100):
print('Recording...')
myrecording = sd.rec(int(duration * fs), samplerate=fs, channels=1)
sd.wait()
print('Recording complete.')
filename = 'myrecording.wav'
sf.write(filename, myrecording, fs)
with open(filename, "rb") as file:
openai.api_key = api_key
result = openai.Audio.transcribe("whisper-1", file)
transcription = result['text']
return transcription
user_message=''
while True:
response = chatgpt(api_key, conversation1, chatbot1, user_message)
print_colored("Joyia:", f"{response}\n\n")
    um = re.sub(r'(Response:|Narration:|Image: generate_image:.*)', '', response).strip()
user_message_without_generate_image = re.sub(r'^Link:.*\n?', '', um, flags=re.MULTILINE)
text_to_speech(user_message_without_generate_image, voice_id1, elapikey)
urls = re.findall(url_pattern, response)
for url in urls:
print(url)
webbrowser.open(url)
user_message = record_and_transcribe()
| [] |
2024-01-10 | vhasik/curriculai | test-ai.py | from dotenv import load_dotenv
import os
import openai
# Load the environment variables from the .env file
load_dotenv()
# Retrieve the OpenAI API key from the environment variables
openai.api_key = os.getenv("OPENAI_API_KEY")
# Load the prompt from the text file
with open('data/prompt-email.txt', 'r', encoding='utf-8') as file:
prompt_prep = file.read().strip()
# Load the prompt from the text file
with open('data/prompt-test.txt', 'r', encoding='utf-8') as file:
prompt_content = file.read().strip()
response = openai.ChatCompletion.create(
model="gpt-4",
messages=[
{
"role": "user",
"content": prompt_prep + prompt_content
}
],
temperature=0,
max_tokens=4096,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Print the response from OpenAI
print(response.choices[0].message['content'])
| [
"PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | hanselpetter/AnythingLLM | collector~scripts~substack_utils.py | import os, json, requests, tempfile
from requests_html import HTMLSession
from langchain.document_loaders import UnstructuredHTMLLoader
def fetch_all_publications(subdomain):
file_path = f"./outputs/substack-logs/substack-{subdomain}.json"
if os.path.isdir("./outputs/substack-logs") == False:
os.makedirs("./outputs/substack-logs")
if os.path.exists(file_path):
with open(file_path, "r") as file:
print(f"Returning cached data for substack {subdomain}.substack.com. If you do not wish to use stored data then delete the file for this newsletter to allow refetching.")
return json.load(file)
collecting = True
offset = 0
publications = []
while collecting is True:
url = f"https://{subdomain}.substack.com/api/v1/archive?sort=new&offset={offset}"
response = requests.get(url)
if(response.ok == False):
print("Bad response - exiting collection")
collecting = False
continue
data = response.json()
if(len(data) ==0 ):
collecting = False
continue
for publication in data:
publications.append(publication)
offset = len(publications)
with open(file_path, 'w+', encoding='utf-8') as json_file:
json.dump(publications, json_file, ensure_ascii=True, indent=2)
print(f"{len(publications)} publications found for author {subdomain}.substack.com. Saved to substack-logs/channel-{subdomain}.json")
return publications
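# Illustrative end-to-end usage (the subdomain here is an assumption):
#   pubs = only_valid_publications(fetch_all_publications("example"))
#   text = append_meta(pubs[0], get_content(pubs[0].get('canonical_url')))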
def only_valid_publications(publications= []):
valid_publications = []
for publication in publications:
is_paid = publication.get('audience') != 'everyone'
if (is_paid and publication.get('should_send_free_preview') != True) or publication.get('type') != 'newsletter': continue
valid_publications.append(publication)
return valid_publications
def get_content(article_link):
print(f"Fetching {article_link}")
if(len(article_link) == 0):
print("Invalid URL!")
return None
session = HTMLSession()
req = session.get(article_link)
if(req.ok == False):
print("Could not reach this url!")
return None
req.html.render()
full_text = None
with tempfile.NamedTemporaryFile(mode = "w") as tmp:
tmp.write(req.html.html)
tmp.seek(0)
loader = UnstructuredHTMLLoader(tmp.name)
data = loader.load()[0]
full_text = data.page_content
tmp.close()
return full_text
def append_meta(publication, text):
meta = {
'url': publication.get('canonical_url'),
'thumbnail': publication.get('cover_image'),
'title': publication.get('title'),
'subtitle': publication.get('subtitle'),
'description': publication.get('description'),
'createdAt': publication.get('post_date'),
'wordCount': publication.get('wordcount')
}
return "Newsletter Metadata:\n"+json.dumps(meta)+"\n\nArticle Content:\n" + text | [] |
2024-01-10 | hanselpetter/AnythingLLM | collector~scripts~gitbook.py | import os, json
from langchain.document_loaders import GitbookLoader
from urllib.parse import urlparse
from datetime import datetime
from alive_progress import alive_it
from .utils import tokenize
from uuid import uuid4
def gitbook():
url = input("Enter the URL of the GitBook you want to collect: ")
if(url == ''):
print("Not a gitbook URL")
exit(1)
primary_source = urlparse(url)
output_path = f"./outputs/gitbook-logs/{primary_source.netloc}"
transaction_output_dir = f"../server/storage/documents/gitbook-{primary_source.netloc}"
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    if not os.path.exists(transaction_output_dir):
        os.makedirs(transaction_output_dir)
loader = GitbookLoader(url, load_all_paths= primary_source.path in ['','/'])
for doc in alive_it(loader.load()):
metadata = doc.metadata
content = doc.page_content
source = urlparse(metadata.get('source'))
name = 'home' if source.path in ['','/'] else source.path.replace('/','_')
output_filename = f"doc-{name}.json"
transaction_output_filename = f"doc-{name}.json"
data = {
'id': str(uuid4()),
'url': metadata.get('source'),
"title": metadata.get('title'),
"description": metadata.get('title'),
"published": datetime.today().strftime('%Y-%m-%d %H:%M:%S'),
"wordCount": len(content),
'pageContent': content,
'token_count_estimate': len(tokenize(content))
}
with open(f"{output_path}/{output_filename}", 'w', encoding='utf-8') as file:
json.dump(data, file, ensure_ascii=True, indent=4)
with open(f"{transaction_output_dir}/{transaction_output_filename}", 'w', encoding='utf-8') as file:
json.dump(data, file, ensure_ascii=True, indent=4)
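# Illustrative usage (this module uses relative imports, so call it through the
# collector package rather than running the file directly):
#   from scripts.gitbook import gitbook
#   gitbook()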
| [] |
2024-01-10 | hanselpetter/AnythingLLM | collector~scripts~link.py | import os, json, tempfile
from urllib.parse import urlparse
from requests_html import HTMLSession
from langchain.document_loaders import UnstructuredHTMLLoader
from .link_utils import append_meta
from .utils import tokenize, ada_v2_cost
import requests
from bs4 import BeautifulSoup
# Example article URL: https://tim.blog/2022/08/09/nft-insider-trading-policy/
def link():
print("[NOTICE]: The first time running this process it will download supporting libraries.\n\n")
fqdn_link = input("Paste in the URL of an online article or blog: ")
if(len(fqdn_link) == 0):
print("Invalid URL!")
exit(1)
session = HTMLSession()
req = session.get(fqdn_link)
if(req.ok == False):
print("Could not reach this url!")
exit(1)
req.html.render()
full_text = None
with tempfile.NamedTemporaryFile(mode = "w") as tmp:
tmp.write(req.html.html)
tmp.seek(0)
loader = UnstructuredHTMLLoader(tmp.name)
data = loader.load()[0]
full_text = data.page_content
tmp.close()
link = append_meta(req, full_text, True)
if(len(full_text) > 0):
source = urlparse(req.url)
output_filename = f"website-{source.netloc}-{source.path.replace('/','_')}.json"
output_path = f"./outputs/website-logs"
transaction_output_filename = f"article-{source.path.replace('/','_')}.json"
transaction_output_dir = f"../server/storage/documents/website-{source.netloc}"
if os.path.isdir(output_path) == False:
os.makedirs(output_path)
if os.path.isdir(transaction_output_dir) == False:
os.makedirs(transaction_output_dir)
full_text = append_meta(req, full_text)
tokenCount = len(tokenize(full_text))
link['pageContent'] = full_text
link['token_count_estimate'] = tokenCount
with open(f"{output_path}/{output_filename}", 'w', encoding='utf-8') as file:
json.dump(link, file, ensure_ascii=True, indent=4)
with open(f"{transaction_output_dir}/{transaction_output_filename}", 'w', encoding='utf-8') as file:
json.dump(link, file, ensure_ascii=True, indent=4)
else:
print("Could not parse any meaningful data from this link or url.")
exit(1)
print(f"\n\n[Success]: article or link content fetched!")
print(f"////////////////////////////")
print(f"Your estimated cost to embed this data using OpenAI's text-embedding-ada-002 model at $0.0004 / 1K tokens will cost {ada_v2_cost(tokenCount)} using {tokenCount} tokens.")
print(f"////////////////////////////")
exit(0)
def crawler():
prompt = "Paste in root URI of the pages of interest: "
new_link = input(prompt)
filter_value = input("Add a filter value for the url to ensure links don't wander too far. eg: 'my-domain.com': ")
#extract this from the uri provided
root_site = urlparse(new_link).scheme + "://" + urlparse(new_link).hostname
links = []
urls = new_link
links.append(new_link)
grab = requests.get(urls)
soup = BeautifulSoup(grab.text, 'html.parser')
# traverse paragraphs from soup
for link in soup.find_all("a"):
data = link.get('href')
        if data:
fullpath = data if data[0] != '/' else f"{root_site}{data}"
try:
destination = urlparse(fullpath).scheme + "://" + urlparse(fullpath).hostname + (urlparse(fullpath).path if urlparse(fullpath).path is not None else '')
if filter_value in destination:
data = destination.strip()
print (data)
links.append(data)
else:
print (data + " does not apply for linking...")
except:
print (data + " does not apply for linking...")
#parse the links found
parse_links(links)
def links():
links = []
prompt = "Paste in the URL of an online article or blog: "
done = False
while(done == False):
new_link = input(prompt)
if(len(new_link) == 0):
done = True
links = [*set(links)]
continue
links.append(new_link)
prompt = f"\n{len(links)} links in queue. Submit an empty value when done pasting in links to execute collection.\nPaste in the next URL of an online article or blog: "
if(len(links) == 0):
print("No valid links provided!")
exit(1)
parse_links(links)
# parse links from array
def parse_links(links):
totalTokens = 0
for link in links:
print(f"Working on {link}...")
session = HTMLSession()
req = session.get(link, timeout=20)
if not req.ok:
print(f"Could not reach {link} - skipping!")
continue
req.html.render(timeout=10)
full_text = None
with tempfile.NamedTemporaryFile(mode="w") as tmp:
tmp.write(req.html.html)
tmp.seek(0)
loader = UnstructuredHTMLLoader(tmp.name)
data = loader.load()[0]
full_text = data.page_content
tmp.close()
link = append_meta(req, full_text, True)
if len(full_text) > 0:
source = urlparse(req.url)
output_filename = f"website-{source.netloc}-{source.path.replace('/','_')}.json"
output_path = f"./outputs/website-logs"
transaction_output_filename = f"article-{source.path.replace('/','_')}.json"
transaction_output_dir = f"../server/storage/documents/website-{source.netloc}"
if not os.path.isdir(output_path):
os.makedirs(output_path)
if not os.path.isdir(transaction_output_dir):
os.makedirs(transaction_output_dir)
full_text = append_meta(req, full_text)
tokenCount = len(tokenize(full_text))
link['pageContent'] = full_text
link['token_count_estimate'] = tokenCount
totalTokens += tokenCount
with open(f"{output_path}/{output_filename}", 'w', encoding='utf-8') as file:
json.dump(link, file, ensure_ascii=True, indent=4)
with open(f"{transaction_output_dir}/{transaction_output_filename}", 'w', encoding='utf-8') as file:
json.dump(link, file, ensure_ascii=True, indent=4)
req.session.close()
else:
print(f"Could not parse any meaningful data from {link}.")
continue
print(f"\n\n[Success]: {len(links)} article or link contents fetched!")
print(f"////////////////////////////")
print(f"Your estimated cost to embed this data using OpenAI's text-embedding-ada-002 model at $0.0004 / 1K tokens will cost {ada_v2_cost(totalTokens)} using {totalTokens} tokens.")
print(f"////////////////////////////") | [
"\n1 links in queue. Submit an empty value when done pasting in links to execute collection.\nPaste in the next URL of an online article or blog: ",
"Paste in root URI of the pages of interest: ",
"Paste in the URL of an online article or blog: "
] |
2024-01-10 | hanselpetter/AnythingLLM | collector~scripts~watch~convert~as_pdf.py | import os, time
from langchain.document_loaders import PyPDFLoader
from slugify import slugify
from ..utils import guid, file_creation_time, write_to_server_documents, move_source
from ...utils import tokenize
# Convert a PDF document into per-page, embedding-ready records.
def as_pdf(**kwargs):
parent_dir = kwargs.get('directory', 'hotdir')
filename = kwargs.get('filename')
ext = kwargs.get('ext', '.txt')
remove = kwargs.get('remove_on_complete', False)
fullpath = f"{parent_dir}/{filename}{ext}"
destination = f"../server/storage/documents/{slugify(filename)}-{int(time.time())}"
loader = PyPDFLoader(fullpath)
pages = loader.load_and_split()
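    # load_and_split() yields one Document per PDF page; each page becomes its
    # own embedding-ready JSON record in the loop below.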
print(f"-- Working {fullpath} --")
for page in pages:
pg_num = page.metadata.get('page')
print(f"-- Working page {pg_num} --")
content = page.page_content
data = {
'id': guid(),
'url': "file://"+os.path.abspath(f"{parent_dir}/processed/{filename}{ext}"),
'title': f"{filename}_pg{pg_num}{ext}",
'description': "a custom file uploaded by the user.",
'published': file_creation_time(fullpath),
'wordCount': len(content),
'pageContent': content,
'token_count_estimate': len(tokenize(content))
}
write_to_server_documents(data, f"{slugify(filename)}-pg{pg_num}-{data.get('id')}", destination)
move_source(parent_dir, f"{filename}{ext}", remove=remove)
print(f"[SUCCESS]: {filename}{ext} converted & ready for embedding.\n") | [] |
2024-01-10 | hanselpetter/AnythingLLM | collector~scripts~watch~convert~as_docx.py | import os
from langchain.document_loaders import Docx2txtLoader, UnstructuredODTLoader
from slugify import slugify
from ..utils import guid, file_creation_time, write_to_server_documents, move_source
from ...utils import tokenize
# Convert Word (.docx) and OpenDocument (.odt) files into embedding-ready records.
def as_docx(**kwargs):
parent_dir = kwargs.get('directory', 'hotdir')
filename = kwargs.get('filename')
ext = kwargs.get('ext', '.txt')
remove = kwargs.get('remove_on_complete', False)
fullpath = f"{parent_dir}/{filename}{ext}"
loader = Docx2txtLoader(fullpath)
data = loader.load()[0]
content = data.page_content
print(f"-- Working {fullpath} --")
data = {
'id': guid(),
'url': "file://"+os.path.abspath(f"{parent_dir}/processed/{filename}{ext}"),
'title': f"{filename}{ext}",
'description': "a custom file uploaded by the user.",
'published': file_creation_time(fullpath),
'wordCount': len(content),
'pageContent': content,
'token_count_estimate': len(tokenize(content))
}
write_to_server_documents(data, f"{slugify(filename)}-{data.get('id')}")
move_source(parent_dir, f"{filename}{ext}", remove=remove)
print(f"[SUCCESS]: {filename}{ext} converted & ready for embedding.\n")
def as_odt(**kwargs):
parent_dir = kwargs.get('directory', 'hotdir')
filename = kwargs.get('filename')
ext = kwargs.get('ext', '.txt')
remove = kwargs.get('remove_on_complete', False)
fullpath = f"{parent_dir}/{filename}{ext}"
loader = UnstructuredODTLoader(fullpath)
data = loader.load()[0]
content = data.page_content
print(f"-- Working {fullpath} --")
data = {
'id': guid(),
'url': "file://"+os.path.abspath(f"{parent_dir}/processed/{filename}{ext}"),
'title': f"{filename}{ext}",
'description': "a custom file uploaded by the user.",
'published': file_creation_time(fullpath),
'wordCount': len(content),
'pageContent': content,
'token_count_estimate': len(tokenize(content))
}
write_to_server_documents(data, f"{slugify(filename)}-{data.get('id')}")
move_source(parent_dir, f"{filename}{ext}", remove=remove)
print(f"[SUCCESS]: {filename}{ext} converted & ready for embedding.\n") | [] |
2024-01-10 | hanselpetter/AnythingLLM | collector~scripts~watch~convert~as_markdown.py | import os
from langchain.document_loaders import UnstructuredMarkdownLoader
from slugify import slugify
from ..utils import guid, file_creation_time, write_to_server_documents, move_source
from ...utils import tokenize
# Convert Markdown documents into embedding-ready records.
def as_markdown(**kwargs):
parent_dir = kwargs.get('directory', 'hotdir')
filename = kwargs.get('filename')
ext = kwargs.get('ext', '.txt')
remove = kwargs.get('remove_on_complete', False)
fullpath = f"{parent_dir}/{filename}{ext}"
loader = UnstructuredMarkdownLoader(fullpath)
data = loader.load()[0]
content = data.page_content
print(f"-- Working {fullpath} --")
data = {
'id': guid(),
'url': "file://"+os.path.abspath(f"{parent_dir}/processed/{filename}{ext}"),
'title': f"{filename}{ext}",
'description': "a custom file uploaded by the user.",
'published': file_creation_time(fullpath),
'wordCount': len(content),
'pageContent': content,
'token_count_estimate': len(tokenize(content))
}
write_to_server_documents(data, f"{slugify(filename)}-{data.get('id')}")
move_source(parent_dir, f"{filename}{ext}", remove=remove)
print(f"[SUCCESS]: {filename}{ext} converted & ready for embedding.\n") | [] |
2024-01-10 | we1k/LLM_IR | src~llm~template_manager.py | from langchain.prompts import PromptTemplate, FewShotPromptTemplate
class TemplateManager:
def __init__(self):
self.templates = {
            # "How to" style questions
0 : PromptTemplate(
input_variables=["question", "related_str"],
template="""根据相关信息,专业,准确并简要地回答问题。\n问题是:{question}\n已知信息:\n\n{related_str}\n答案是:\n
"""
),
            # Summarization
1 : PromptTemplate(
input_variables=["question", "answer"],
template="""请根据问题总结答案。问题是:{question}, 答案是:{answer}
"""
),
# few_shot
"few_shot" : PromptTemplate(
input_variables=["question", "related_str", "filtered_str","answer"],
template="你是一位智能汽车说明的问答助手,现在我们节选到了部分说明书中的信息,可能存在着部分无关的信息,请根据说明书的已知信息,筛选出相关信息,然后完整、准确并简要地回答问题,请你回答最直接的答案,不要回答无关内容,或进行解释和延伸。问题是:{question}\n已知信息:{related_str}\n筛选信息:{filtered_str}\n答案是:{answer}"
)
}
self.examples = [
{
"question": "在使用FCTA时需要注意哪些事项?",
"related_str": "当车速处于4-15km/h范围内,车辆驶出停车位或经过交叉路口时,若前方交叉路口预警系统(FCTA)监测到存在前方侧向碰撞风险,将通过仪表提示信息和蜂鸣声提醒驾驶注意观察前方道路情况,从而降低发生前方侧向碰撞的风险。\n<SEP>警告:\n■FCTA属于驾驶辅助系统,并不能确保在任何情况下均能正常工作;紧急情况时,驾驶员应及时接管车辆。\n■虽然车辆配备有FCTA,但为确保行驶安全,通过交叉路口或其他视线受阻的环境前应环顾四周,确认周围环境无危险。\n■FCTA主要对横穿的轿车/SUV、客车/卡车等车辆类型作用,对三轮车、外表不规则的车辆、行人、骑行者、动物具有识别的局限性而不起作用。\n■对于系统识别的有效目标,根据车辆、场景、路况的不同,FCTA并非总能达到相同的性能水平。",
"filtered_str": "<SEP>警告:\n■FCTA属于驾驶辅助系统,并不能确保在任何情况下均能正常工作;紧急情况时,驾驶员应及时接管车辆。\n■虽然车辆配备有FCTA,但为确保行驶安全,通过交叉路口或其他视线受阻的环境前应环顾四周,确认周围环境无危险。\n■FCTA主要对横穿的轿车/SUV、客车/卡车等车辆类型作用,对三轮车、外表不规则的车辆、行人、骑行者、动物具有识别的局限性而不起作用。\n■对于系统识别的有效目标,根据车辆、场景、路况的不同,FCTA并非总能达到相同的性能水平。",
"answer" : "在使用FCTA时,需要注意以下事项:\n\n1. FCTA属于驾驶辅助系统,并不能确保在任何情况下均能正常工作,紧急情况时,驾驶员应及时接管车辆。\n2. 虽然车辆配备有FCTA,但为确保行驶安全,通过交叉路口或其他视线受阻的环境前应环顾四周,确认周围环境无危险。\n3. FCTA主要对横穿的轿车/SUV、客车/卡车等车辆类型作用,对三轮车、外表不规则的车辆、行人、骑行者、动物具有识别的局限性而不起作用。\n4. 对于系统识别的有效目标,根据车辆、场景、路况的不同,FCTA并非总能达到相同的性能水平。"
},
{
"question" : "自动模式下,中央显示屏是如何切换日间和夜间模式的?",
"related_str" : "01点击设置中央显示屏显示模式(日间模式、夜间模式、自动)。\n<SEP>说明:\n您可以依据个人喜好选择自动模式:□日出到日落:白天显示日间模式,晚上显示夜间模式。\n□自定时段:依据设置的时间段切换显示模式。\n□日夜模式选择自动模式后,中央显示屏会自动切换日间模式或夜间模式。\n调节中央显示屏亮度在中央显示屏中点击-设置-显示,进入显示设置界面。\n01点击设置中央显示屏亮暗模式。\n02滑动滑条调节中央显示屏亮度。您还可以通过以下方式调节中央显示屏亮度:■中央显示屏车辆功能界面,请参见车辆功能界面(页码 266)。",
"filtered_str": "<SEP>说明:\n您可以依据个人喜好选择自动模式:□日出到日落:白天显示日间模式,晚上显示夜间模式。\n□自定时段:依据设置的时间段切换显示模式。\n□日夜模式选择自动模式后,中央显示屏会自动切换日间模式或夜间模式。",
"answer" : "在自动模式下,中央显示屏会根据当前环境自动切换日间和夜间模式。您可以在设置中央显示屏显示模式(日间模式、夜间模式、自动)来根据个人喜好选择模式。"
},
{
"question" : "我应该在哪里添加香氛精油?",
"related_str" : "香氛精油添加在香氛胶囊中,香氛胶囊位于手套箱内。\n1打开手套箱。\n2向下取下香氛胶囊。\n01香氛胶囊。\n3将香氛精油均匀地涂抹在香芯棒上(三面都要涂抹),静置一分钟。\n4将香氛胶囊安装回位即可。\n",
"filtered_str" : "香氛精油添加在香氛胶囊中,香氛胶囊位于手套箱内。",
"answer" : "在香氛精油内添加香氛精油。"
}
]
def get_template(self, template_name):
return self.templates.get(template_name, None)
def get_fewshot_template(self):
few_shot_prompt = FewShotPromptTemplate(
examples=self.examples,
example_prompt=self.templates.get("few_shot", None),
suffix="根据说明书的已知信息,筛选出相关信息,然后完整、准确并简要地回答问题。问题是:{question}\n已知信息:{related_str}\n",
input_variables=["question", "related_str"]
)
return few_shot_prompt
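# Illustrative usage (assumed call pattern, not taken from upstream code):
#   prompt = template_manager.get_template(0).format(question=q, related_str=ctx)
#   few_shot = template_manager.get_fewshot_template().format(question=q, related_str=ctx)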
template_manager = TemplateManager() | [
"根据相关信息,专业,准确并简要地回答问题。\n问题是:{question}\n已知信息:\n\n{related_str}\n答案是:\n\n ",
"question",
"related_str",
"请根据问题总结答案。问题是:{question}, 答案是:{answer}\n ",
"你是一位智能汽车说明的问答助手,现在我们节选到了部分说明书中的信息,可能存在着部分无关的信息,请根据说明书的已知信息,筛选出相关信息,然后完整、准确并简要地回答问题,请你回答最直接的答案,不要回答无关内容,或进行解释和延伸。问题是:{question}\n已知信息:{related_str}\n筛选信息:{filtered_str}\n答案是:{answer}",
"根据说明书的已知信息,筛选出相关信息,然后完整、准确并简要地回答问题。问题是:{question}\n已知信息:{related_str}\n"
] |
2024-01-10 | we1k/LLM_IR | retrieve_info_2.py | import re
import tqdm
import json
import spacy
import PyPDF2
from argparse import ArgumentParser
from src.embeddings import BGEpeftEmbedding
from langchain import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.schema import Document
def extract_page_text(filepath, max_len=256):
page_content = []
spliter = spacy.load("zh_core_web_sm")
chunks = []
with open(filepath, 'rb') as f:
pdf_reader = PyPDF2.PdfReader(f)
page_count = 10
pattern = r'^\d{1,3}'
for page in tqdm.tqdm(pdf_reader.pages[page_count:]):
page_text = page.extract_text().strip()
raw_text = [text.strip() for text in page_text.split('\n')]
new_text = '\n'.join(raw_text[1:])
new_text = re.sub(pattern, '', new_text).strip()
page_content.append(new_text)
            max_chunk_length = max_len  # maximum chunk length
current_chunk = ""
if len(new_text) > 10:
for sentence in spliter(new_text).sents:
sentence_text = sentence.text
if len(current_chunk) + len(sentence_text) <= max_chunk_length:
current_chunk += sentence_text
else:
chunks.append(Document(page_content=current_chunk, metadata={'page':page_count+1}))
current_chunk = sentence_text
                # Append the final chunk of this page (if any)
if current_chunk:
chunks.append(Document(page_content=current_chunk, metadata={'page':page_count+1}))
page_count += 1
    cleaned_chunks = []
    i = 0
    # Merge a short chunk into its predecessor to keep related context together.
    while i <= len(chunks) - 2:
        current_chunk = chunks[i]
        next_chunk = chunks[i + 1]
        if len(next_chunk.page_content) < 0.5 * len(current_chunk.page_content):
            new_chunk = Document(page_content=current_chunk.page_content + next_chunk.page_content, metadata=current_chunk.metadata)
            cleaned_chunks.append(new_chunk)
            i += 2
        else:
            cleaned_chunks.append(current_chunk)
            i += 1
    # The loop can exit without consuming the final chunk; keep it.
    if i == len(chunks) - 1:
        cleaned_chunks.append(chunks[i])
    return cleaned_chunks
def run_query(args):
## pdf -> Doc
if args.local_run == True:
filepath = "data/trainning_data.pdf"
else:
filepath = "/tcdata/trainning_data.pdf"
docs = extract_page_text(filepath=filepath, max_len=256)
# load in embedding model
if "bge" in args.embedding_model:
        if args.local_run:
model_name = "./models/bge-large-zh-v1.5"
else:
model_name = "/app/models/bge-large-zh-v1.5"
embeddings = BGEpeftEmbedding(model_name)
elif "stella" in args.embedding_model:
if args.local_run:
model_name = "/home/lzw/.hf_models/stella-base-zh-v2"
else:
model_name = "/app/rerank_model/stella-base-zh-v2"
embeddings = HuggingFaceEmbeddings(
model_name=model_name,
model_kwargs={"device": "cuda"} ,
encode_kwargs={"normalize_embeddings": False})
elif "gte" in args.embedding_model:
model_name = "/app/models/gte-large-zh"
embeddings = HuggingFaceEmbeddings(
model_name=model_name,
model_kwargs={"device": "cuda"} ,
encode_kwargs={"normalize_embeddings": False})
db = FAISS.from_documents(docs, embeddings)
db.save_local(folder_path='./vector', index_name='index_256')
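    # The persisted index can be reloaded later without re-embedding, e.g.:
    #   db = FAISS.load_local('./vector', embeddings, index_name='index_256')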
if args.local_run == True:
question_path = './data/all_question.json'
else:
question_path = "/tcdata/test_question.json"
with open(question_path, 'r', encoding='utf-8') as f:
question_list = json.load(f)
answers=[]
for i, line in enumerate(question_list):
# print(f"question {i}:", line['question'])
search_docs = db.similarity_search(line['question'], k=args.max_num_related_str)
# print(search_docs)
related_str = []
for doc in search_docs:
related_str.append(doc.page_content)
sample = {"question": line['question'], "related_str": related_str, "keyword": ""}
answers.append(sample)
with open(f"result/related_str.json", 'w', encoding='utf-8') as f:
json.dump(answers, f, ensure_ascii=False, indent=4)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("--test", action="store_true")
parser.add_argument("--max_num_related_str", default=5, type=int)
parser.add_argument("--local_run", action="store_true")
parser.add_argument("--embedding_model", default="stella")
args = parser.parse_args()
# bge // stella // gte
run_query(args) | [] |
2024-01-10 | we1k/LLM_IR | src~parse_pdf.py | import re
import sys
import random
from collections import defaultdict
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter, NLTKTextSplitter, CharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
def contains_digit(sentence):
for char in sentence:
if char.isdigit():
return True
return False
def split_text_with_numbers_as_delimiters(text):
    # Drop footers that consist only of a page number.
if text.isdigit():
return [text], []
text = text.replace(".", "")
delimiters = re.findall(r'\d+', text)
    # Exclude numbers that are actually part of tokens such as "12V" or "360°".
delimiters = [number for number in delimiters if not (re.search(r'12V', number) or re.search(r'360°', number))]
if delimiters:
pattern = '|'.join(map(re.escape, delimiters))
        # Split the text on those page numbers.
parts = re.split(pattern, text)
        # Drop empty fragments.
parts = [part.strip() for part in parts if part.strip() ]
delimiters = [int(delimiter.strip()) for delimiter in delimiters if delimiter.strip() ]
return parts, delimiters
else:
        # No numeric delimiters found; return the original text.
return [text], delimiters
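# Example (illustrative catalogue line): "驻车12行车15" returns
#   (["驻车", "行车"], [12, 15]), i.e. subsection titles with their page numbers.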
def parse_page_of_content(pdf_path='data/QA.pdf'):
loader = PyPDFLoader(pdf_path)
pdf_docs = loader.load()
text_splitter = CharacterTextSplitter(
chunk_size=100,
chunk_overlap=20,
)
documents = text_splitter.split_documents(pdf_docs)
table_of_content = dict()
page_index = None
    ## pages 1-7 of the PDF are the table of contents
for page in documents[1:7]:
sents = page.page_content.split('\n')
for sent in sents:
if "目录" in sent:
continue
elif not contains_digit(sent):
if page_index != None:
main_chapter_name = main_chapter_name.replace("V电源", "车载12V电源").replace("°全景影像", "360°全景影像")
table_of_content[main_chapter_name] = page_index
page_index = defaultdict(list)
main_chapter_name = sent
else:
sub_sections, page_ids = split_text_with_numbers_as_delimiters(sent)
if len(sub_sections) != len(page_ids):
print(f"Not matched chapter name and page_num, source: {sent}")
for sub_section, page in zip(sub_sections, page_ids):
page_index[sub_section].append(page)
return documents, table_of_content
def find_first_key_geq(d, x):
for key, value in reversed(d.items()):
if x >= value:
return key
return "None" # 返回 None 如果没有找到满足条件的键
def parse_section_doc(documents, all_key_word):
all_text = ""
for doc in documents:
        # Strip the page number.
page_id = doc.metadata['page'] + 1
content = doc.page_content.replace(str(page_id), "")
new_lines = []
tmp = ""
        ## Re-join lines that were wrapped mid-sentence.
for line in content.split("\n"):
line = line.strip()
tmp += line
if line.endswith("。"):
new_lines.append(tmp)
tmp = ""
elif line in all_key_word:
                # Add <sub_section> tags around keyword headings.
line = "\n<sub_section>" + line
if len(new_lines) > 0:
new_lines[-1] = new_lines[-1] + "\n<\sub_section>"
new_lines.append(line)
tmp = ""
new_lines.append(tmp)
        # Strip the section-name page header.
section_name = find_first_key_geq(section_start_page_id, page_id)
if new_lines[0].startswith(section_name):
new_lines[0] = new_lines[0].replace(section_name, "")
content = "\n".join(new_lines)
content = content.replace("警告!", "<SEP>警告:\n")
content = content.replace("注意!", "<SEP>注意:\n")
content = content.replace("说明!", "<SEP>说明:\n")
# all_text += f"\n<PAGE_SEP> page_id:{page_id}\n" + content
all_text += content | [] |
2024-01-10 | we1k/LLM_IR | src~text_splitter.py | import re
from typing import List, Optional, Any
from langchain.text_splitter import RecursiveCharacterTextSplitter
import logging
logger = logging.getLogger(__name__)
def _split_text_with_regex_from_end(
text: str, separator: str, keep_separator: bool
) -> List[str]:
# Now that we have the separator, split the text
if separator:
if keep_separator:
# The parentheses in the pattern keep the delimiters in the result.
_splits = re.split(f"({separator})", text)
splits = ["".join(i) for i in zip(_splits[0::2], _splits[1::2])]
if len(_splits) % 2 == 1:
splits += _splits[-1:]
# splits = [_splits[0]] + splits
else:
splits = re.split(separator, text)
else:
splits = list(text)
return [s for s in splits if s != ""]
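# Example: re-splitting "今天下雨。明天晴。" on "。|!|?" keeps each delimiter
# attached to the end of its sentence: ["今天下雨。", "明天晴。"].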
class ChineseRecursiveTextSplitter(RecursiveCharacterTextSplitter):
def __init__(
self,
separators: Optional[List[str]] = None,
keep_separator: bool = True,
is_separator_regex: bool = True,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(keep_separator=keep_separator, **kwargs)
self._separators = separators or [
"\n\n",
"\n",
"。|!|?",
"\.\s|\!\s|\?\s",
";|;\s",
",|,\s"
]
self._is_separator_regex = is_separator_regex
def _split_text(self, text: str, separators: List[str]) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
_separator = _s if self._is_separator_regex else re.escape(_s)
if _s == "":
separator = _s
break
if re.search(_separator, text):
separator = _s
new_separators = separators[i + 1:]
break
_separator = separator if self._is_separator_regex else re.escape(separator)
splits = _split_text_with_regex_from_end(text, _separator, self._keep_separator)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
_separator = "" if self._keep_separator else separator
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
_good_splits = []
if not new_separators:
final_chunks.append(s)
else:
other_info = self._split_text(s, new_separators)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
return [re.sub(r"\n{2,}", "\n", chunk.strip()) for chunk in final_chunks if chunk.strip()!=""]
if __name__ == "__main__":
text_splitter = ChineseRecursiveTextSplitter(
keep_separator=True,
is_separator_regex=True,
chunk_size=50,
chunk_overlap=0
)
ls = [
"""中国对外贸易形势报告(75页)。前 10 个月,一般贸易进出口 19.5 万亿元,增长 25.1%, 比整体进出口增速高出 2.9 个百分点,占进出口总额的 61.7%,较去年同期提升 1.6 个百分点。其中,一般贸易出口 10.6 万亿元,增长 25.3%,占出口总额的 60.9%,提升 1.5 个百分点;进口8.9万亿元,增长24.9%,占进口总额的62.7%, 提升 1.8 个百分点。加工贸易进出口 6.8 万亿元,增长 11.8%, 占进出口总额的 21.5%,减少 2.0 个百分点。其中,出口增 长 10.4%,占出口总额的 24.3%,减少 2.6 个百分点;进口增 长 14.2%,占进口总额的 18.0%,减少 1.2 个百分点。此外, 以保税物流方式进出口 3.96 万亿元,增长 27.9%。其中,出 口 1.47 万亿元,增长 38.9%;进口 2.49 万亿元,增长 22.2%。前三季度,中国服务贸易继续保持快速增长态势。服务 进出口总额 37834.3 亿元,增长 11.6%;其中服务出口 17820.9 亿元,增长 27.3%;进口 20013.4 亿元,增长 0.5%,进口增 速实现了疫情以来的首次转正。服务出口增幅大于进口 26.8 个百分点,带动服务贸易逆差下降 62.9%至 2192.5 亿元。服 务贸易结构持续优化,知识密集型服务进出口 16917.7 亿元, 增长 13.3%,占服务进出口总额的比重达到 44.7%,提升 0.7 个百分点。 二、中国对外贸易发展环境分析和展望 全球疫情起伏反复,经济复苏分化加剧,大宗商品价格 上涨、能源紧缺、运力紧张及发达经济体政策调整外溢等风 险交织叠加。同时也要看到,我国经济长期向好的趋势没有 改变,外贸企业韧性和活力不断增强,新业态新模式加快发 展,创新转型步伐提速。产业链供应链面临挑战。美欧等加快出台制造业回迁计 划,加速产业链供应链本土布局,跨国公司调整产业链供应 链,全球双链面临新一轮重构,区域化、近岸化、本土化、 短链化趋势凸显。疫苗供应不足,制造业“缺芯”、物流受限、 运价高企,全球产业链供应链面临压力。 全球通胀持续高位运行。能源价格上涨加大主要经济体 的通胀压力,增加全球经济复苏的不确定性。世界银行今年 10 月发布《大宗商品市场展望》指出,能源价格在 2021 年 大涨逾 80%,并且仍将在 2022 年小幅上涨。IMF 指出,全 球通胀上行风险加剧,通胀前景存在巨大不确定性。""",
]
# text = """"""
for inum, text in enumerate(ls):
print(inum)
chunks = text_splitter.split_text(text)
for chunk in chunks:
print(chunk) | [] |
2024-01-10 | we1k/LLM_IR | src~preprocess.py | import os
import re
import json
from collections import defaultdict
from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTextSplitter
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import HuggingFaceEmbeddings, HuggingFaceBgeEmbeddings
from langchain.schema import Document
from langchain.vectorstores import FAISS, Chroma
from src.text_splitter import ChineseRecursiveTextSplitter
from src.utils import save_docs_to_jsonl, load_docs_from_jsonl, load_embedding_model
MAX_KEYWORD_LEN = 13
DELIMITER = [',', ',', '。', ';', '–', ':', '!', '-', '、', '■', '□', '℃',
'.', '•']
def save_docs_to_jsonl(array, file_path:str)->None:
with open(file_path, 'w') as jsonl_file:
for doc in array:
jsonl_file.write(json.dumps(doc.dict(), ensure_ascii=False) + '\n')
def contains_chinese_characters(text):
    pattern = re.compile(r'[\u4e00-\u9fff]')  # Unicode range for Chinese (CJK) characters
match = pattern.search(text)
return match is not None
def get_keywords(file_path='pdf_output/trainning_data.outline'):
# File path to the outline document
# Read the entire file content
with open(file_path, 'r') as file:
file_content = file.read()
# Use regular expression to find all instances of chapter names in <a> tags
chapter_details = re.findall(
r'<a class="l"[^>]*data-dest-detail=\'\[(\d+),[^\]]+\]\'>(.*?)\s*</a>',
file_content
)
chapter_to_number_dict = {detail[1].strip(): int(detail[0]) for detail in chapter_details}
chapter_names = [k.replace("&", "&").strip() for k, v in chapter_to_number_dict.items()]
return chapter_names
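# An outline entry the regex above is meant to match looks roughly like this
# (illustrative values): <a class="l" data-dest-detail='[33,"XYZ",0,0]'>座椅调节 </a>
# which yields the pair ("33", "座椅调节").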
def build_sections(keywords, max_sentence_len=29):
section_docs = []
sections = defaultdict(str)
chapter_name = ""
tmp = ""
with open("data/all.txt", 'r', encoding='UTF-8') as f:
for line in f.readlines():
# 去除目录
if ". . . ." in line or "目录" in line or "...." in line:
continue
if line.strip() in keywords:
if chapter_name != "":
sections[chapter_name] += "<SEP>" + tmp
tmp = ""
chapter_name = line.strip()
else:
tmp += line
with open("data/section.json", 'w', encoding='UTF-8') as f:
json.dump(sections, f, ensure_ascii=False, indent=4)
sections[chapter_name] = tmp
for chapter_name, text in sections.items():
subsection_dict = {}
text = text.replace("点击\n", "点击")
text = text.replace("\n-", "-")
text = text.replace("\n“\n", "")
text = text.replace("\n”\n", "")
text = text.replace("\\", "")
text = re.sub(r"\n\d+km/h\n", "", text)
text = text.replace("的\n", "的")
sentences = text.split('\n')
keyword = chapter_name
cur_chunk = chapter_name + "\n"
for sentence in sentences:
sentence = sentence.strip("<SEP>").replace('"', '').replace(" ", "")
if len(sentence) == 0 or sentence.isdigit():
continue
            # Most likely a heading line,
            # possibly prefixed with a section number such as "1.1 Title".
elif re.match(r"^\d*(\.\d+)?.*$", sentence) and not any(it in sentence for it in DELIMITER) and not sentence.startswith("0") and 1< len(sentence) <= MAX_KEYWORD_LEN:
if cur_chunk.strip("\n") != keyword:
subsection_dict[keyword] = cur_chunk
keyword = sentence
cur_chunk = sentence + "\n"
            # Over-long or comma-continued lines are spliced onto the current chunk.
elif len(sentence) >= max_sentence_len - 1 or ("," in sentence and not sentence.endswith("。")):
cur_chunk += sentence
            # Line starts with a delimiter, so glue it onto the previous line.
elif any(sentence.startswith(it) for it in DELIMITER):
cur_chunk = cur_chunk.strip("\n") + sentence + "\n"
else:
cur_chunk += sentence + "\n"
# adding last chunk
if cur_chunk.strip("\n") != keyword:
subsection_dict[keyword] = cur_chunk
for subkeyword, text_chunk in subsection_dict.items():
if len(text_chunk.strip("<SEP>")) > 0 and not text_chunk.isalpha() > 0:
# skip special char
text_chunk = text_chunk.replace("<SEP>", "")
# skip too short section (maybe a table content)
# or section name didn't contain chinese characters
if len(text_chunk.replace("\n", "")) - len(subkeyword) < 5 or not contains_chinese_characters(subkeyword):
continue
section_docs.append(Document(page_content=text_chunk, metadata={"keyword": chapter_name, "subkeyword": subkeyword}))
return section_docs
def preprocess(embeddings, max_sentence_len=20):
keywords = get_keywords()
# print(keywords)
with open("data/raw.txt", 'r', encoding='UTF-8') as f:
text = f.read()
pages = re.split(r'!\[\]\(.+?\)', text)
    # Strip page headers and page numbers.
for i in range(len(pages)):
lines, idx = pages[i].split("\n"), 0
lines = [line for line in lines if len(line.strip()) > 0]
while len(lines) > 0 and not contains_chinese_characters(lines[-1]):
lines.pop(-1)
while 0 <= idx < len(lines) and lines[idx].strip().isdigit():
idx += 1
pages[i] = "\n".join(lines[idx+2:])
# pages[i] = re.sub(rf'^.*?\n{i}\n', "", pages[i], flags=re.DOTALL)
        # Strip figure labels such as "A1-B2".
pages[i] = re.sub(r"[A-Za-z0-9]+-[A-Za-z0-9]+\n", "", pages[i])
all_text = "".join(pages).replace("\n\n", "\n")
with open("data/all.txt", 'w', encoding='UTF-8') as f:
f.write(all_text)
section_docs = build_sections(keywords, max_sentence_len)
# section_docs_tmp = load_docs_from_jsonl("doc/section_docs_1.jsonl")
# section_docs += section_docs_tmp
section_docs = load_docs_from_jsonl("doc/section_docs.jsonl")
all_keywords = [doc.metadata["keyword"] for doc in section_docs] + [doc.metadata["subkeyword"] for doc in section_docs]
all_keywords = list(set(all_keywords))
with open("data/keywords.txt", 'w', encoding='UTF-8') as f:
f.write("\n".join(all_keywords))
db = FAISS.from_documents(section_docs, embeddings)
db.save_local("vector_store/section_db")
# index_db = FAISS.from_texts(all_keywords, embeddings)
# index_db.save_local("vector_store/index_db")
# index_db = FAISS.load_local('vector_store/index_db', embeddings)
# sentence cut
chunk_size = 120
chunk_overlap = 20
sentence_splitter = ChineseRecursiveTextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
length_function=len,
is_separator_regex=False
)
for doc in section_docs:
doc.page_content = doc.page_content.replace(" ", "")
sent_docs = sentence_splitter.split_documents(section_docs)
# adding index and combine
cur_doc_content = ""
clean_sent_docs = []
for doc in sent_docs:
cur_doc_content += doc.page_content
# TODO : 50 is the hyperparameter, and using doc.page_content instead of cur_doc_content may be a bug?
# doc.page_content > 50 means the doc is a complete sentence, but cur_doc_content is not
if len(doc.page_content) >= 50:
doc.page_content = cur_doc_content
# doc.page_content = doc.page_content.replace(" ", "")
doc.page_content = doc.page_content.replace("<SEP>", "")
doc.page_content = doc.page_content.replace("•", "")
doc.page_content = doc.page_content.replace("□", "")
doc.page_content = doc.page_content.strip("。\n")
doc.metadata['index'] = len(clean_sent_docs)
clean_sent_docs.append(doc)
cur_doc_content = ""
sent_docs = clean_sent_docs
save_docs_to_jsonl(sent_docs, "doc/sent_docs.jsonl")
sent_db = FAISS.from_documents(sent_docs, embeddings)
sent_db.save_local("vector_store/sentence_db")
if __name__ == '__main__':
embeddings = load_embedding_model("stella", True)
preprocess(embeddings) | [] |
2024-01-10 | khuang9/health_log | health_logs~ai_advisor.py | import openai
import os
os.environ['OPENAI_API_KEY'] = "to be replaced"
from langchain.indexes import VectorstoreIndexCreator
from langchain.document_loaders.csv_loader import CSVLoader
csvloader = CSVLoader(file_path=r"C:\Prj\LLM\Hackathon\health_log\media\measurements.txt", source_column="measurement type")
def ai_answer(query):
index = VectorstoreIndexCreator().from_loaders([csvloader])
answer = index.query(query).strip()
return answer
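# Illustrative usage (hypothetical question, not part of the app):
#   print(ai_answer("What was my most recent blood pressure measurement?"))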
| [] |
2024-01-10 | khuang9/health_log | health_logs~ai_improver.py | import openai
from utils import *
from .constants import *
openai.api_key = OPENAIKEY
import streamlit as st
from io import StringIO
from PIL import Image
FIXED_KEYS = ['NAME','SUMMARY','DOB','WORKING AS','CONTACTS']
VARIABLE_KEYS = ['EXPERIENCE','SCHOOL']
ORDERED_KEYS = ['NAME','DOB','WORKING AS','SUMMARY','EXPERIENCE','SCHOOL','CONTACTS']
DOB_PROMPT = 'Check that the following date is in YYYY/MM/DD. Correct it if its not in the same format, otherwise return the same: '
SUMMARY_PROMPT_CONVERT = 'Convert the text in a CV summary:'
TEMPERATURE_SUMMARY_PROMPT_CONVERT = 0.8
SUMMARY_PROMPT_IMPROVER = 'Improve the following text quality:'
TEMPERATURE_SUMMARY_PROMPT_IMPROVER = 0.3
OPENAIMODEL = 'text-davinci-003'
OPENAIKEY = "fake_key"
TEMPLATE_FILE = 'cv_template.txt'
EXPERIENCE_PROMPT_CONVERT = "Make the text more appealing for a recruiter:"
RESULT_FILE = 'cv_improved.txt'
def general_corrector(prompt, temperature,model = OPENAIMODEL,max_tokens = 20):
openai.api_key = OPENAIKEY
res = "blah blah"
#openai.Completion.create(model=model,prompt=prompt,temperature=temperature,max_tokens=max_tokens)
return res['choices'][0]['text']
def single_experience_corrector(experience_text):
correct_text = general_corrector(prompt=EXPERIENCE_PROMPT_CONVERT+experience_text,temperature=0.4,max_tokens=200)
st.markdown("<span style='color:lightblue'>"+experience_text+"</span>",
unsafe_allow_html=True)
    st.text('The AI suggests the following description instead: \n')
#print(final_correction)
st.markdown("<span style='color:red'>"+correct_text+"</span>",
unsafe_allow_html=True)
return correct_text
def summary_corrector(summary_text):
print('The AI is rephrasing the text (if necessary): \n')
st.text('The AI is rephrasing the text (if necessary):\n')
first_correction = general_corrector(prompt=SUMMARY_PROMPT_CONVERT+summary_text,temperature=TEMPERATURE_SUMMARY_PROMPT_CONVERT,max_tokens=200)
print('The AI is improving the rephrased summary \n')
st.text('The AI is improving the rephrased summary \n')
final_correction = general_corrector(prompt=SUMMARY_PROMPT_IMPROVER+first_correction,temperature =TEMPERATURE_SUMMARY_PROMPT_IMPROVER,max_tokens=200)
print('The summary of your current CV is the following:\n')
print(summary_text)
#st.text(summary_text)
st.text('The summary section of your CV is the following one: \n')
st.markdown("<span style='color:lightblue'>"+summary_text+"</span>",
unsafe_allow_html=True)
st.text('The AI suggests the following summary instead: \n')
print(final_correction)
st.markdown("<span style='color:red'>"+final_correction+"</span>",
unsafe_allow_html=True)
return final_correction
def summary_corrector_main(summary_text):
first_correction = general_corrector(prompt=SUMMARY_PROMPT_CONVERT+summary_text,temperature=TEMPERATURE_SUMMARY_PROMPT_CONVERT,max_tokens=200)
final_correction = general_corrector(prompt=SUMMARY_PROMPT_IMPROVER+first_correction,temperature =TEMPERATURE_SUMMARY_PROMPT_IMPROVER,max_tokens=200)
return final_correction
def single_experience_corrector_main(experience_text):
correct_text = general_corrector(prompt=EXPERIENCE_PROMPT_CONVERT+experience_text,temperature=0.4,max_tokens=200)
return correct_text | [
"Improve the following text quality:",
"cv_template.txt",
"Check that the following date is in YYYY/MM/DD. Correct it if its not in the same format, otherwise return the same: ",
"0.8",
"Make the text more appealing for a recruiter:",
"0.3",
"Convert the text in a CV summary:"
] |
2024-01-10 | CalebCourier/guardrails | guardrails~llm_providers.py | import os
from dataclasses import dataclass
from functools import partial
from typing import Any, Awaitable, Callable, Dict, List, Optional, cast
import openai
from pydantic import BaseModel
from tenacity import retry, retry_if_exception_type, wait_exponential_jitter
try:
MANIFEST = True
import manifest
except ImportError:
MANIFEST = False
OPENAI_RETRYABLE_ERRORS = [
openai.error.APIConnectionError,
openai.error.APIError,
openai.error.TryAgain,
openai.error.Timeout,
openai.error.RateLimitError,
openai.error.ServiceUnavailableError,
]
RETRYABLE_ERRORS = tuple(OPENAI_RETRYABLE_ERRORS)
class PromptCallableException(Exception):
pass
###
# Synchronous wrappers
###
@dataclass
class PromptCallable:
"""A wrapper around a callable that takes in a prompt.
Catches exceptions to let the user know clearly if the callable
failed, and how to fix it.
"""
fn: Callable
@retry(
wait=wait_exponential_jitter(max=60),
retry=retry_if_exception_type(RETRYABLE_ERRORS),
)
def __call__(self, *args, **kwargs):
try:
result = self.fn(*args, **kwargs)
except Exception as e:
raise PromptCallableException(
"The callable `fn` passed to `Guard(fn, ...)` failed"
f" with the following error: `{e}`. "
"Make sure that `fn` can be called as a function that"
" takes in a single prompt string "
"and returns a string."
)
if not isinstance(result, str):
raise PromptCallableException(
"The callable `fn` passed to `Guard(fn, ...)` returned"
f" a non-string value: {result}. "
"Make sure that `fn` can be called as a function that"
" takes in a single prompt string "
"and returns a string."
)
return result
def nonchat_prompt(prompt: str, instructions: Optional[str] = None, **kwargs) -> str:
"""Prepare final prompt for nonchat engine."""
if instructions:
prompt = "\n\n".join([instructions, prompt])
return prompt
def chat_prompt(
prompt: str, instructions: Optional[str] = None, **kwargs
) -> List[Dict[str, str]]:
"""Prepare final prompt for chat engine."""
if not instructions:
instructions = "You are a helpful assistant."
return [
{"role": "system", "content": instructions},
{"role": "user", "content": prompt},
]
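# For example, chat_prompt("Hi", "Be terse") returns:
#   [{"role": "system", "content": "Be terse"}, {"role": "user", "content": "Hi"}]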
def openai_wrapper(
text: str,
engine: str = "text-davinci-003",
instructions: Optional[str] = None,
*args,
**kwargs,
):
api_key = os.environ.get("OPENAI_API_KEY")
openai_response = openai.Completion.create(
api_key=api_key,
engine=engine,
prompt=nonchat_prompt(text, instructions, **kwargs),
*args,
**kwargs,
)
return openai_response["choices"][0]["text"]
def openai_chat_wrapper(
text: str,
model="gpt-3.5-turbo",
instructions: Optional[str] = None,
base_model: Optional[BaseModel] = None,
*args,
**kwargs,
):
if base_model:
base_model_schema = base_model.schema()
function_params = {
"name": base_model_schema["title"],
"description": base_model_schema["description"]
if "description" in base_model_schema
else None,
"parameters": base_model_schema,
}
api_key = os.environ.get("OPENAI_API_KEY")
# TODO: update this as new models are released
if base_model:
openai_response = openai.ChatCompletion.create(
api_key=api_key,
model=model,
messages=chat_prompt(text, instructions, **kwargs),
functions=[function_params],
function_call={"name": function_params["name"]},
*args,
**kwargs,
)
return openai_response["choices"][0]["message"]["function_call"]["arguments"]
else:
openai_response = openai.ChatCompletion.create(
api_key=api_key,
model=model,
messages=chat_prompt(text, instructions, **kwargs),
*args,
**kwargs,
)
return openai_response["choices"][0]["message"]["content"]
def manifest_wrapper(
text: str, client: Any, instructions: Optional[str] = None, *args, **kwargs
):
"""Wrapper for manifest client.
    To use manifest for guardrails, do
```
client = Manifest(client_name=..., client_connection=...)
raw_llm_response, validated_response = guard(
client,
prompt_params={...},
...
```
"""
if not MANIFEST:
raise PromptCallableException(
"The `manifest` package is not installed. "
"Install with `pip install manifest-ml`"
)
client = cast(manifest.Manifest, client)
manifest_response = client.run(
nonchat_prompt(text, instructions, **kwargs), *args, **kwargs
)
return manifest_response
def get_llm_ask(llm_api: Callable, *args, **kwargs) -> PromptCallable:
if llm_api == openai.Completion.create:
fn = partial(openai_wrapper, *args, **kwargs)
elif llm_api == openai.ChatCompletion.create:
fn = partial(openai_chat_wrapper, *args, **kwargs)
elif MANIFEST and isinstance(llm_api, manifest.Manifest):
fn = partial(manifest_wrapper, client=llm_api, *args, **kwargs)
else:
# Let the user pass in an arbitrary callable.
fn = partial(llm_api, *args, **kwargs)
return PromptCallable(fn=fn)
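# Illustrative use (argument values are assumptions): wrap an LLM API once and
# every call gets the retry behavior of PromptCallable:
#   ask = get_llm_ask(openai.Completion.create, engine="text-davinci-003")
#   text = ask("Say hello")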
###
# Async wrappers
###
@dataclass
class AsyncPromptCallable:
"""A wrapper around a callable that takes in a prompt.
Catches exceptions to let the user know clearly if the callable
failed, and how to fix it.
"""
fn: Callable[[Any], Awaitable[Any]]
@retry(
wait=wait_exponential_jitter(max=60),
retry=retry_if_exception_type(RETRYABLE_ERRORS),
)
async def __call__(self, *args, **kwargs):
try:
result = await self.fn(*args, **kwargs)
except Exception as e:
raise PromptCallableException(
"The callable `fn` passed to `Guard(fn, ...)` failed"
f" with the following error: `{e}`. "
"Make sure that `fn` can be called as a function that"
" takes in a single prompt string "
"and returns a string."
)
if not isinstance(result, str):
raise PromptCallableException(
"The callable `fn` passed to `Guard(fn, ...)` returned"
f" a non-string value: {result}. "
"Make sure that `fn` can be called as a function that"
" takes in a single prompt string "
"and returns a string."
)
return result
async def async_openai_wrapper(
text: str,
engine: str = "text-davinci-003",
instructions: Optional[str] = None,
*args,
**kwargs,
):
api_key = os.environ.get("OPENAI_API_KEY")
openai_response = await openai.Completion.acreate(
api_key=api_key,
engine=engine,
prompt=nonchat_prompt(text, instructions, **kwargs),
*args,
**kwargs,
)
return openai_response["choices"][0]["text"]
async def async_openai_chat_wrapper(
text: str,
model="gpt-3.5-turbo",
instructions: Optional[str] = None,
*args,
**kwargs,
):
api_key = os.environ.get("OPENAI_API_KEY")
openai_response = await openai.ChatCompletion.acreate(
api_key=api_key,
model=model,
messages=chat_prompt(text, instructions, **kwargs),
*args,
**kwargs,
)
return openai_response["choices"][0]["message"]["content"]
async def async_manifest_wrapper(
text: str, client: Any, instructions: Optional[str] = None, *args, **kwargs
):
"""Async wrapper for manifest client.
To use manifest for guardrails, do
```
client = Manifest(client_name=..., client_connection=...)
raw_llm_response, validated_response = guard(
client,
prompt_params={...},
...
```
"""
if not MANIFEST:
raise PromptCallableException(
"The `manifest` package is not installed. "
"Install with `pip install manifest-ml`"
)
client = cast(manifest.Manifest, client)
manifest_response = await client.run(
nonchat_prompt(text, instructions, **kwargs), *args, **kwargs
)
return manifest_response
def get_async_llm_ask(llm_api: Callable[[Any], Awaitable[Any]], *args, **kwargs):
if llm_api == openai.Completion.acreate:
fn = partial(async_openai_wrapper, *args, **kwargs)
elif llm_api == openai.ChatCompletion.acreate:
fn = partial(async_openai_chat_wrapper, *args, **kwargs)
elif MANIFEST and isinstance(llm_api, manifest.Manifest):
fn = partial(async_manifest_wrapper, client=llm_api, *args, **kwargs)
else:
# Let the user pass in an arbitrary callable.
fn = partial(llm_api, *args, **kwargs)
return AsyncPromptCallable(fn=fn)
| [
"\n\n",
"You are a helpful assistant."
] |
2024-01-10 | searchgame/ontogpt | src~ontogpt~engines~spires_engine.py | """
Main Knowledge Extractor class.
This works by recursively constructing structured prompt-completions in which
a pseudo-YAML structure is requested, where the YAML
structure corresponds to a template class.
Described in the SPIRES manuscript.
TODO: add link
"""
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import pydantic
from linkml_runtime.linkml_model import ClassDefinition, SlotDefinition
from ontogpt.engines.knowledge_engine import (
ANNOTATION_KEY_PROMPT,
ANNOTATION_KEY_PROMPT_SKIP,
EXAMPLE,
FIELD,
OBJECT,
KnowledgeEngine,
chunk_text,
)
from ontogpt.templates.core import ExtractionResult
this_path = Path(__file__).parent
RESPONSE_ATOM = Union[str, "ResponseAtom"]
RESPONSE_DICT = Dict[FIELD, Union[RESPONSE_ATOM, List[RESPONSE_ATOM]]]
@dataclass
class SPIRESEngine(KnowledgeEngine):
"""Knowledge extractor."""
engine: str = "openai-text-davinci-003"
recurse: bool = True
"""If true, then complex non-named entity objects are always recursively parsed.
If this is false AND the complex object is a pair, then token-based splitting is
instead used.
TODO: deprecate this, it's not clear that token-based splitting is better, due to
the inability to control which tokens GPT will use"""
sentences_per_window: Optional[int] = None
"""If set, this will split the text into chains of sentences,
where this determines the maximum number of sentences per chain.
The results are then merged together."""
def extract_from_text(
self, text: str, cls: ClassDefinition = None, object: OBJECT = None
) -> ExtractionResult:
"""
Extract annotations from the given text.
:param text:
:param cls:
:param object: optional stub object
:return:
"""
if self.sentences_per_window:
chunks = chunk_text(text, self.sentences_per_window)
extracted_object = None
for chunk in chunks:
raw_text = self._raw_extract(chunk, cls, object=object)
logging.info(f"RAW TEXT: {raw_text}")
next_object = self.parse_completion_payload(raw_text, cls, object=object)
if extracted_object is None:
extracted_object = next_object
else:
for k, v in next_object.items():
if isinstance(v, list):
extracted_object[k] += v
else:
if k not in extracted_object:
extracted_object[k] = v
else:
extracted_object[k] = v
else:
raw_text = self._raw_extract(text, cls, object=object)
logging.info(f"RAW TEXT: {raw_text}")
extracted_object = self.parse_completion_payload(raw_text, cls, object=object)
return ExtractionResult(
input_text=text,
raw_completion_output=raw_text,
prompt=self.last_prompt,
extracted_object=extracted_object,
named_entities=self.named_entities,
)
def _extract_from_text_to_dict(self, text: str, cls: ClassDefinition = None) -> RESPONSE_DICT:
raw_text = self._raw_extract(text, cls)
return self._parse_response_to_dict(raw_text, cls)
def generalize(
self, object: Union[pydantic.BaseModel, dict], examples: List[EXAMPLE]
) -> ExtractionResult:
"""
Generalize the given examples.
:param object:
:param examples:
:return:
"""
cls = self.template_class
sv = self.schemaview
prompt = "example:\n"
for example in examples:
prompt += f"{self.serialize_object(example)}\n\n"
prompt += "\n\n===\n\n"
if isinstance(object, pydantic.BaseModel):
object = object.dict()
for k, v in object.items():
if v:
slot = sv.induced_slot(k, cls.name)
prompt += f"{k}: {self._serialize_value(v, slot)}\n"
logging.debug(f"PROMPT: {prompt}")
payload = self.client.complete(prompt)
prediction = self.parse_completion_payload(payload, object=object)
return ExtractionResult(
input_text=prompt,
raw_completion_output=payload,
# prompt=self.last_prompt,
results=[prediction],
named_entities=self.named_entities,
)
def map_terms(self, terms: List[str], ontology: str) -> Dict[str, List[str]]:
"""
Map the given terms to the given ontology.
EXPERIMENTAL
currently GPT-3 does not do so well with this task.
:param terms:
:param ontology:
:return:
"""
# TODO: make a separate config
examples = {
"go": {
"nucleui": "nucleus",
"mitochondrial": "mitochondrion",
"signaling": "signaling pathway",
"cysteine biosynthesis": "cysteine biosynthetic process",
"alcohol dehydrogenase": "alcohol dehydrogenase activity",
},
"uberon": {
"feet": "pes",
"forelimb, left": "left forelimb",
"hippocampus": "Ammons horn",
},
}
ontology = ontology.lower()
if ontology in examples:
example = examples[ontology]
else:
example = examples["uberon"]
prompt = "Normalize the following semicolon separated\
list of terms to the {ontology.upper()} ontology\n\n"
prompt += "For example:\n\n"
for k, v in example.items():
prompt += f"{k}: {v}\n"
prompt += "===\n\nTerms:"
prompt += "; ".join(terms)
prompt += "===\n\n"
payload = self.client.complete(prompt)
# outer parse
best_results = []
for sep in ["\n", "; "]:
results = payload.split(sep)
if len(results) > len(best_results):
best_results = results
def normalize(s: str) -> str:
s = s.strip()
s.replace("_", " ")
return s.lower()
mappings = {}
for result in best_results:
if ":" not in result:
logging.error(f"Count not parse result: {result}")
continue
k, v = result.strip().split(":", 1)
k = k.strip()
v = v.strip()
for t in terms:
if normalize(t) == normalize(k):
mappings[t] = v
break
for t in terms:
if t not in mappings:
logging.warning(f"Could not map term: {t}")
return mappings
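# Illustrative example (terms and completion are hypothetical):
#   map_terms(["feet", "hippocampus"], "uberon")
# builds a few-shot prompt from the examples above and, given a completion
# like "feet: pes\nhippocampus: Ammons horn", returns
#   {"feet": "pes", "hippocampus": "Ammons horn"}.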
def serialize_object(self, example: EXAMPLE, cls: ClassDefinition = None) -> str:
if cls is None:
cls = self.template_class
if isinstance(example, str):
return example
if isinstance(example, pydantic.BaseModel):
example = example.dict()
lines = []
sv = self.schemaview
for k, v in example.items():
if not v:
continue
slot = sv.induced_slot(k, cls.name)
v_serialized = self._serialize_value(v, slot)
lines.append(f"{k}: {v_serialized}")
return "\n".join(lines)
def _serialize_value(self, val: Any, slot: SlotDefinition) -> str:
if val is None:
return ""
if isinstance(val, list):
return "; ".join([self._serialize_value(v, slot) for v in val if v])
if isinstance(val, dict):
return " - ".join([self._serialize_value(v, slot) for v in val.values() if v])
sv = self.schemaview
if slot.range in sv.all_classes():
if self.labelers:
labelers = list(self.labelers)
else:
labelers = []
labelers += self.get_annotators(sv.get_class(slot.range))
if labelers:
for labeler in labelers:
label = labeler.label(val)
if label:
return label
return val
def _raw_extract(self, text, cls: ClassDefinition = None, object: OBJECT = None) -> str:
"""
Extract annotations from the given text.
:param text:
:return:
"""
prompt = self.get_completion_prompt(cls, text, object=object)
self.last_prompt = prompt
payload = self.client.complete(prompt)
return payload
def get_completion_prompt(
self, cls: ClassDefinition = None, text: str = None, object: OBJECT = None
) -> str:
"""Get the prompt for the given template."""
if cls is None:
cls = self.template_class
if not text or ("\n" in text or len(text) > 60):
prompt = (
"From the text below, extract the following entities in the following format:\n\n"
)
else:
prompt = "Split the following piece of text into fields in the following format:\n\n"
for slot in self.schemaview.class_induced_slots(cls.name):
if ANNOTATION_KEY_PROMPT_SKIP in slot.annotations:
continue
if ANNOTATION_KEY_PROMPT in slot.annotations:
slot_prompt = slot.annotations[ANNOTATION_KEY_PROMPT].value
elif slot.description:
slot_prompt = slot.description
else:
if slot.multivalued:
slot_prompt = f"semicolon-separated list of {slot.name}s"
else:
slot_prompt = f"the value for {slot.name}"
if slot.range in self.schemaview.all_enums():
enum_def = self.schemaview.get_enum(slot.range)
pvs = [str(k) for k in enum_def.permissible_values.keys()]
slot_prompt += f"Must be one of: {', '.join(pvs)}"
prompt += f"{slot.name}: <{slot_prompt}>\n"
# prompt += "Do not answer if you don't know\n\n"
prompt = f"{prompt}\n\nText:\n{text}\n\n===\n\n"
if object:
if cls is None:
cls = self.template_class
if isinstance(object, pydantic.BaseModel):
object = object.dict()
for k, v in object.items():
if v:
slot = self.schemaview.induced_slot(k, cls.name)
prompt += f"{k}: {self._serialize_value(v, slot)}\n"
return prompt
def _parse_response_to_dict(
self, results: str, cls: ClassDefinition = None
) -> Optional[RESPONSE_DICT]:
"""
Parse the pseudo-YAML response from OpenAI into a dictionary object.
E.g.
foo: a; b; c
becomes
{"foo": ["a", "b", "c"]}
:param results:
:return:
"""
lines = results.splitlines()
ann = {}
promptable_slots = self.promptable_slots(cls)
for line in lines:
line = line.strip()
if not line:
continue
if ":" not in line:
if len(promptable_slots) == 1:
slot = promptable_slots[0]
logging.warning(
f"Coercing to YAML-like with key {slot.name}: Original line: {line}"
)
line = f"{slot.name}: {line}"
else:
logging.error(f"Line '{line}' does not contain a colon; ignoring")
return
r = self._parse_line_to_dict(line, cls)
if r is not None:
field, val = r
ann[field] = val
return ann
def _parse_line_to_dict(
self, line: str, cls: ClassDefinition = None
) -> Optional[Tuple[FIELD, RESPONSE_ATOM]]:
if cls is None:
cls = self.template_class
sv = self.schemaview
# each line is a key-value pair
logging.info(f"PARSING LINE: {line}")
field, val = line.split(":", 1)
# Field normalization:
# The LLM may mutate the output format somewhat,
# randomly pluralizing or replacing spaces with underscores
field = field.lower().replace(" ", "_")
cls_slots = sv.class_slots(cls.name)
slot = None
if field in cls_slots:
slot = sv.induced_slot(field, cls.name)
else:
if field.endswith("s"):
field = field[:-1]
if field in cls_slots:
slot = sv.induced_slot(field, cls.name)
if not slot:
logging.error(f"Cannot find slot for {field} in {line}")
# raise ValueError(f"Cannot find slot for {field} in {line}")
return
if not val:
msg = f"Empty value in key-value line: {line}"
if slot.required:
raise ValueError(msg)
if slot.recommended:
logging.warning(msg)
return
inlined = slot.inlined
slot_range = sv.get_class(slot.range)
if not inlined:
if slot.range in sv.all_classes():
inlined = sv.get_identifier_slot(slot_range.name) is None
val = val.strip()
if slot.multivalued:
vals = [v.strip() for v in val.split(";")]
else:
vals = [val]
vals = [val for val in vals if val]
logging.debug(f"SLOT: {slot.name} INL: {inlined} VALS: {vals}")
if inlined:
transformed = False
slots_of_range = sv.class_slots(slot_range.name)
if self.recurse or len(slots_of_range) > 2:
vals = [self._extract_from_text_to_dict(v, slot_range) for v in vals]
else:
for sep in [" - ", ":", "/", "*", "-"]:
if all([sep in v for v in vals]):
vals = [dict(zip(slots_of_range, v.split(sep, 1))) for v in vals]
for v in vals:
for k in v.keys():
v[k] = v[k].strip()
transformed = True
break
if not transformed:
logging.warning(f"Did not find separator in {vals} for line {line}")
return
# transform back from list to single value if not multivalued
if slot.multivalued:
final_val = vals
else:
if len(vals) != 1:
logging.error(f"Expected 1 value for {slot.name} in '{line}' but got {vals}")
final_val = vals[0]
return field, final_val
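# Illustrative example of the non-recursive pair-splitting branch above
# (slot and sub-slot names are hypothetical): with recurse=False and a range
# class having exactly two slots ("label", "value"), the completion value
# "kinase - MAPK1" is split on " - " into {"label": "kinase", "value": "MAPK1"}.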
def parse_completion_payload(
self, results: str, cls: ClassDefinition = None, object: dict = None
) -> pydantic.BaseModel:
"""
Parse the completion payload into a pydantic class.
:param results:
:param cls:
:param object: stub object
:return:
"""
raw = self._parse_response_to_dict(results, cls)
logging.debug(f"RAW: {raw}")
if object:
raw = {**object, **raw}
return self.ground_annotation_object(raw, cls)
def ground_annotation_object(
self, ann: RESPONSE_DICT, cls: ClassDefinition = None
) -> Optional[pydantic.BaseModel]:
"""Ground the direct parse of the OpenAI payload.
The raw openAI payload is a YAML-like string, which is parsed to
a response dictionary.
This dictionary is then grounded, using this method
:param ann: Raw annotation object
:param cls: schema class the ground object should instantiate
:return: Grounded annotation object
"""
logging.debug(f"Grounding annotation object {ann}")
if cls is None:
cls = self.template_class
sv = self.schemaview
new_ann = {}
if ann is None:
logging.error(f"Cannot ground None annotation, cls={cls.name}")
return
for field, vals in ann.items():
if isinstance(vals, list):
multivalued = True
else:
multivalued = False
vals = [vals]
slot = sv.induced_slot(field, cls.name)
rng_cls = sv.get_class(slot.range)
enum_def = None
if slot.range:
if slot.range in self.schemaview.all_enums():
enum_def = self.schemaview.get_enum(slot.range)
new_ann[field] = []
for val in vals:
if not val:
continue
if isinstance(val, tuple):
# special case for pairs
sub_slots = sv.class_induced_slots(rng_cls.name)
obj = {}
for i in range(0, len(val)):
sub_slot = sub_slots[i]
sub_rng = sv.get_class(sub_slot.range)
if not sub_rng:
logging.error(f"Cannot find range for {sub_slot.name}")
result = self.normalize_named_entity(val[i], sub_slot.range)
obj[sub_slot.name] = result
elif isinstance(val, dict):
# recurse
obj = self.ground_annotation_object(val, rng_cls)
else:
obj = self.normalize_named_entity(val, slot.range)
if enum_def:
found = False
logging.info(f"Looking for {obj} in {enum_def.name}")
for k, _pv in enum_def.permissible_values.items():
if obj.lower() == k.lower():
obj = k
found = True
break
if not found:
logging.info(f"Cannot find enum value for {obj} in {enum_def.name}")
obj = None
if multivalued:
new_ann[field].append(obj)
else:
new_ann[field] = obj
logging.debug(f"Creating object from dict {new_ann}")
logging.info(new_ann)
py_cls = self.template_module.__dict__[cls.name]
return py_cls(**new_ann)
| [
"; ",
"Normalize the following semicolon separated list of terms to the {ontology.upper()} ontology\n\n",
"===\n\nTerms:",
"example:\n",
"PLACEHOLDER\n\nText:\nPLACEHOLDER\n\n===\n\n",
"Split the following piece of text into fields in the following format:\n\n",
"For example:\n\n",
"PLACEHOLDER: PLACEHOLDER\n",
"\n\n===\n\n",
", ",
"From the text below, extract the following entities in the following format:\n\n",
"===\n\n"
] |
2024-01-10 | searchgame/ontogpt | src~ontogpt~engines~ggml_engine.py | """
GGML-based knowledge extractor class.
Like the SPIRES implementation seen in spires_engine.py,
this process constructs prompt-completions in which
a pseudo-YAML structure is requested and the YAML
structure corresponds to a template class.
This class is intended for use with GGML-format models
such as those released by GPT4All (https://gpt4all.io/).
"""
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import pydantic
from linkml_runtime.linkml_model import ClassDefinition, SlotDefinition
from ontogpt.engines.knowledge_engine import (
ANNOTATION_KEY_PROMPT,
ANNOTATION_KEY_PROMPT_SKIP,
EXAMPLE,
FIELD,
OBJECT,
KnowledgeEngine,
chunk_text,
)
from ontogpt.templates.core import ExtractionResult
from ontogpt.utils.gpt4all_runner import chain_gpt4all_model, set_up_gpt4all_model
this_path = Path(__file__).parent
RESPONSE_ATOM = Union[str, "ResponseAtom"]
RESPONSE_DICT = Dict[FIELD, Union[RESPONSE_ATOM, List[RESPONSE_ATOM]]]
@dataclass
class GGMLEngine(KnowledgeEngine):
"""Knowledge extractor for GGML chat models."""
sentences_per_window: Optional[int] = None
"""If set, this will split the text into chains of sentences,
where this determines the maximum number of sentences per chain.
The results are then merged together."""
recurse: bool = True
"""If true, then complex non-named entity objects are always recursively parsed.
If this is false AND the complex object is a pair, then token-based splitting is
instead used."""
local_model = None
"""Cached local model path."""
loaded_model = None
"""Langchain loaded model object."""
def __post_init__(self, local_model):
self.local_model = local_model
self.loaded_model = set_up_gpt4all_model(self.local_model)
if self.template:
self.template_class = self._get_template_class(self.template)
if self.template_class:
logging.info(f"Using template {self.template_class.name}")
def extract_from_text(
self, text: str, cls: ClassDefinition = None, object: OBJECT = None
) -> ExtractionResult:
"""
Extract annotations from the given text.
:param text:
:param cls:
:param object: optional stub object
:return:
"""
if self.sentences_per_window:
chunks = chunk_text(text, self.sentences_per_window)
extracted_object = None
for chunk in chunks:
raw_text = self._raw_extract(chunk, cls, object=object)
logging.info(f"RAW TEXT: {raw_text}")
next_object = self.parse_completion_payload(raw_text, cls, object=object)
if extracted_object is None:
extracted_object = next_object
else:
for k, v in next_object.items():
if isinstance(v, list):
extracted_object[k] += v
else:
if k not in extracted_object:
extracted_object[k] = v
else:
extracted_object[k] = v
else:
raw_text = self._raw_extract(text, cls, object=object)
logging.info(f"RAW TEXT: {raw_text}")
extracted_object = self.parse_completion_payload(raw_text, cls, object=object)
return ExtractionResult(
input_text=text,
raw_completion_output=raw_text,
prompt=self.last_prompt,
extracted_object=extracted_object,
named_entities=self.named_entities,
)
def _extract_from_text_to_dict(self, text: str, cls: ClassDefinition = None) -> RESPONSE_DICT:
raw_text = self._raw_extract(text, cls)
return self._parse_response_to_dict(raw_text, cls)
def generalize(
self, object: Union[pydantic.BaseModel, dict], examples: List[EXAMPLE]
) -> ExtractionResult:
"""
Generalize the given examples.
:param object:
:param examples:
:return:
"""
cls = self.template_class
sv = self.schemaview
prompt = "example:\n"
for example in examples:
prompt += f"{self.serialize_object(example)}\n\n"
prompt += "\n\n===\n\n"
if isinstance(object, pydantic.BaseModel):
object = object.dict()
for k, v in object.items():
if v:
slot = sv.induced_slot(k, cls.name)
prompt += f"{k}: {self._serialize_value(v, slot)}\n"
logging.debug(f"PROMPT: {prompt}")
payload = self.client.complete(prompt)
prediction = self.parse_completion_payload(payload, object=object)
return ExtractionResult(
input_text=prompt,
raw_completion_output=payload,
# prompt=self.last_prompt,
results=[prediction],
named_entities=self.named_entities,
)
def map_terms(self, terms: List[str], ontology: str) -> Dict[str, List[str]]:
"""
Map the given terms to the given ontology.
EXPERIMENTAL
currently GPT-3 does not do so well with this task.
:param terms:
:param ontology:
:return:
"""
# TODO: make a separate config
examples = {
"go": {
"nucleui": "nucleus",
"mitochondrial": "mitochondrion",
"signaling": "signaling pathway",
"cysteine biosynthesis": "cysteine biosynthetic process",
"alcohol dehydrogenase": "alcohol dehydrogenase activity",
},
"uberon": {
"feet": "pes",
"forelimb, left": "left forelimb",
"hippocampus": "Ammons horn",
},
}
ontology = ontology.lower()
if ontology in examples:
example = examples[ontology]
else:
example = examples["uberon"]
prompt = "Normalize the following semicolon separated\
list of terms to the {ontology.upper()} ontology\n\n"
prompt += "For example:\n\n"
for k, v in example.items():
prompt += f"{k}: {v}\n"
prompt += "===\n\nTerms:"
prompt += "; ".join(terms)
prompt += "===\n\n"
payload = self.client.complete(prompt)
# outer parse
best_results = []
for sep in ["\n", "; "]:
results = payload.split(sep)
if len(results) > len(best_results):
best_results = results
def normalize(s: str) -> str:
s = s.strip()
s.replace("_", " ")
return s.lower()
mappings = {}
for result in best_results:
if ":" not in result:
logging.error(f"Count not parse result: {result}")
continue
k, v = result.strip().split(":", 1)
k = k.strip()
v = v.strip()
for t in terms:
if normalize(t) == normalize(k):
mappings[t] = v
break
for t in terms:
if t not in mappings:
logging.warning(f"Could not map term: {t}")
return mappings
def serialize_object(self, example: EXAMPLE, cls: ClassDefinition = None) -> str:
if cls is None:
cls = self.template_class
if isinstance(example, str):
return example
if isinstance(example, pydantic.BaseModel):
example = example.dict()
lines = []
sv = self.schemaview
for k, v in example.items():
if not v:
continue
slot = sv.induced_slot(k, cls.name)
v_serialized = self._serialize_value(v, slot)
lines.append(f"{k}: {v_serialized}")
return "\n".join(lines)
def _serialize_value(self, val: Any, slot: SlotDefinition) -> str:
if val is None:
return ""
if isinstance(val, list):
return "; ".join([self._serialize_value(v, slot) for v in val if v])
if isinstance(val, dict):
return " - ".join([self._serialize_value(v, slot) for v in val.values() if v])
sv = self.schemaview
if slot.range in sv.all_classes():
if self.labelers:
labelers = list(self.labelers)
else:
labelers = []
labelers += self.get_annotators(sv.get_class(slot.range))
if labelers:
for labeler in labelers:
label = labeler.label(val)
if label:
return label
return val
def _raw_extract(self, text, cls: ClassDefinition = None, object: OBJECT = None) -> str:
"""
Extract annotations from the given text.
:param text:
:return:
"""
prompt = self.get_completion_prompt(cls, text, object=object)
self.last_prompt = prompt
payload = chain_gpt4all_model(self.loaded_model, prompt)
return payload
def get_completion_prompt(
self, cls: ClassDefinition = None, text: str = None, object: OBJECT = None
) -> str:
"""Get the prompt for the given template."""
if cls is None:
cls = self.template_class
if not text or ("\n" in text or len(text) > 60):
prompt = (
"From the text below, extract the following entities in the following format:\n\n"
)
else:
prompt = "Split the following piece of text into fields in the following format:\n\n"
for slot in self.schemaview.class_induced_slots(cls.name):
if ANNOTATION_KEY_PROMPT_SKIP in slot.annotations:
continue
if ANNOTATION_KEY_PROMPT in slot.annotations:
slot_prompt = slot.annotations[ANNOTATION_KEY_PROMPT].value
elif slot.description:
slot_prompt = slot.description
else:
if slot.multivalued:
slot_prompt = f"semicolon-separated list of {slot.name}s"
else:
slot_prompt = f"the value for {slot.name}"
if slot.range in self.schemaview.all_enums():
enum_def = self.schemaview.get_enum(slot.range)
pvs = [str(k) for k in enum_def.permissible_values.keys()]
slot_prompt += f"Must be one of: {', '.join(pvs)}"
prompt += f"{slot.name}: <{slot_prompt}>\n"
# prompt += "Do not answer if you don't know\n\n"
prompt = f"{prompt}\n\nText:\n{text}\n\n===\n\n"
if object:
if cls is None:
cls = self.template_class
if isinstance(object, pydantic.BaseModel):
object = object.dict()
for k, v in object.items():
if v:
slot = self.schemaview.induced_slot(k, cls.name)
prompt += f"{k}: {self._serialize_value(v, slot)}\n"
return prompt
def _parse_response_to_dict(
self, results: str, cls: ClassDefinition = None
) -> Optional[RESPONSE_DICT]:
"""
Parse the pseudo-YAML response from OpenAI into a dictionary object.
E.g.
foo: a; b; c
becomes
{"foo": ["a", "b", "c"]}
:param results:
:return:
"""
lines = results.splitlines()
ann = {}
promptable_slots = self.promptable_slots(cls)
for line in lines:
line = line.strip()
if not line:
continue
if ":" not in line:
if len(promptable_slots) == 1:
slot = promptable_slots[0]
logging.warning(
f"Coercing to YAML-like with key {slot.name}: Original line: {line}"
)
line = f"{slot.name}: {line}"
else:
logging.error(f"Line '{line}' does not contain a colon; ignoring")
return
r = self._parse_line_to_dict(line, cls)
if r is not None:
field, val = r
ann[field] = val
return ann
def _parse_line_to_dict(
self, line: str, cls: ClassDefinition = None
) -> Optional[Tuple[FIELD, RESPONSE_ATOM]]:
if cls is None:
cls = self.template_class
sv = self.schemaview
# each line is a key-value pair
logging.info(f"PARSING LINE: {line}")
field, val = line.split(":", 1)
# Field normalization:
# The LLM may mutate the output format somewhat,
# randomly pluralizing or replacing spaces with underscores
field = field.lower().replace(" ", "_")
cls_slots = sv.class_slots(cls.name)
slot = None
if field in cls_slots:
slot = sv.induced_slot(field, cls.name)
else:
if field.endswith("s"):
field = field[:-1]
if field in cls_slots:
slot = sv.induced_slot(field, cls.name)
if not slot:
logging.error(f"Cannot find slot for {field} in {line}")
# raise ValueError(f"Cannot find slot for {field} in {line}")
return
if not val:
msg = f"Empty value in key-value line: {line}"
if slot.required:
raise ValueError(msg)
if slot.recommended:
logging.warning(msg)
return
inlined = slot.inlined
slot_range = sv.get_class(slot.range)
if not inlined:
if slot.range in sv.all_classes():
inlined = sv.get_identifier_slot(slot_range.name) is None
val = val.strip()
if slot.multivalued:
vals = [v.strip() for v in val.split(";")]
else:
vals = [val]
vals = [val for val in vals if val]
logging.debug(f"SLOT: {slot.name} INL: {inlined} VALS: {vals}")
if inlined:
transformed = False
slots_of_range = sv.class_slots(slot_range.name)
if self.recurse or len(slots_of_range) > 2:
vals = [self._extract_from_text_to_dict(v, slot_range) for v in vals]
else:
for sep in [" - ", ":", "/", "*", "-"]:
if all([sep in v for v in vals]):
vals = [dict(zip(slots_of_range, v.split(sep, 1))) for v in vals]
for v in vals:
for k in v.keys():
v[k] = v[k].strip()
transformed = True
break
if not transformed:
logging.warning(f"Did not find separator in {vals} for line {line}")
return
# transform back from list to single value if not multivalued
if slot.multivalued:
final_val = vals
else:
if len(vals) != 1:
logging.error(f"Expected 1 value for {slot.name} in '{line}' but got {vals}")
final_val = vals[0]
return field, final_val
def parse_completion_payload(
self, results: str, cls: ClassDefinition = None, object: dict = None
) -> pydantic.BaseModel:
"""
Parse the completion payload into a pydantic class.
:param results:
:param cls:
:param object: stub object
:return:
"""
raw = self._parse_response_to_dict(results, cls)
logging.debug(f"RAW: {raw}")
if object:
raw = {**object, **raw}
return self.ground_annotation_object(raw, cls)
def ground_annotation_object(
self, ann: RESPONSE_DICT, cls: ClassDefinition = None
) -> Optional[pydantic.BaseModel]:
"""Ground the direct parse of the OpenAI payload.
The raw openAI payload is a YAML-like string, which is parsed to
a response dictionary.
This dictionary is then grounded, using this method
:param ann: Raw annotation object
:param cls: schema class the ground object should instantiate
:return: Grounded annotation object
"""
logging.debug(f"Grounding annotation object {ann}")
if cls is None:
cls = self.template_class
sv = self.schemaview
new_ann = {}
if ann is None:
logging.error(f"Cannot ground None annotation, cls={cls.name}")
return
for field, vals in ann.items():
if isinstance(vals, list):
multivalued = True
else:
multivalued = False
vals = [vals]
slot = sv.induced_slot(field, cls.name)
rng_cls = sv.get_class(slot.range)
enum_def = None
if slot.range:
if slot.range in self.schemaview.all_enums():
enum_def = self.schemaview.get_enum(slot.range)
new_ann[field] = []
for val in vals:
if not val:
continue
if isinstance(val, tuple):
# special case for pairs
sub_slots = sv.class_induced_slots(rng_cls.name)
obj = {}
for i in range(0, len(val)):
sub_slot = sub_slots[i]
sub_rng = sv.get_class(sub_slot.range)
if not sub_rng:
logging.error(f"Cannot find range for {sub_slot.name}")
result = self.normalize_named_entity(val[i], sub_slot.range)
obj[sub_slot.name] = result
elif isinstance(val, dict):
# recurse
obj = self.ground_annotation_object(val, rng_cls)
else:
obj = self.normalize_named_entity(val, slot.range)
if enum_def:
found = False
logging.info(f"Looking for {obj} in {enum_def.name}")
for k, _pv in enum_def.permissible_values.items():
if obj.lower() == k.lower():
obj = k
found = True
break
if not found:
logging.info(f"Cannot find enum value for {obj} in {enum_def.name}")
obj = None
if multivalued:
new_ann[field].append(obj)
else:
new_ann[field] = obj
logging.debug(f"Creating object from dict {new_ann}")
logging.info(new_ann)
py_cls = self.template_module.__dict__[cls.name]
return py_cls(**new_ann)
| [
"; ",
"===\n\n",
"Normalize the following semicolon separated list of terms to the {ontology.upper()} ontology\n\n",
"===\n\nTerms:",
"example:\n",
"PLACEHOLDER\n\nText:\nPLACEHOLDER\n\n===\n\n",
"For example:\n\n",
"PLACEHOLDER: PLACEHOLDER\n",
"\n\n===\n\n",
", ",
"From the text below, extract the following entities in the following format:\n\n",
"Split the following piece of text into fields in the following format:\n\n"
] |
2024-01-10 | searchgame/ontogpt | src~ontogpt~engines~halo_engine.py | """
Uses code-davinci-002.
Note also that fine-tuning can't be done with code-davinci-002, see:
https://community.openai.com/t/finetuning-code-davinci/23132/2
"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Dict, List, Optional, Set
import openai
import pydantic
import tiktoken
import yaml
from linkml.utils.schema_fixer import uncamel
from linkml_runtime.utils.formatutils import camelcase
from oaklib.datamodels.obograph import Graph
from oaklib.datamodels.vocabulary import IS_A
from oaklib.interfaces.obograph_interface import OboGraphInterface
from tiktoken import Encoding
from ontogpt.clients import OpenAIClient
from ontogpt.engines.knowledge_engine import FIELD, KnowledgeEngine
from ontogpt.io.yaml_wrapper import dump_minimal_yaml
from ontogpt.templates.halo import Ontology, OntologyElement
this_path = Path(__file__).parent
logger = logging.getLogger(__name__)
ELEMENT_NAME = str
INSTRUCTIONS = """
## Instructions:
## Add an additional element to the YAML below, which is for elements
## in an industrial ontology. Complete as far as possible the following
## fields:
"""
class StructuredPrompt(pydantic.BaseModel):
header: str = None
body: str = None
main_prompt: str = None
@property
def text(self) -> str:
return f"{self.header}\n{self.body}\n{self.main_prompt}"
@dataclass
class HALOEngine(KnowledgeEngine):
"""Engine for Hallucinating Latent Ontologies."""
engine: str = "code-davinci-002"
ontology: Ontology = None
traverse_slots: List[FIELD] = field(
default_factory=lambda: ["subtypes", "parts", "subclass_of", "part_of"]
)
fixed_slot_values: Dict[str, str] = None
adapter: OboGraphInterface = None
visited: Set[ELEMENT_NAME] = field(default_factory=lambda: set())
candidates: List[ELEMENT_NAME] = None
always_extend: bool = False
expand_horizon: bool = False
element_scores: Dict[ELEMENT_NAME, float] = field(default_factory=lambda: {})
"""Ranks each element by estimated informativeness for training."""
tokenizer_encoding: Encoding = field(default_factory=lambda: tiktoken.get_encoding("gpt2"))
def __post_init__(self):
self.template_class = self._get_template_class("halo.OntologyElement")
self.client = OpenAIClient(model=self.engine)
self.api_key = self._get_openai_api_key()
openai.api_key = self.api_key
def seed(self, seed_ontology: Ontology):
"""Seed the engine with an initial ontology.
:param seed_ontology:
:return:
"""
self.ontology = seed_ontology
if not self.expand_horizon:
self.visited = {x.name for x in self.ontology.elements}
def seed_from_file(self, file_path: str) -> Ontology:
"""Seed the engine with an initial ontology from a file.
:param file_path:
:return:
"""
ontology = Ontology(**yaml.safe_load(open(file_path)))
self.seed(ontology)
logger.info(f"Seeded with {len(ontology.elements)} elements")
return ontology
def hallucinate(
self, seed_elements: List[ELEMENT_NAME] = None, num_iterations=10
) -> List[OntologyElement]:
"""Run the HALO engine for a given number of iterations.
Each iteration will expand the initial seed ontology.
:param num_iterations:
:return:
"""
added = []
if seed_elements:
for e in seed_elements:
added.append(self.hallucinate_element(e))
for i in range(num_iterations):
logger.info(f"Running HALO iteration {i}")
elt = self.hallucinate_once()
if elt:
added.append(elt)
else:
break
return added
def hallucinate_once(self) -> Optional[OntologyElement]:
"""Run the HALO engine once.
Finds a candidate element to expand,
then runs HALO on that element by generating
a prompt for it.
:return:
"""
candidate_elements = self.get_candidate_elements()
logger.info(f"Found {len(candidate_elements)} candidate elements")
logger.info(f"Candidate elements: {candidate_elements} // visited={self.visited}")
if not candidate_elements:
return None
element = candidate_elements[0]
logger.info(f"Selected element {element}")
return self.hallucinate_element(element)
def get_candidate_elements(self) -> List[ELEMENT_NAME]:
"""Get candidate elements to expand.
Side effect: elements already visited are removed from the candidate cache.
:return:
"""
if self.candidates is None:
# TODO: exclude seed set that is not on the horizon
self.candidates = [x.name for x in self.ontology.elements]
self.candidates = [c for c in self.candidates if c not in self.visited]
return self.candidates
def extend_candidates(self, elements: List[ELEMENT_NAME]) -> List[ELEMENT_NAME]:
"""Extend candidates by all entities in the signature of the specified elements.
:return:
"""
visited = self.visited
if not self.traverse_slots:
raise ValueError("No slots to traverse")
for element_name in elements:
visited.add(element_name)
logger.info(f"Extending candidates for {element_name}; visited={visited}")
element = self.get_element(element_name)
for slot_name in self.traverse_slots:
refs = getattr(element, slot_name)
for ref in refs:
if ref not in visited:
if not self.candidates:
self.candidates = []
self.candidates.append(ref)
logger.info(f" -- Added {ref} to candidates")
return self.candidates
def get_element(self, element_name: ELEMENT_NAME) -> Optional[OntologyElement]:
"""Get an element by name.
:param element_name:
:return:
"""
for e in self.ontology.elements:
if e.name == element_name:
return e
def old_get_candidate_elements(self) -> List[ELEMENT_NAME]:
"""Get candidate elements for HALO.
:return:
"""
candidate_elements = set()
visited = self.visited
if not self.traverse_slots:
raise ValueError("No slots to traverse")
for element in self.ontology.elements:
all_slots_have_refs = True
for slot_name in self.traverse_slots:
refs = getattr(element, slot_name)
if len(refs) == 0 and element.name not in visited:
candidate_elements.add(element.name)
all_slots_have_refs = False
for ref in refs:
if ref not in visited:
candidate_elements.add(ref)
visited.add(ref)
if all_slots_have_refs and not self.always_extend:
visited.add(element.name)
return list(candidate_elements)
def hallucinate_element(self, element: ELEMENT_NAME) -> OntologyElement:
"""Generate an ontology element based on its name.
:param element: example: LeftDigit1OfHand
:return:
"""
logger.info(f"Hallucinating element {element}")
example_elements = self.get_example_elements(element)
example_element_names = [x.name for x in example_elements]
logger.info(f"Found {len(example_elements)} example elements: {example_element_names}")
# TODO: set bound dynamically
prompt = self.generate_prompt(element, example_elements[0:10])
# logger.info(f"Generated prompt: {prompt}")
payload = self.client.complete(prompt.text)
objs = self.integrate_payload(prompt, payload)
logger.info(f"Integrated {len(objs)} objects")
obj = objs[0]
self.extend_candidates([obj.name])
return obj
def get_example_elements(self, element: ELEMENT_NAME) -> List[OntologyElement]:
"""Get example elements for HALO.
:param element:
:return:
"""
name = uncamel(element)
toks = set(self.tokenizer_encoding.encode(name))
score_element_pairs = [(self.get_element_score(e, toks), e) for e in self.ontology.elements]
score_element_pairs.sort(key=lambda x: x[0], reverse=True)
logger.info(f"Scores[{element}: {[x for x, _ in score_element_pairs]}")
logger.info(f"Sorted elements: {score_element_pairs}")
return [x for _, x in score_element_pairs]
def get_element_score(self, element: OntologyElement, tokens: Set[int]) -> float:
"""Calculate a score for an element based on how informative it is for few-shot learning.
:param element:
:param tokens: tokenized form of the element name
:return:
"""
element_name = element.name
if element_name in self.element_scores:
score = self.element_scores[element_name]
else:
score = 0
for _, v in element.dict().items():
if v:
score += 1
self.element_scores[element_name] = score
element_tokens = set(self.tokenizer_encoding.encode(element_name))
jaccard = len(tokens.intersection(element_tokens)) / len(tokens.union(element_tokens))
return score / 100 + jaccard
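# Worked example (illustrative numbers): an element with 12 populated fields
# (score = 12) whose name shares half its tokens with the query name
# (jaccard = 0.5) scores 12 / 100 + 0.5 = 0.62, so name overlap dominates
# and field completeness acts as a tie-breaker.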
def generate_prompt(
self, seed_element: ELEMENT_NAME, example_elements: List[OntologyElement]
) -> StructuredPrompt:
"""Generate a prompt for HALO.
:param seed_element:
:param example_elements:
:return:
"""
prompt = StructuredPrompt()
prompt.header = INSTRUCTIONS
for slot in self.schemaview.class_induced_slots("OntologyElement"):
desc = slot.description
if not desc:
logger.warning(f"No description for slot {slot.name}")
desc = ""
prompt.header += f"## {slot.name}: {desc}\n"
prompt.body = "\n## Examples:\n"
for element in example_elements:
prompt.body += dump_minimal_yaml(element)
stub_object = {
"name": seed_element,
}
for k, v in self.fixed_slot_values.items():
stub_object[k] = v
prompt.main_prompt = yaml.dump([stub_object])
logger.info(
f"Generated prompt: {len(prompt.text)} = {len(prompt.header)} +\
{len(prompt.body)} + {len(prompt.main_prompt)}"
)
return prompt
def integrate_payload(
self, prompt: StructuredPrompt, payload: Dict[str, Any]
) -> List[OntologyElement]:
"""Integrate the payload from HALO into the ontology.
:param payload:
:param element:
:return:
"""
effective_payload = prompt.main_prompt + payload
# logger.info(f"## EFFECTIVE: {effective_payload}")
try:
objs = yaml.safe_load(effective_payload)
except:
# codex does not give reliable YAML
objs = self.parse_what_you_can(effective_payload)
logger.info(f"## PARSED: {len(objs)}")
elt = self.integrate_object(objs[0])
logger.info(f" * INTEGRATED: {elt}")
return [elt]
def integrate_object(self, obj: Dict[str, Any], strict=True) -> Optional[OntologyElement]:
obj = self.repair_dict(obj)
try:
elt = OntologyElement(**obj)
except pydantic.ValidationError as e:
logger.warning(f"## COULD NOT PARSE: {obj} /// {e}")
if strict:
raise e
return None
self.add_element(elt)
return elt
def repair_dict(self, obj: dict) -> dict:
slots = self.schemaview.class_slots("OntologyElement")
nu_obj = {}
for k, v in obj.items():
if k not in slots:
logger.warning(f"Could not find slot {k} in slots")
continue
slot = self.schemaview.induced_slot(k, "OntologyElement")
if slot.multivalued and not isinstance(v, list):
logger.warning(f"Coercing {v} to list")
v = [v]
elif not slot.multivalued and isinstance(v, list):
logger.warning(f"Coercing {v} len {len(v)} to single value")
v = v[0]
nu_obj[k] = v
return nu_obj
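# Illustrative example (slot names are hypothetical): given a multivalued
# slot "subtypes" and a single-valued slot "name",
#   repair_dict({"subtypes": "Widget", "name": ["Foo", "Bar"]})
# coerces to {"subtypes": ["Widget"], "name": "Foo"}.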
def old_integrate_payload(self, prompt: StructuredPrompt, payload: Dict[str, Any]):
"""Integrate the payload from HALO into the ontology.
:param payload:
:param element:
:return:
"""
allowed_slots = self.schemaview.class_slots("OntologyElement")
effective_payload = prompt.main_prompt + payload
# logger.info(f"## EFFECTIVE: {effective_payload}")
try:
objs = yaml.safe_load(effective_payload)
except:
# codex does not give reliable YAML
objs = self.parse_what_you_can(effective_payload)
logger.info(f"## PARSED: {len(objs)}")
added = []
n = 0
for obj in objs:
n += 1
slots_populated = [k for k, v in obj.items() if v]
diff = set(slots_populated).difference(allowed_slots)
if diff:
logger.info(f"## SKIPPING SLOTS {diff}")
obj = {k: v for k, v in obj.items() if k in allowed_slots}
try:
elt = OntologyElement(**obj)
except pydantic.ValidationError as e:
logger.info(f"## COULD NOT PARSE: {obj} /// {e}")
return added
logger.info(f"Elt: {elt.name} // {slots_populated} // {obj}")
if self.add_element(elt):
logger.info(f" - Added {elt.name}")
added.append(elt)
else:
logger.info(f" - already got {elt.name}")
if n == 1:
logger.error(f"Failed to add first element {elt.name}")
logger.info(f"Added {len(added)} elements")
return added
def parse_what_you_can(self, yaml_str: str) -> List[Dict[str, Any]]:
"""Parse as much of the YAML as possible.
:param yaml_str:
:return:
"""
objs = None
chunk = ""
for line in yaml_str.split("\n"):
chunk += line + "\n"
try:
objs = yaml.safe_load(chunk)
except:
pass
if objs is None:
raise ValueError(f"Could not parse YAML {yaml_str}")
return objs
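# Illustrative behavior: the loop grows the buffer one line at a time and
# keeps the parse of the longest prefix that is valid YAML, so a completion
# whose final line is malformed still yields the objects parsed before it.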
def add_element(self, element: OntologyElement) -> bool:
"""Add an element to the ontology.
:param obj:
:return:
"""
existing = self.get_element(element.name)
if existing:
return False
self.ontology.elements.append(element)
return True
def xxextract_seed_ontology(self, seeds: List[str], predicates: List[str]) -> Ontology:
"""Extract an ontology from a given text.
:param text:
:return:
"""
ancestors = list(set(list(self.adapter.ancestors(seeds, predicates, reflexive=True))))
seed_graph = self.adapter.extract_graph(ancestors, predicates, dangling=False)
logger.info(len(seed_graph.nodes))
seed_ontology = self.ontology_from_obograph(seed_graph)
return seed_ontology
def xxontology_from_obograph(self, graph: Graph) -> Ontology:
"""Convert an OBO Graph to an Ontology.
:param graph:
:return:
"""
adapter = self.adapter
ontology = Ontology()
element_index = {}
node_to_element_name = {}
id2slot = {}
inverses = {}
for slot in self.schemaview.class_induced_slots(OntologyElement.__name__):
if slot.inverse:
inverses[slot.name] = slot.inverse
inverses[slot.inverse] = slot.name
if slot.slot_uri:
id2slot[slot.slot_uri] = slot
logger.info(list(id2slot.keys()))
logger.info(inverses)
for node in graph.nodes:
meta = node.meta
if not node.lbl:
continue
if not meta:
# logger.warning(f"Node {node.id} has no meta")
continue
element = OntologyElement(
name=self.node_to_name(node.id, node.lbl),
synonyms=[synonym.val for synonym in meta.synonyms],
description=meta.definition.val if meta.definition else None,
)
for k, v in self.fixed_slot_values.items():
setattr(element, k, v)
element_index[element.name] = element
node_to_element_name[node.id] = element.name
for edge in graph.edges:
if edge.pred == "is_a":
pred = IS_A
else:
try:
pred = adapter.uri_to_curie(edge.pred)
except:
pred = edge.pred
if pred not in id2slot:
continue
if edge.sub not in node_to_element_name:
continue
if edge.obj not in node_to_element_name:
continue
subject = node_to_element_name[edge.sub]
object = node_to_element_name[edge.obj]
slot = id2slot[pred]
getattr(element_index[subject], slot.name).append(object)
if slot.name in inverses:
inverse = inverses[slot.name]
getattr(element_index[object], inverse).append(subject)
for ldef in adapter.logical_definitions([node.id for node in graph.nodes]):
if ldef.definedClassId in node_to_element_name:
element = element_index[node_to_element_name[ldef.definedClassId]]
if not ldef.genusIds:
continue
if not ldef.restrictions:
continue
genus_elts = [node_to_element_name[g] for g in ldef.genusIds]
differentia = [
f"{adapter.label(r.propertyId)} some {self.node_to_name(r.fillerId)}"
for r in ldef.restrictions
]
element.equivalent_to = (
f"{' and '.join(genus_elts)} and {' and '.join(differentia)}"
)
logger.info(f"Equiv[{element.name}] = {element.equivalent_to}")
for element in element_index.values():
ontology.elements.append(element)
return ontology
def xxnode_to_name(self, curie: str, label: Optional[str] = None) -> str:
"""Convert a node to a name.
:param curie:
:param label:
:return:
"""
if label is None:
label = self.adapter.label(curie)
if label is None:
logger.warning(f"Node {curie} has no label")
label = curie
return camelcase(label)
| [
"None"
] |
2024-01-10 | searchgame/ontogpt | src~ontogpt~engines~hfhub_engine.py | """
HuggingFace Hub-based knowledge extractor class.
Like the SPIRES implementation seen in spires_engine.py,
this process constructs prompt-completions in which
a pseudo-YAML structure is requested and the YAML
structure corresponds to a template class.
This class is intended for use with HuggingFace Hub
models, specifically text-generation models.
Find them here:
https://huggingface.co/models?pipeline_tag=text-generation
"""
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import pydantic
from linkml_runtime.linkml_model import ClassDefinition, SlotDefinition
from ontogpt.engines.knowledge_engine import (
ANNOTATION_KEY_PROMPT,
ANNOTATION_KEY_PROMPT_SKIP,
EXAMPLE,
FIELD,
OBJECT,
KnowledgeEngine,
chunk_text,
)
from ontogpt.templates.core import ExtractionResult
from ontogpt.clients.hfhub_client import HFHubClient
this_path = Path(__file__).parent
RESPONSE_ATOM = Union[str, "ResponseAtom"]
RESPONSE_DICT = Dict[FIELD, Union[RESPONSE_ATOM, List[RESPONSE_ATOM]]]
@dataclass
class HFHubEngine(KnowledgeEngine):
"""Knowledge extractor for HuggingFace Hub models."""
sentences_per_window: Optional[int] = None
"""If set, this will split the text into chains of sentences,
where this determines the maximum number of sentences per chain.
The results are then merged together."""
recurse: bool = True
"""If true, then complex non-named entity objects are always recursively parsed.
If this is false AND the complex object is a pair, then token-based splitting is
instead used."""
api_client = HFHubClient()
"""API client for HF Hub."""
local_model = None
"""The name of the HF Hub model as per its repo, e.g., gpt2 or databricks/dolly-v2-3b"""
loaded_model = None
"""Langchain loaded model object."""
def __post_init__(self, local_model):
self.local_model = local_model
logging.info(f"Using HuggingFace model {self.local_model}")
self.loaded_model = self.api_client.get_model(self.local_model)
if self.template:
self.template_class = self._get_template_class(self.template)
if self.template_class:
logging.info(f"Using template {self.template_class.name}")
def extract_from_text(
self, text: str, cls: ClassDefinition = None, object: OBJECT = None
) -> ExtractionResult:
"""
Extract annotations from the given text.
:param text:
:param cls:
:param object: optional stub object
:return:
"""
if self.sentences_per_window:
chunks = chunk_text(text, self.sentences_per_window)
extracted_object = None
for chunk in chunks:
raw_text = self._raw_extract(chunk, cls, object=object)
logging.info(f"RAW TEXT: {raw_text}")
next_object = self.parse_completion_payload(raw_text, cls, object=object)
if extracted_object is None:
extracted_object = next_object
else:
for k, v in next_object.items():
if isinstance(v, list):
extracted_object[k] += v
else:
if k not in extracted_object:
extracted_object[k] = v
else:
extracted_object[k] = v
else:
raw_text = self._raw_extract(text, cls, object=object)
logging.info(f"RAW TEXT: {raw_text}")
extracted_object = self.parse_completion_payload(raw_text, cls, object=object)
return ExtractionResult(
input_text=text,
raw_completion_output=raw_text,
prompt=self.last_prompt,
extracted_object=extracted_object,
named_entities=self.named_entities,
)
def _extract_from_text_to_dict(self, text: str, cls: ClassDefinition = None) -> RESPONSE_DICT:
raw_text = self._raw_extract(text, cls)
return self._parse_response_to_dict(raw_text, cls)
def generalize(
self, object: Union[pydantic.BaseModel, dict], examples: List[EXAMPLE]
) -> ExtractionResult:
"""
Generalize the given examples.
:param object:
:param examples:
:return:
"""
cls = self.template_class
sv = self.schemaview
prompt = "example:\n"
for example in examples:
prompt += f"{self.serialize_object(example)}\n\n"
prompt += "\n\n===\n\n"
if isinstance(object, pydantic.BaseModel):
object = object.dict()
for k, v in object.items():
if v:
slot = sv.induced_slot(k, cls.name)
prompt += f"{k}: {self._serialize_value(v, slot)}\n"
logging.debug(f"PROMPT: {prompt}")
payload = self.client.complete(prompt)
prediction = self.parse_completion_payload(payload, object=object)
return ExtractionResult(
input_text=prompt,
raw_completion_output=payload,
# prompt=self.last_prompt,
results=[prediction],
named_entities=self.named_entities,
)
def map_terms(self, terms: List[str], ontology: str) -> Dict[str, List[str]]:
"""
Map the given terms to the given ontology.
:param terms:
:param ontology:
:return:
"""
# TODO: make a separate config
examples = {
"go": {
"nucleui": "nucleus",
"mitochondrial": "mitochondrion",
"signaling": "signaling pathway",
"cysteine biosynthesis": "cysteine biosynthetic process",
"alcohol dehydrogenase": "alcohol dehydrogenase activity",
},
"uberon": {
"feet": "pes",
"forelimb, left": "left forelimb",
"hippocampus": "Ammons horn",
},
}
ontology = ontology.lower()
if ontology in examples:
example = examples[ontology]
else:
example = examples["uberon"]
prompt = "Normalize the following semicolon separated\
list of terms to the {ontology.upper()} ontology\n\n"
prompt += "For example:\n\n"
for k, v in example.items():
prompt += f"{k}: {v}\n"
prompt += "===\n\nTerms:"
prompt += "; ".join(terms)
prompt += "===\n\n"
payload = self.client.complete(prompt)
# outer parse
best_results = []
for sep in ["\n", "; "]:
results = payload.split(sep)
if len(results) > len(best_results):
best_results = results
def normalize(s: str) -> str:
s = s.strip()
s.replace("_", " ")
return s.lower()
mappings = {}
for result in best_results:
if ":" not in result:
logging.error(f"Count not parse result: {result}")
continue
k, v = result.strip().split(":", 1)
k = k.strip()
v = v.strip()
for t in terms:
if normalize(t) == normalize(k):
mappings[t] = v
break
for t in terms:
if t not in mappings:
logging.warning(f"Could not map term: {t}")
return mappings
def serialize_object(self, example: EXAMPLE, cls: ClassDefinition = None) -> str:
if cls is None:
cls = self.template_class
if isinstance(example, str):
return example
if isinstance(example, pydantic.BaseModel):
example = example.dict()
lines = []
sv = self.schemaview
for k, v in example.items():
if not v:
continue
slot = sv.induced_slot(k, cls.name)
v_serialized = self._serialize_value(v, slot)
lines.append(f"{k}: {v_serialized}")
return "\n".join(lines)
def _serialize_value(self, val: Any, slot: SlotDefinition) -> str:
if val is None:
return ""
if isinstance(val, list):
return "; ".join([self._serialize_value(v, slot) for v in val if v])
if isinstance(val, dict):
return " - ".join([self._serialize_value(v, slot) for v in val.values() if v])
sv = self.schemaview
if slot.range in sv.all_classes():
if self.labelers:
labelers = list(self.labelers)
else:
labelers = []
labelers += self.get_annotators(sv.get_class(slot.range))
if labelers:
for labeler in labelers:
label = labeler.label(val)
if label:
return label
return val
def _raw_extract(self, text, cls: ClassDefinition = None, object: OBJECT = None) -> str:
"""
Extract annotations from the given text.
:param text:
:return:
"""
prompt = self.get_completion_prompt(cls, text, object=object)
self.last_prompt = prompt
payload = self.api_client.query_hf_model(self.loaded_model, prompt)
return payload
def get_completion_prompt(
self, cls: ClassDefinition = None, text: str = None, object: OBJECT = None
) -> str:
"""Get the prompt for the given template."""
if cls is None:
cls = self.template_class
if not text or ("\n" in text or len(text) > 60):
prompt = (
"From the text below, extract the following entities in the following format:\n\n"
)
else:
prompt = "Split the following piece of text into fields in the following format:\n\n"
for slot in self.schemaview.class_induced_slots(cls.name):
if ANNOTATION_KEY_PROMPT_SKIP in slot.annotations:
continue
if ANNOTATION_KEY_PROMPT in slot.annotations:
slot_prompt = slot.annotations[ANNOTATION_KEY_PROMPT].value
elif slot.description:
slot_prompt = slot.description
else:
if slot.multivalued:
slot_prompt = f"semicolon-separated list of {slot.name}s"
else:
slot_prompt = f"the value for {slot.name}"
if slot.range in self.schemaview.all_enums():
enum_def = self.schemaview.get_enum(slot.range)
pvs = [str(k) for k in enum_def.permissible_values.keys()]
slot_prompt += f"Must be one of: {', '.join(pvs)}"
prompt += f"{slot.name}: <{slot_prompt}>\n"
# prompt += "Do not answer if you don't know\n\n"
prompt = f"{prompt}\n\nText:\n{text}\n\n===\n\n"
if object:
if cls is None:
cls = self.template_class
if isinstance(object, pydantic.BaseModel):
object = object.dict()
for k, v in object.items():
if v:
slot = self.schemaview.induced_slot(k, cls.name)
prompt += f"{k}: {self._serialize_value(v, slot)}\n"
return prompt
def _parse_response_to_dict(
self, results: str, cls: ClassDefinition = None
) -> Optional[RESPONSE_DICT]:
"""
Parse the pseudo-YAML response from OpenAI into a dictionary object.
E.g.
foo: a; b; c
becomes
{"foo": ["a", "b", "c"]}
:param results:
:return:
"""
lines = results.splitlines()
ann = {}
promptable_slots = self.promptable_slots(cls)
for line in lines:
line = line.strip()
if not line:
continue
if ":" not in line:
if len(promptable_slots) == 1:
slot = promptable_slots[0]
logging.warning(
f"Coercing to YAML-like with key {slot.name}: Original line: {line}"
)
line = f"{slot.name}: {line}"
else:
logging.error(f"Line '{line}' does not contain a colon; ignoring")
return
r = self._parse_line_to_dict(line, cls)
if r is not None:
field, val = r
ann[field] = val
return ann
def _parse_line_to_dict(
self, line: str, cls: ClassDefinition = None
) -> Optional[Tuple[FIELD, RESPONSE_ATOM]]:
if cls is None:
cls = self.template_class
sv = self.schemaview
# each line is a key-value pair
logging.info(f"PARSING LINE: {line}")
field, val = line.split(":", 1)
# Field normalization:
# The LLM may mutate the output format somewhat,
# randomly pluralizing or replacing spaces with underscores
field = field.lower().replace(" ", "_")
cls_slots = sv.class_slots(cls.name)
slot = None
if field in cls_slots:
slot = sv.induced_slot(field, cls.name)
else:
if field.endswith("s"):
field = field[:-1]
if field in cls_slots:
slot = sv.induced_slot(field, cls.name)
if not slot:
logging.error(f"Cannot find slot for {field} in {line}")
# raise ValueError(f"Cannot find slot for {field} in {line}")
return
if not val:
msg = f"Empty value in key-value line: {line}"
if slot.required:
raise ValueError(msg)
if slot.recommended:
logging.warning(msg)
return
inlined = slot.inlined
slot_range = sv.get_class(slot.range)
if not inlined:
if slot.range in sv.all_classes():
inlined = sv.get_identifier_slot(slot_range.name) is None
val = val.strip()
if slot.multivalued:
vals = [v.strip() for v in val.split(";")]
else:
vals = [val]
vals = [val for val in vals if val]
logging.debug(f"SLOT: {slot.name} INL: {inlined} VALS: {vals}")
if inlined:
transformed = False
slots_of_range = sv.class_slots(slot_range.name)
if self.recurse or len(slots_of_range) > 2:
vals = [self._extract_from_text_to_dict(v, slot_range) for v in vals]
else:
for sep in [" - ", ":", "/", "*", "-"]:
if all([sep in v for v in vals]):
vals = [dict(zip(slots_of_range, v.split(sep, 1))) for v in vals]
for v in vals:
for k in v.keys():
v[k] = v[k].strip()
transformed = True
break
if not transformed:
logging.warning(f"Did not find separator in {vals} for line {line}")
return
# transform back from list to single value if not multivalued
if slot.multivalued:
final_val = vals
else:
if len(vals) != 1:
logging.error(f"Expected 1 value for {slot.name} in '{line}' but got {vals}")
final_val = vals[0]
return field, final_val
def parse_completion_payload(
self, results: str, cls: ClassDefinition = None, object: dict = None
) -> pydantic.BaseModel:
"""
Parse the completion payload into a pydantic class.
:param results:
:param cls:
:param object: stub object
:return:
"""
raw = self._parse_response_to_dict(results, cls)
logging.debug(f"RAW: {raw}")
if object:
raw = {**object, **raw}
return self.ground_annotation_object(raw, cls)
def ground_annotation_object(
self, ann: RESPONSE_DICT, cls: ClassDefinition = None
) -> Optional[pydantic.BaseModel]:
"""Ground the direct parse of the OpenAI payload.
The raw openAI payload is a YAML-like string, which is parsed to
a response dictionary.
This dictionary is then grounded, using this method
:param ann: Raw annotation object
:param cls: schema class the ground object should instantiate
:return: Grounded annotation object
"""
logging.debug(f"Grounding annotation object {ann}")
if cls is None:
cls = self.template_class
sv = self.schemaview
new_ann = {}
if ann is None:
logging.error(f"Cannot ground None annotation, cls={cls.name}")
return
for field, vals in ann.items():
if isinstance(vals, list):
multivalued = True
else:
multivalued = False
vals = [vals]
slot = sv.induced_slot(field, cls.name)
rng_cls = sv.get_class(slot.range)
enum_def = None
if slot.range:
if slot.range in self.schemaview.all_enums():
enum_def = self.schemaview.get_enum(slot.range)
new_ann[field] = []
for val in vals:
if not val:
continue
if isinstance(val, tuple):
# special case for pairs
sub_slots = sv.class_induced_slots(rng_cls.name)
obj = {}
for i in range(0, len(val)):
sub_slot = sub_slots[i]
sub_rng = sv.get_class(sub_slot.range)
if not sub_rng:
logging.error(f"Cannot find range for {sub_slot.name}")
result = self.normalize_named_entity(val[i], sub_slot.range)
obj[sub_slot.name] = result
elif isinstance(val, dict):
# recurse
obj = self.ground_annotation_object(val, rng_cls)
else:
obj = self.normalize_named_entity(val, slot.range)
if enum_def:
found = False
logging.info(f"Looking for {obj} in {enum_def.name}")
for k, _pv in enum_def.permissible_values.items():
if obj.lower() == k.lower():
obj = k
found = True
break
if not found:
logging.info(f"Cannot find enum value for {obj} in {enum_def.name}")
obj = None
if multivalued:
new_ann[field].append(obj)
else:
new_ann[field] = obj
logging.debug(f"Creating object from dict {new_ann}")
logging.info(new_ann)
py_cls = self.template_module.__dict__[cls.name]
return py_cls(**new_ann)
| [
"Normalize the following semicolon separated list of terms to the {ontology.upper()} ontology\n\n",
"===\n\n",
"; ",
"===\n\nTerms:",
"example:\n",
"PLACEHOLDER\n\nText:\nPLACEHOLDER\n\n===\n\n",
"For example:\n\n",
"PLACEHOLDER: PLACEHOLDER\n",
"\n\n===\n\n",
", ",
"From the text below, extract the following entities in the following format:\n\n",
"Split the following piece of text into fields in the following format:\n\n"
] |
2024-01-10 | searchgame/ontogpt | src~ontogpt~utils~gpt4all_runner.py | """Tools for loading and working with GPT4All models."""
import logging
from langchain import LLMChain, PromptTemplate
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import GPT4All
def set_up_gpt4all_model(modelpath):
"""Prepare a GGML-formatted GPT4All model for LLM interaction."""
logging.info(f"Preparing {modelpath}...")
local_path = str(modelpath)
callbacks = [StreamingStdOutCallbackHandler()]
if local_path.endswith("ggml-gpt4all-j-v1.3-groovy.bin"):
backend = "gptj"
llm = GPT4All(model=local_path, backend=backend, callbacks=callbacks, verbose=True)
else:
llm = GPT4All(model=local_path, callbacks=callbacks, verbose=True)
return llm
def chain_gpt4all_model(llm, prompt_text):
"""Interact with a GPT4All model."""
template = """{prompt_text}"""
prompt = PromptTemplate(template=template, input_variables=["prompt_text"])
llm_chain = LLMChain(prompt=prompt, llm=llm)
raw_output = llm_chain.run({"prompt_text": prompt_text})
return raw_output
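# A minimal usage sketch of the two helpers above, assuming a GGML model file
# exists at the (hypothetical) path below; adjust it to a real local download.
if __name__ == "__main__":
    gpt4all_llm = set_up_gpt4all_model("models/ggml-gpt4all-j-v1.3-groovy.bin")
    print(chain_gpt4all_model(gpt4all_llm, "Name three uses of an ontology."))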
| [
"{prompt_text}",
"prompt_text"
] |
2024-01-10 | searchgame/ontogpt | src~ontogpt~engines~knowledge_engine.py | """Main Knowledge Extractor class."""
import importlib
import logging
import re
from abc import ABC
from dataclasses import dataclass, field
from pathlib import Path
from types import ModuleType
from typing import Dict, Iterator, List, Optional, TextIO, Union
from urllib.parse import quote
import inflection
import openai
import pydantic
import tiktoken
import yaml
from linkml_runtime import SchemaView
from linkml_runtime.linkml_model import ClassDefinition, ElementName, SlotDefinition
from oaklib import BasicOntologyInterface, get_adapter
from oaklib.datamodels.text_annotator import TextAnnotationConfiguration
from oaklib.implementations import OntoPortalImplementationBase
from oaklib.interfaces import MappingProviderInterface, TextAnnotatorInterface
from oaklib.utilities.apikey_manager import get_apikey_value
from oaklib.utilities.subsets.value_set_expander import ValueSetExpander
from ontogpt import DEFAULT_MODEL
from ontogpt.clients import OpenAIClient
from ontogpt.templates.core import ExtractionResult, NamedEntity
this_path = Path(__file__).parent
logger = logging.getLogger(__name__)
OBJECT = Union[str, pydantic.BaseModel, dict]
EXAMPLE = OBJECT
FIELD = str
TEMPLATE_NAME = str
MODEL_NAME = str
# annotation metamodel
ANNOTATION_KEY_PROMPT = "prompt"
ANNOTATION_KEY_PROMPT_SKIP = "prompt.skip"
ANNOTATION_KEY_ANNOTATORS = "annotators"
ANNOTATION_KEY_RECURSE = "ner.recurse"
ANNOTATION_KEY_EXAMPLES = "prompt.examples"
# TODO: introspect
DATAMODELS = [
"treatment.DiseaseTreatmentSummary",
"gocam.GoCamAnnotations",
"bioloigical_process.BiologicalProcess",
"environmental_sample.Study",
"mendelian_disease.MendelianDisease",
"reaction.Reaction",
"recipe.Recipe",
]
def chunk_text(text: str, window_size=3) -> Iterator[str]:
"""Chunk text into windows of sentences."""
sentences = re.split(r"[.?!]\s+", text)
for right_index in range(1, len(sentences)):
left_index = max(0, right_index - window_size)
yield ". ".join(sentences[left_index:right_index])
@dataclass
class KnowledgeEngine(ABC):
"""
Abstract base class for all knowledge engines.
A Knowledge Engine is able to extract knowledge from text, utilizing
knowledge sources plus LLMs
"""
template: TEMPLATE_NAME = None
"""LinkML Template to use for this engine.
Must be of the form <module_name>.<ClassName>"""
template_class: ClassDefinition = None
"""LinkML Class for the template.
This is derived from the template and does not need to be set manually."""
template_pyclass = None
"""Python class for the template.
This is derived from the template and does not need to be set manually."""
template_module: ModuleType = None
"""Python module for the template.
This is derived from the template and does not need to be set manually."""
schemaview: SchemaView = None
"""LinkML SchemaView over the template.
This is derived from the template and does not need to be set manually."""
api_key: str = None
"""OpenAI API key."""
model: MODEL_NAME = None
"""Language Model. This may be overridden in subclasses."""
# annotator: TextAnnotatorInterface = None
# """Default annotator. TODO: deprecate?"""
annotators: Dict[str, List[TextAnnotatorInterface]] = None
"""Annotators for each class.
An annotator will ground/map labels to CURIEs.
These override the annotators annotated in the template
"""
skip_annotators: Optional[List[TextAnnotatorInterface]] = None
"""Annotators to skip.
This overrides any specified in the schema"""
mappers: List[BasicOntologyInterface] = None
"""List of concept mappers, to assist in grounding to desired ID prefix"""
labelers: List[BasicOntologyInterface] = None
"""Labelers that map CURIEs to labels"""
client: OpenAIClient = None
"""All calls to LLMs are delegated through this client"""
dictionary: Dict[str, str] = field(default_factory=dict)
"""Local dictionary of strings/labels to IDs"""
value_set_expansions: Dict[str, List[str]] = field(default_factory=dict)
min_grounding_text_overlap = 0.66
"""Min proportion of overlap in characters between text and grounding. TODO: use tokenization"""
named_entities: List[NamedEntity] = field(default_factory=list)
"""Cache of all named entities"""
auto_prefix: str = None
"""If set then non-normalized named entities will be mapped to this prefix"""
last_text: str = None
"""Cache of last text."""
last_prompt: str = None
"""Cache of last prompt used."""
encoding = None
def __post_init__(self):
if self.template:
self.template_class = self._get_template_class(self.template)
if self.template_class:
logging.info(f"Using template {self.template_class.name}")
if not self.model:
self.model = DEFAULT_MODEL
if self.mappers is None:
logging.info("Using mappers (currently hardcoded)")
self.mappers = [get_adapter("translator:")]
self.set_up_client()
self.encoding = tiktoken.encoding_for_model(self.client.model)
def set_api_key(self, key: str):
self.api_key = key
openai.api_key = key
def extract_from_text(
self, text: str, cls: ClassDefinition = None, object: OBJECT = None
) -> ExtractionResult:
raise NotImplementedError
def extract_from_file(self, file: Union[str, Path, TextIO]) -> pydantic.BaseModel:
"""
Extract annotations from the given text.
:param file:
:return:
"""
if isinstance(file, str):
file = Path(file)
if isinstance(file, Path):
with file.open() as f:
text = f.read()
else:
text = file.read()
self.last_text = text
r = self.extract_from_text(text)
r.input_id = str(file)
return r
def load_dictionary(self, path: Union[str, Path, list]):
if not isinstance(path, list):
logger.info(f"Loading dictionary from {path}")
with open(str(path)) as f:
return self.load_dictionary(yaml.safe_load(f))
if self.dictionary is None:
self.dictionary = {}
entries = [(entry["synonym"].lower(), entry["id"]) for entry in path]
entries = sorted(entries, key=lambda x: len(x[0]), reverse=True)
for syn, id in entries:
if syn in self.dictionary and self.dictionary[syn] != id:
logger.warning(f"Duplicate synonym: {syn} => {id}, {self.dictionary[syn]}")
self.dictionary[syn] = id
logger.info(f"Loaded {len(self.dictionary)}")
# @abstractmethod
def synthesize(self, cls: ClassDefinition = None, object: OBJECT = None) -> ExtractionResult:
raise NotImplementedError
def generalize(
self, object: Union[pydantic.BaseModel, dict], examples: List[EXAMPLE]
) -> ExtractionResult:
raise NotImplementedError
def map_terms(self, terms: List[str], ontology: str) -> Dict[str, List[str]]:
raise NotImplementedError
def _get_template_class(self, template: TEMPLATE_NAME) -> ClassDefinition:
"""
Get the LinkML class for a template.
:param template: template name of the form module.ClassName
:return: LinkML class definition
"""
logger.info(f"Loading schema for {template}")
if "." in template:
module_name, class_name = template.split(".", 1)
else:
module_name = template
class_name = None
templates_path = this_path.parent / "templates"
path_to_template = str(templates_path / f"{module_name}.yaml")
sv = SchemaView(path_to_template)
if class_name is None:
roots = [c.name for c in sv.all_classes().values() if c.tree_root]
if len(roots) != 1:
raise ValueError(f"Template {template} does not have singular root: {roots}")
class_name = roots[0]
mod = importlib.import_module(f"ontogpt.templates.{module_name}")
self.template_module = mod
self.template_pyclass = mod.__dict__[class_name]
self.schemaview = sv
logger.info(f"Getting class for template {template}")
cls = None
for c in sv.all_classes().values():
if c.name == class_name:
cls = c
break
if not cls:
raise ValueError(f"Template {template} not found")
return cls
def _get_openai_api_key(self):
"""Get the OpenAI API key from the environment."""
# return os.environ.get("OPENAI_API_KEY")
return get_apikey_value("openai")
def get_annotators(self, cls: ClassDefinition = None) -> List[BasicOntologyInterface]:
"""
Get the annotators/labelers for a class.
The annotators are returned in order of precedence
Annotators are used to *ground* labels as CURIEs.
Annotators may also do double-duty as labelers (i.e. map CURIEs to labels)
These are specified by linkml annotations within the template/schema;
if the engine has a set of annotators specified these take precedence.
:param cls: schema class
:return: list of annotations
"""
        if self.annotators and cls.name in self.annotators:
            annotator_specs = self.annotators[cls.name]
        else:
            if ANNOTATION_KEY_ANNOTATORS not in cls.annotations:
                logger.error(f"No annotators for {cls.name}")
                return []
            annotator_specs = cls.annotations[ANNOTATION_KEY_ANNOTATORS].value.split(", ")
        logger.info(f" Annotators: {annotator_specs} [will skip: {self.skip_annotators}]")
        # resolve each spec into a loaded adapter, skipping any marked for exclusion
        annotators = []
        for annotator in annotator_specs:
            if isinstance(annotator, str):
                logger.info(f"Loading annotator {annotator}")
                if self.skip_annotators and annotator in self.skip_annotators:
                    logger.info(f"Skipping annotator {annotator}")
                    continue
                if self.annotators is None:
                    self.annotators = {}
                if annotator not in self.annotators:
                    self.annotators[annotator] = get_adapter(annotator)
                annotators.append(self.annotators[annotator])
elif isinstance(annotator, BasicOntologyInterface):
annotators.append(annotator)
else:
raise ValueError(f"Unknown annotator type {annotator}")
return annotators
def promptable_slots(self, cls: Optional[ClassDefinition] = None) -> List[SlotDefinition]:
"""
List of all slots that are not skipped for purposes of prompting.
Examples of slots that are skipped are:
- identifier fields
- the source text used in extraction
- other metadata that is outside what we might want to predict
:param cls:
:return:
"""
if cls is None:
cls = self.template_class
sv = self.schemaview
return [s for s in sv.class_induced_slots(cls.name) if not self.slot_is_skipped(s)]
    def slot_is_skipped(self, slot: SlotDefinition) -> bool:
        return ANNOTATION_KEY_PROMPT_SKIP in slot.annotations
def normalize_named_entity(self, text: str, range: ElementName) -> str:
"""
Grounds and normalizes to preferred ID prefixes.
if the entity cannot be grounded and normalized, the original text is returned.
:param text:
:param cls:
:return:
"""
sv = self.schemaview
cls = sv.get_class(range)
if cls is None:
return text
if ANNOTATION_KEY_EXAMPLES in cls.annotations:
examples = cls.annotations[ANNOTATION_KEY_EXAMPLES].value.split(", ")
examples = [x.lower() for x in examples]
logger.debug(f"Will exclude if in list of examples: {examples}")
if text.lower() in examples:
logger.warning(f"Likely a hallucination as it is the example set: {text}")
return f"LIKELY HALLUCINATION: {text}"
for obj_id in self.groundings(text, cls):
logger.info(f"Grounding {text} to {obj_id}; next step is to normalize")
for normalized_id in self.normalize_identifier(obj_id, cls):
if not any(e for e in self.named_entities if e.id == normalized_id):
self.named_entities.append(NamedEntity(id=normalized_id, label=text))
logger.info(f"Normalized {text} with {obj_id} to {normalized_id}")
return normalized_id
logger.info(f"Could not ground and normalize {text} to {cls.name}")
if self.auto_prefix:
obj_id = f"{self.auto_prefix}:{quote(text)}"
if not any(e for e in self.named_entities if e.id == obj_id):
self.named_entities.append(NamedEntity(id=obj_id, label=text))
else:
obj_id = text
if ANNOTATION_KEY_RECURSE in cls.annotations:
logger.info(f"Using recursive strategy to parse: {text} to {cls.name}")
obj = self.extract_from_text(text, cls).extracted_object
if obj:
if self.named_entities is None:
self.named_entities = []
try:
obj.id = obj_id
except ValueError as e:
logger.error(f"No id for {obj} {e}")
self.named_entities.append(obj)
return obj_id
def is_valid_identifier(self, input_id: str, cls: ClassDefinition) -> bool:
sv = self.schemaview
if cls.id_prefixes:
if ":" not in input_id:
return False
prefix, rest = input_id.split(":", 1)
if prefix not in cls.id_prefixes:
logger.debug(f"ID {input_id} not in prefixes {cls.id_prefixes}")
return False
id_slot = sv.get_identifier_slot(cls.name)
if id_slot and id_slot.pattern:
id_regex = re.compile(id_slot.pattern)
m = re.match(id_regex, input_id)
if not m:
logger.debug(f"ID {input_id} does not match pattern {id_slot.pattern}")
return False
if id_slot and id_slot.values_from:
vse = ValueSetExpander()
is_found = False
for e in id_slot.values_from:
if e not in self.value_set_expansions:
# expanding value set for first time
range_enum = sv.get_enum(e)
pvs = vse.expand_value_set(range_enum, sv.schema)
valid_ids = [pv.text for pv in pvs]
self.value_set_expansions[e] = valid_ids
logger.info(f"Expanded {e} to {len(valid_ids)} IDs")
if input_id in self.value_set_expansions[e]:
is_found = True
logger.info(f"ID {input_id} found in value set {e}")
break
if not is_found:
logger.info(f"ID {input_id} not in value set {e}")
return False
return True
def normalize_identifier(self, input_id: str, cls: ClassDefinition) -> Iterator[str]:
if self.is_valid_identifier(input_id, cls):
yield input_id
for obj_id in self.map_identifier(input_id, cls):
if obj_id == input_id:
continue
if self.is_valid_identifier(obj_id, cls):
yield obj_id
def map_identifier(self, input_id: str, cls: ClassDefinition) -> Iterator[str]:
"""
Normalize an identifier to a preferred prefix.
:param input_id:
:param cls:
:return:
"""
if input_id.startswith("http://purl.bioontology.org/ontology"):
# TODO: this should be fixed upstream in OAK
logging.info(f"Normalizing BioPortal id {input_id}")
input_id = input_id.replace("http://purl.bioontology.org/ontology/", "").replace(
"/", ":"
)
if input_id.startswith("http://id.nlm.nih.gov/mesh/"):
# TODO: this should be fixed upstream in OAK
logging.info(f"Normalizing MESH id {input_id}")
input_id = input_id.replace("http://id.nlm.nih.gov/mesh/", "").replace("/", ":")
if input_id.startswith("drugbank:"):
input_id = input_id.replace("drugbank:", "DRUGBANK:")
yield input_id
if not cls.id_prefixes:
return
if not self.mappers:
return
for mapper in self.mappers:
if isinstance(mapper, MappingProviderInterface):
for mapping in mapper.sssom_mappings([input_id]):
yield str(mapping.object_id)
else:
raise ValueError(f"Unknown mapper type {mapper}")
def groundings(self, text: str, cls: ClassDefinition) -> Iterator[str]:
"""
Ground the given text to element identifiers.
This can potentially yield multiple possible alternatives; these
should be yielded in priority order.
- if there is a different singular form of the text, yield from that first
- dictionary exact matches are yielded first
- dictionary partial matches are yielded next
- annotators are yielded next, in order in which they are specified in the schema
:param text: text to ground, e.g. gene symbol
:param cls: schema class the ground object should instantiate
:return:
"""
logger.info(f"GROUNDING {text} using {cls.name}")
id_matches = re.match(r"^(\S+):(\d+)$", text)
if id_matches:
obj_prefix = id_matches.group(1)
matching_prefixes = [x for x in cls.id_prefixes if x.upper() == obj_prefix.upper()]
if matching_prefixes:
yield matching_prefixes[0] + ":" + id_matches.group(2)
text_lower = text.lower()
text_singularized = inflection.singularize(text_lower)
if text_singularized != text_lower:
logger.info(f"Singularized {text} to {text_singularized}")
yield from self.groundings(text_singularized, cls)
paren_char = "["
parenthetical_components = re.findall(r"\[(.*?)\]", text_lower)
if not parenthetical_components:
paren_char = "("
parenthetical_components = re.findall(r"\((.*?)\)", text_lower)
if parenthetical_components:
logger.info(f"{text_lower} =>paren=> {parenthetical_components}")
trimmed_text = text_lower
for component in parenthetical_components:
if component:
logger.debug(
f"RECURSIVE GROUNDING OF {component} from {parenthetical_components}"
)
yield from self.groundings(component, cls)
if paren_char == "(":
trimmed_text = trimmed_text.replace(f"({component})", "")
elif paren_char == "[":
trimmed_text = trimmed_text.replace(f"[{component}]", "")
else:
raise AssertionError(f"Unknown paren char {paren_char}")
                trimmed_text = trimmed_text.strip().replace("  ", " ")  # collapse double spaces left by removal
if trimmed_text:
if len(trimmed_text) >= len(text_lower):
raise AssertionError(
f"Trimmed text {trimmed_text} is not shorter than {text_lower}"
)
logger.debug(
f"{text_lower} =>trimmed=> {trimmed_text}; in {parenthetical_components}"
)
yield from self.groundings(trimmed_text, cls)
if self.dictionary and text_lower in self.dictionary:
obj_id = self.dictionary[text_lower]
logger.debug(f"Found {text} in dictionary: {obj_id}")
yield obj_id
if self.dictionary:
for syn, obj_id in self.dictionary.items():
if syn in text_lower:
if len(syn) / len(text_lower) > self.min_grounding_text_overlap:
logger.debug(f"Found {syn} < {text} in dictionary: {obj_id}")
yield obj_id
if self.annotators and cls.name in self.annotators:
annotators = self.annotators[cls.name]
else:
if ANNOTATION_KEY_ANNOTATORS not in cls.annotations:
annotators = []
else:
annotators = cls.annotations[ANNOTATION_KEY_ANNOTATORS].value.split(", ")
logger.info(f" Annotators: {annotators} [will skip: {self.skip_annotators}]")
# prioritize whole matches by running these first
for matches_whole_text in [True, False]:
config = TextAnnotationConfiguration(matches_whole_text=matches_whole_text)
for annotator in annotators:
if isinstance(annotator, str):
if self.skip_annotators and annotator in self.skip_annotators:
continue
if self.annotators is None:
self.annotators = {}
if annotator not in self.annotators:
logger.info(f"Loading annotator {annotator}")
self.annotators[annotator] = get_adapter(annotator)
annotator = self.annotators[annotator]
if not matches_whole_text and not isinstance(
annotator, OntoPortalImplementationBase
):
# TODO: allow more fine-grained control
logger.info(
f"Skipping {type(annotator)} as it does not support partial matches"
)
continue
try:
results = annotator.annotate_text(text, config)
for result in results:
yield result.object_id
except Exception as e:
logger.error(f"Error with {annotator} for {text}: {e}")
# def ground_text_to_id(self, text: str, cls: ClassDefinition = None) -> str:
# raise NotImplementedError
def merge_resultsets(
self, resultset: List[ExtractionResult], unique_fields: List[str] = None
) -> ExtractionResult:
"""
Merge all resultsets into a single resultset.
Note the first element of the list is mutated.
:param resultset:
:return:
"""
result = resultset[0].extracted_object
for next_extraction in resultset[1:]:
next_result = next_extraction.extracted_object
if unique_fields:
for k in unique_fields:
if k in result and k in next_result:
if result[k] != next_result[k]:
logger.error(
f"Cannot merge unique fields: {k} {result[k]} != {next_result[k]}"
)
continue
for k, v in vars(next_result).items():
curr_v = getattr(result, k, None)
if isinstance(v, list):
if all(isinstance(x, str) for x in v):
setattr(result, k, list(set(curr_v).union(set(v))))
else:
setattr(result, k, curr_v + v)
else:
if curr_v and v and curr_v != v:
logger.error(f"Cannot merge {curr_v} and {v}")
if v:
setattr(result, k, v)
return resultset[0]
def set_up_client(self):
self.client = OpenAIClient(model=self.model)
logging.info("Setting up OpenAI client API Key")
self.api_key = self._get_openai_api_key()
openai.api_key = self.api_key
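# A minimal standalone sketch of the IRI-to-CURIE rewriting performed in
# map_identifier above, assuming the same BioPortal/MESH URL conventions;
# the helper name and example identifier are illustrative only.
def iri_to_curie_sketch(input_id: str) -> str:
    """Rewrite known ontology IRIs into CURIE-like form, mirroring map_identifier."""
    if input_id.startswith("http://purl.bioontology.org/ontology/"):
        return input_id.replace("http://purl.bioontology.org/ontology/", "").replace("/", ":")
    if input_id.startswith("http://id.nlm.nih.gov/mesh/"):
        return input_id.replace("http://id.nlm.nih.gov/mesh/", "").replace("/", ":")
    if input_id.startswith("drugbank:"):
        return input_id.replace("drugbank:", "DRUGBANK:")
    return input_id
# iri_to_curie_sketch("http://purl.bioontology.org/ontology/MESH/D014867") -> "MESH:D014867"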
| [
"<class 'str'>",
"prompt.skip",
"prompt",
"PLACEHOLDER.yaml",
"None"
] |
2024-01-10 | bigdan88/ninjaGPT | main2.py | import openai
import logging
import utils.audio
import utils.whisper
import utils.tts
import json
from utils.chatgpt_api import ChatGPTAPI # Import the ChatGPTAPI class
import pdb
# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
def preprocess_json_string(md_json_string):
"""
Preprocesses a JSON string that is enclosed within Markdown backticks.
Args:
md_json_string (str): The JSON string with Markdown backticks.
Returns:
str: The cleaned JSON string.
"""
# Split the string into lines
print("preprocessing json string")
lines = md_json_string.split('\n')
# Remove the first and last lines if they contain backticks
if lines[0].strip().startswith('```'):
lines = lines[1:]
if lines[-1].strip().endswith('```'):
lines = lines[:-1]
# Join the lines back into a single string
cleaned_string = '\n'.join(lines).strip()
return cleaned_string
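# Example of the fence stripping performed above (input is illustrative):
#   preprocess_json_string('```json\n{"command": "advice"}\n```')
#   -> '{"command": "advice"}'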
def decode_json(json_string):
try:
data = json.loads(json_string)
command = data.get("command")
args = data.get("args", {})
if command == "advice":
print("Giving advice...")
advice = args.get("content")
# Call the advice handling function
# Convert the response to speech
utils.tts.text_to_speech(advice)
logging.info("Text-to-speech conversion completed")
elif command == "set_timer":
print("Setting timer...")
time = args.get("duration")
# Convert the response to speech
utils.tts.text_to_speech(f"Setting timer for {time} minutes")
logging.info("Text-to-speech conversion completed")
else:
print(f"Unknown command: {command}")
except json.JSONDecodeError as e:
print(f"Invalid JSON: {e}")
def read_api_key(file_path):
"""Read the API key from a file."""
try:
with open(file_path, 'r') as file:
return file.read().strip()
except IOError:
logging.error("Unable to read API key. Check if the credentials.txt file exists and is readable.")
return None
# Read API key from credentials.txt
api_key = read_api_key('credentials.txt')
if not api_key:
logging.critical("API key not found. Exiting.")
exit(1)
# Initialize ChatGPT API with the API key
chat_gpt_api = ChatGPTAPI(api_key)
# chat_gpt_api.add_system_message("You are KAI, a cooking assistant. Please give cooking advice to the user.")
# chat_gpt_api.add_system_message("""You are KAI, a cooking assistant. You only have two actions: advice and set_timer. You should only respond in JSON format as described below:
# {
# "command": "advice",
# "parameters": {
# "content": "The best temperature to cook a steak is medium rare"
# },
# }
# or
# {
# "command": "set_timer",
# "parameters": {
# "duration": "10 minutes",
# "message": "Check the oven"
# },
# }
# """)
chat_gpt_api.add_system_message("You are KAI, a cooking assistant. Please give cooking advice to the user. If giving the user a recipe, please ask the user if they would like to hear the steps one at a time. If they do, please provide one step of instructions until the user signals that they are ready for the next step.")
chat_gpt_api.add_system_message("""
These are your abilities:
ABILITIES = (
'advice: Gives answers to the user, args: "code": "<full_code_string>"',
'set_timer: Starts a timer, args: "duration": "<float, duration in minutes>"', "message": "<message>",
)
""")
chat_gpt_api.add_system_message("""
You should only respond in JSON format as described below:
{
"command": "advice",
"args": {
"content": "The best temperature to cook a steak is medium rare"
},
}
or
{
"command": "set_timer",
"args": {
"duration": "10 minutes",
"message": "Check the oven"
},
}
""")
# Main process
logging.info("Starting main process")
# File name for the recorded audio
file_name = "test.wav"
while True:
# # Record audio and save it to 'file_name'
utils.audio.record_audio(file_name, 0.09, 2)
logging.info("Audio recorded and saved as " + file_name)
# Transcribe the recorded audio
transcription = utils.whisper.transcribe_audio(file_name)
logging.info("Transcription complete")
# Log transcription
logging.info("Transcription: " + transcription)
# Send the transcription as a question to ChatGPT
response = chat_gpt_api.ask_question(transcription)
logging.info("Response received from ChatGPT")
print("AFTER Response:")
# Decode the JSON response
response = preprocess_json_string(response)
decode_json(response)
# Log response
logging.info("Response: " + response)
# # Convert the response to speech
# utils.tts.text_to_speech(response)
# logging.info("SECOND Text-to-speech conversion completed")
print("Press any key to talk to KAI...")
input()
| [] |
2024-01-10 | bigdan88/ninjaGPT | utils~ninjagpt35.py | # Filename: chatgpt_api.py
import openai
import logging
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
class ChatGPTAPI:
def __init__(self, api_key):
"""
Initialize the ChatGPT API client.
Args:
            api_key (str): The API key for accessing OpenAI's API.
"""
self.client = openai.Client(api_key=api_key)
def ask_question(self, question, model="gpt-4", temperature=0.7, max_tokens=100):
"""
Sends a question to the ChatGPT API and retrieves the response.
Args:
question (str): The question to be asked.
model (str): The model to use (default: "gpt-4").
temperature (float): The temperature to use for the response (default: 0.7).
max_tokens (int): The maximum number of tokens to generate (default: 100).
Returns:
str: The response from ChatGPT.
"""
        try:
            # gpt-4 is a chat model, so use the chat completions endpoint
            response = self.client.chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": question}],
                temperature=temperature,
                max_tokens=max_tokens
            )
            return response.choices[0].message.content.strip()
except Exception as e:
logging.error("Error in ChatGPT API request: " + str(e))
return ""
# Example usage
if __name__ == "__main__":
# Replace 'your_api_key' with your actual OpenAI API key
api_key = 'your_api_key'
chat_gpt = ChatGPTAPI(api_key)
response = chat_gpt.ask_question("What is the capital of France?")
print("Response:", response)
# You are a cooking assistant. You only have two actions: advice and set_timer. You should only respond in JSON format as described below:
# {
# "command": "advice",
# "parameters": {
# "content": "The best temperature to cook a steak is medium rare"
# },
# }
# or
# {
# "command": "set_timer",
# "parameters": {
# "duration": "10 minutes",
# "message": "Check the oven"
# },
# }
| [] |
2024-01-10 | bigdan88/ninjaGPT | utils~tts.py | import openai
from pathlib import Path
from playsound import playsound
import logging
# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
def read_api_key(file_path):
"""
Reads the API key from a file.
Args:
file_path (str): Path to the file containing the API key.
Returns:
str: The API key.
"""
try:
with open(file_path, 'r') as file:
return file.read().strip()
except IOError:
logging.error("Unable to read API key. Check if the credentials file exists and is readable.")
return None
# Read API key from credentials file
api_key = read_api_key('credentials.txt')
if not api_key:
logging.critical("API key not found. Exiting.")
exit(1)
# Initialize OpenAI client with the API key
client = openai.Client(api_key=api_key)
def text_to_speech(text):
"""
Converts the given text to speech using OpenAI's text-to-speech API.
Args:
text (str): The text to convert to speech.
Returns:
str: The path to the saved audio file.
"""
# Path for the speech file
speech_file_path = Path(__file__).parent / "speech.mp3"
try:
# Create the audio file using OpenAI's text-to-speech API
response = client.audio.speech.create(
model="tts-1",
voice="nova",
input=text
)
# Save the audio file
response.stream_to_file(speech_file_path)
# Play the audio file
playsound(str(speech_file_path))
logging.info(f"Speech successfully generated and saved to {speech_file_path}")
except Exception as e:
logging.error("Failed to convert text to speech: " + str(e))
if __name__ == "__main__":
text_to_speech("Hello world!") # Replace with your actual text
| [] |
2024-01-10 | bigdan88/ninjaGPT | utils~ninjagpt.py | import openai
import time
# Initialize OpenAI client with your API key
client = openai.Client(api_key='your_api_key')  # supply a real key via configuration, never hard-coded in source
# Function to create the ninjaGPT assistant
def create_ninjaGPT(client):
assistant = client.beta.assistants.create(
name="ninjaGPT",
instructions="You are an assistant inside a cooking gadget. Provide cooking advice.",
tools=[],
model="gpt-4-1106-preview" # Assuming you are using the GPT-4 model
)
return assistant
# Function to create a thread for a new conversation
def create_thread(client):
thread = client.beta.threads.create()
return thread
# Function to send a message to ninjaGPT and receive a response
def ask_ninjaGPT(client, thread_id, assistant_id, question):
message = client.beta.threads.messages.create(
thread_id=thread_id,
role="user",
content=question
)
run = client.beta.threads.runs.create(
thread_id=thread_id,
assistant_id=assistant_id
)
return run
# Function to update gadget parameters based on ninjaGPT's advice
def update_gadget_parameters(parameter_updates):
# Implement this function to interface with your cooking gadget
# Adjust temperature, time settings, etc., based on the assistant's advice
pass
# Function to extract parameters from the assistant's response
def extract_parameters_from_response(response):
# Implement logic to parse response for cooking parameter adjustments
# Return a dictionary or similar structure with the extracted parameters
pass
# Function to wait for a run to complete and then return its status
def wait_for_run_completion(client, thread_id, run_id):
    while True:
        run_status = client.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
        # also bail out on terminal failure states so the loop cannot spin forever
        if run_status.status in ("completed", "failed", "cancelled", "expired"):
            return run_status
        time.sleep(1)  # Wait for 1 second before checking again
# Function to get all messages from a thread, including the assistant's responses
def get_thread_messages(client, thread_id):
messages_page = client.beta.threads.messages.list(thread_id=thread_id)
return messages_page.data if hasattr(messages_page, 'data') else []
if __name__ == "__main__":
ninjaGPT = create_ninjaGPT(client)
thread = create_thread(client)
# Example question
question = "How long should I bake chicken at 350 degrees Fahrenheit?"
run = ask_ninjaGPT(client, thread.id, ninjaGPT.id, question)
# Wait for the run to complete
completed_run = wait_for_run_completion(client, thread.id, run.id)
# Retrieve and print all messages from the thread
messages = get_thread_messages(client, thread.id)
for message in messages:
print(f"Role: {message.role}, Content: {message.content[0].text.value}") | [] |
2024-01-10 | SentientPlatypus/Amoris | SCP~Globals.py |
from datetime import date
from inspect import trace
import openai
from logging import exception
from operator import mul
from os import name, stat
from typing import AsyncContextManager, final
import discord
from discord import errors
from discord import client
from discord import channel
from discord import embeds
from discord.embeds import Embed
from discord.ext import commands
from discord.ext.commands.core import command
from discord.member import Member
from discord.player import PCMAudio
from discord.utils import time_snowflake
from openai.api_resources import model
from pymongo import MongoClient
import names
from pymongo.collection import _FIND_AND_MODIFY_DOC_FIELDS
import re
import random
import math
import asyncio
import linecache
import sys
import traceback
import string
import itertools
from imdb import IMDb
from pymongo.database import Database
from youtube_search import YoutubeSearch
import json
import youtube_dl
from discord_components import DiscordComponents, Button, ButtonStyle, InteractionType
import text2emotion as te
from removebg import RemoveBg
import os
from PIL import Image
from io import BytesIO
import requests
import Globals
import pymongo
import ssl
class noImageError(commands.CommandError):
def __init__(self, user, *args, **kwargs):
self.user = user
def getMongo():
    # connection credentials belong in configuration, not in source control
    return MongoClient("mongodb+srv://<username>:<password>@cluster0.d8ueq.mongodb.net/myFirstDatabase?retryWrites=true&w=majority&ssl=true&ssl_cert_reqs=CERT_NONE")
def getDashboardURL():
return "http://scp16tsundere.pagekite.me:443"
class botUser(object):
def __init__(self, user:discord.Member):
self.user = user
self.inv = mulah.find_one({"id":user.id}, {"inv"})["inv"]
self.gf = mulah.find_one({"id":user.id}, {"gf"})["gf"]
self.lp = mulah.find_one({"id":user.id}, {"lp"})["lp"]
self.breakups = mulah.find_one({"id":user.id}, {"breakups"})["breakups"]
self.kisses = mulah.find_one({"id":user.id}, {"kisses"})["kisses"]
self.boinks = mulah.find_one({"id":user.id}, {"boinks"})["boinks"]
self.money = mulah.find_one({"id":user.id}, {"money"})["money"]
self.job = mulah.find_one({"id":user.id}, {"job"})["job"]
self.duelwins = mulah.find_one({"id":user.id}, {"duelwins"})["duelwins"]
self.duelloses = mulah.find_one({"id":user.id}, {"duelloses"})["duelloses"]
self.duelretreats = mulah.find_one({"id":user.id}, {"duelretreats"})["duelretreats"]
self.watchlist = mulah.find_one({"id":user.id}, {"watchlist"})["watchlist"]
self.achievements = mulah.find_one({"id":user.id}, {"achievements"})["achievements"]
self.proposes = mulah.find_one({"id":user.id}, {"proposes"})["proposes"]
self.dates = mulah.find_one({"id":user.id}, {"dates"})["dates"]
self.relationships = mulah.find_one({"id":user.id}, {"relationships"})["relationships"]
self.gambles = mulah.find_one({"id":user.id}, {"gambles"})["gambles"]
self.gamblewins = mulah.find_one({"id":user.id}, {"gamblewins"})["gamblewins"]
self.upgradepoints = mulah.find_one({"id":user.id}, {"upgradepoints"})["upgradepoints"]
self.gameskill = mulah.find_one({"id":user.id}, {"gameskill"})["gameskill"]
self.bank = mulah.find_one({"id":user.id}, {"bank"})["bank"]
self.net = mulah.find_one({"id":user.id}, {"net"})["net"]
self.abilityxp = mulah.find_one({"id":user.id}, {"abilityxp"})["abilityxp"]
self.mmorpg = mulah.find_one({"id":user.id}, {"mmorpg"})["mmorpg"]
    def updateWholeMongo(self):
        dictionaryOfAttributes = self.__dict__
        for x in dictionaryOfAttributes:
            if x == "user":
                continue  # the discord.Member itself is not stored in Mongo
            mulah.update_one({"id": self.user.id}, {"$set": {x: dictionaryOfAttributes[x]}})
    def updateOne(self, attribute):
        attribute = attribute.lower()
        dictionaryOfAttributes = self.__dict__
        try:
            mulah.update_one({"id": self.user.id}, {"$set": {attribute: dictionaryOfAttributes[attribute]}})
        except KeyError:
            pass  # unknown attribute name; nothing to update
    def incOne(self, attribute, value: int):
        attribute = attribute.lower()
        try:
            mulah.update_one({"id": self.user.id}, {"$inc": {attribute: value}})
        except pymongo.errors.PyMongoError:
            pass  # swallow database errors, matching the original behavior
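# Hypothetical usage of botUser above; it requires a live Mongo connection and
# a real discord.Member, so it is shown as a comment only:
#   profile = botUser(member)
#   profile.money += 100
#   profile.updateOne("money")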
cluster = getMongo()
mulah = cluster["discord"]["mulah"]
levelling = cluster["discord"]["levelling"]
DiscordGuild = cluster["discord"]["guilds"]
achievements = [
{"name":"First Kiss!", "desc":"Kiss someone for the first time!", "category":"relationships"},
{"name":"Virginity Loss!", "desc":"Boink someone for the first time!", "category":"relationships"},
{"name":"Engaged!", "desc":"Propose to someone for the first time!", "category":"relationships"},
{"name":"Jerk", "desc":"Turns out you were the problem", "category":"relationships"},
{"name":"Divorcee!", "desc":"Get a life bro.", "category":"relationships"},
{"name":"First Date!", "desc":"First date with GF!", "category":"relationships"},
{"name":"bobs", "desc":";)", "category":"relationships"},
{"name":"Getting By", "desc":"finally making some money! good job!", "category":"finance"},
{"name":"Millionaire!", "desc":"its what it sounds like", "category":"finance"},
{"name":"Billionaire!", "desc":"Treat your workers with respect.", "category":"finance"},
{"name":"Employed!", "desc":"You got a job.", "category":"finance"},
{"name":"Gambler!", "desc":"You gambled for the first time! ", "category":"finance"},
{"name":"Winner!", "desc":"You won a gamble! ", "category":"finance"},
{"name":"Death!", "desc":"Get a life bro.", "category":"gaming"},
{"name":"virgin", "desc":"Secret!", "category":"gaming"},
{"name":"FloorGang", "desc":"Secret!", "category":"gaming"},
{"name":"First PC!", "desc":"Create your first PC!", "category":"gaming"},
{"name":"Linus Tech Tips", "desc":"Create a beefy Computer with at least 12000 power!", "category":"gaming"},
{"name":"True Gamer", "desc":"Install 5 games on a single PC!", "category":"gaming"},
]
def gamble(odds: int, times: int):
    """Simulate `times` trials with a 1-in-`odds` chance each; return the win count."""
    wins = 0
    for _ in range(times):
        if random.randint(1, odds) == 1:
            wins += 1
    return wins
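# Monte Carlo sanity check (illustrative numbers): with 1-in-6 odds over 600
# trials, gamble(6, 600) returns roughly 100 wins on average.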
def GetFirstKey(dict:dict):
for x in dict:
return x
def removeDupes(test_list:list):
res =[]
for i in test_list:
if i not in res:
res.append(i)
return res
class chat(object):
def __init__(self, chatlog):
self.chatlog = chatlog
def ask(self,question):
response = openai.Completion.create(
engine="davinci",
prompt=self.chatlog + question + "\nAI:",
temperature=0.9,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0.6,
stop=["\n", " Human:", " AI:"]
)
return response["choices"][0]["text"]
def append_interaction_to_chat_log(self, answer):
self.chatlog += "AI:" +answer+"\n"
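# Hypothetical usage of the chat wrapper above, assuming openai.api_key is set
# and the legacy (pre-1.0) Completion API is available:
#   bot = chat("The following is a conversation with an AI.\nHuman:")
#   answer = bot.ask("hello, who are you?")
#   bot.append_interaction_to_chat_log(answer)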
#openai api completions.create -m ada:ft-sentientproductions-2021-12-27-00-47-10 -p "*bad text*"
##-------------------------------------------------------------INV FUNCTS
def RemoveFromInventory(user, item, AmountToRemove:int=None):
if AmountToRemove==None:
AmountToRemove=1
inv = mulah.find_one({"id":user.id}, {"inv"})["inv"]
itemdict = next(x for x in inv if x["name"].lower() ==item.lower())
itemdict["amount"]-=AmountToRemove
if itemdict["amount"]==0:
inv.remove(itemdict)
mulah.update_one({"id":user.id}, {"$set":{"inv":inv}})
def AddToInventory(user, item, ReferenceList:list, AmountToAdd:int=None):
if AmountToAdd==None:
AmountToAdd=1
inv = mulah.find_one({"id":user.id}, {"inv"})["inv"]
itemdict = next((x for x in inv if x["name"].lower() ==item.lower()), None)
ThingToAdd = next(x for x in ReferenceList if x["name"].lower()==item.lower())
if itemdict != None:
itemdict["amount"]+=AmountToAdd
else:
inv.append({"name":ThingToAdd["name"], "amount":AmountToAdd, "desc": "%s"%(ThingToAdd["desc"])})
mulah.update_one({"id":user.id}, {"$set":{"inv":inv}})
def InvCheck(user, item:str, Id=False, amount:int=1) -> bool:
if Id==False:
inv = mulah.find_one({"id":user.id}, {"inv"})["inv"]
check = next((x for x in inv if x["name"].lower()==item.lower() and x["amount"]>=amount), None)
if check == None:
return False
else:
return True
else:
inv = mulah.find_one({"id":user}, {"inv"})["inv"]
check = next((x for x in inv if x["name"].lower()==item.lower() and x["amount"]>=amount), None)
if check == None:
return False
else:
return True
def InvCheckWithItem(user, item:str, Id=False, amount:int=1):
if Id==False:
user = user.id
inv = mulah.find_one({"id":user}, {"inv"})["inv"]
check = next((x for x in inv if x["name"].lower()==item.lower() and x["amount"]>=amount and "parts" not in x.keys()), None)
if check == None:
return False
else:
return check
##----------------------------------------------------Achievement Functs
def XpBar(val, max, fill=":blue_square:", empty=":white_large_square:", NumOfSquares=20, righttoleft=False):
if righttoleft:
valueOfBlue = math.floor((val/max)*NumOfSquares)
if valueOfBlue<0:
return empty*NumOfSquares
valueofWhite = NumOfSquares-valueOfBlue
finalstr = empty*valueofWhite+fill*valueOfBlue
return finalstr
else:
valueOfBlue = math.floor((val/max)*NumOfSquares)
if valueOfBlue<0:
return empty*NumOfSquares
valueofWhite = NumOfSquares-valueOfBlue
finalstr = fill*valueOfBlue+empty*valueofWhite
return finalstr
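# Example of XpBar above: a half-filled progress bar over the default 20 squares.
#   XpBar(5, 10) -> ":blue_square:" * 10 + ":white_large_square:" * 10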
def GetKeysFromDictInList(list:list):
keys= []
for x in list:
for z in x.keys():
keys.append(z)
return keys
def GetLevel(id):
xp = levelling.find_one({"id":id}, {"xp"})["xp"]
lvl = 0
while True:
if xp < ((50*(lvl**2))+(50*(lvl))):
break
lvl+=1
return lvl
def getLevelfromxp(xp):
lvl = 0
while True:
if xp < ((50*(lvl**2))+(50*(lvl))):
break
lvl+=1
return lvl
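# The quadratic curve above puts the threshold for each level at
# 50*lvl**2 + 50*lvl total xp, e.g.:
#   getLevelfromxp(0)   -> 1
#   getLevelfromxp(100) -> 2
#   getLevelfromxp(300) -> 3
#   getLevelfromxp(600) -> 4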
def achievementcheck(user,achievement:str):
try:
value = mulah.find_one({"id":user.id}, {"achievements"})["achievements"]
if achievement in value:
return "✅"
else:
return "❌"
except:
return "❌"
def achievementpercent(achievement:str):
count = 0
achCount=0
for x in mulah.find():
count+=1
try:
if achievement in x["achievements"]:
achCount+=1
except:
pass
return (achCount/count)*100
def ChoiceParts(choices:list, ReactionsList = ['1️⃣', '2️⃣', '3️⃣', '4️⃣','5️⃣','6️⃣','7️⃣','8️⃣','9️⃣','🔟']):
count = 0
reactionlist = []
emptydict = {}
finalstr = ""
for x in choices:
emptydict[ReactionsList[count]]=x
reactionlist.append(ReactionsList[count])
finalstr+="%s %s\n"%(ReactionsList[count], x)
count+=1
return [emptydict, finalstr, reactionlist]
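# Example of ChoiceParts above: it pairs each choice with a numbered reaction
# emoji and returns the mapping, a display string, and the reactions used.
#   ChoiceParts(["yes", "no"])
#   -> [{'1️⃣': 'yes', '2️⃣': 'no'}, '1️⃣ yes\n2️⃣ no\n', ['1️⃣', '2️⃣']]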
async def AchievementEmbed(ctx, EarnedAchievement):
yourachievements = mulah.find_one({"id":ctx.author.id}, {"achievements"})["achievements"]
AchievementDict = next(x for x in achievements if x["name"]==EarnedAchievement)
if AchievementDict["name"] not in yourachievements:
print(AchievementDict["name"])
embed = discord.Embed(title = "Congratulations! you earned the achievement %s"%(AchievementDict["name"]), description = AchievementDict["desc"], color = discord.Color.gold())
embed.set_image(url = 'https://emojipedia-us.s3.dualstack.us-west-1.amazonaws.com/socialmedia/apple/271/trophy_1f3c6.png')
yourachievements.append(AchievementDict["name"])
mulah.update_one({"id":ctx.author.id}, {"$set":{"achievements":yourachievements}})
embed.set_author(name = ctx.author.display_name, icon_url=ctx.author.avatar_url)
await ctx.channel.send(embed=embed)
##-------------------------------------------------------------------------GLOBAL VARIABLES, DATASETS
def getEmotionList():
return ["embarrassed", "horny","surprised","climax", "image", "bed", "angry", "fear", "sad", "dissapointed"]
def getBegList():
return {
1:{"name":"Jake Paul", "value":"ew get away from me", "value":2},
2:{"name":"Mrbeast", "value":"Oh heres 10 grand without the grand", "amount":10},
3:{"name":"Joe Biden", "value":"u smell nice today", "amount":9},
4:{"name":"Naruto", "value":"hiruzen gave me less","amount":1},
5:{"name":"Luffy", "value":"have some bro","amount":5},
6:{"name":"Alien", "value":"Damn capitalism sucks","amount":11},
7:{"name":"The Rock", "value":"passive income baby woo", "amount":6},
8:{"name":"zendaya", "value":"idk what zendaya says bruh", "amount":8},
9:{"name":"Louis XVI", "value":"hey man have some bread", "amount":19},
10:{"name":"Askeladd", "value":"have some gold", "amount":10},
}
def getAchievementList():
return achievements
def getWorkLists():
return [
{"name":"McDonalds worker", "salary":15, "req":1, "words":["bigmac", "burger", "broken"], "sentences":["sorry, the icecream machine is broken", "what can I get for you?", "welcome to mcdonalds"]},
{"name":"Gamer", "salary":150, "req": 5, "words":["dorito", "mechanical", "virgin"], "sentences":["i hate lag", "hes one tap", "what a sweat"]},
{"name":"Bitcoin Miner", "salary":250, "req": 10, "words":["nvidia", "shortage", "shameless"], "sentences":["People hate me for a good reason", "that is passive income", "I like cheese"]},
{"name":"Youtuber", "salary":450, "req": 15, "words":["subscribe", "like", "rich"], "sentences":["make sure to smash that like button", "i dont know how to start this video", "leave your memes in the subreddit"]},
{"name":"Business Man", "salary":160, "req":20, "words":["business", "passive", "pigeon"], "sentences":["sorry thats not passive income", "it is ten times cheaper to keep a customer than to get a new one"]},
{"name":"Jeff bezos", "salary":1000000000, "req":100, "words":["abuse", "rocket", "money"], "sentences":["I love money", "I appreciate the lower class", "i am not a supervillain"]},
]
def getShopItems():
return [
{"name":"phone", "value":800, "desc":"Text your Girlfriend!"},
{"name": "netflixsub", "value": 29, "desc": "Netflix and chill with your gf"},
{"name": "lotteryticket", "value": 2, "desc": "A chance to win 1 million dollars"},
{"name": "movieticket", "value" : 16, "desc":"watch a movie with your gf"},
{"name": "ring", "value" : 10000, "desc":"propose to your gf"},
]
def getBattleItems():
return [
{"name":"UpgradePoint", "value":2000, "desc":"`^upgrade` one of your stats!"},
{"name":"Vaccine",
"type":"Heal",
"desc":"Heal ig",
"rarity":"Legendary",
"value":2000,
"abilities":{"vaccine":1}},
{"name":"Saitamas Dish Gloves",
"type":"hands",
"desc":"The Most powerful item in the game.",
"rarity":"illegal",
"value":1000000000,
"attribute":{"strength":1000000}},
{"name":"Sharingan",
"type":"head",
"desc":"Op doujutsu",
"rarity":"Legendary",
"value":200000,
"abilities":{"Amaterasu":1, "Susanoo":1}},
{"name":"Demon Destroyer",
"type":"primary",
"desc":"Can deflect spells completely!",
"rarity":"Legendary",
"value":20000,
"abilities":{"Black Slash":1, "Deflect":1, "Black Divider":1}
},
{"name":"Sword",
"type":"primary",
"desc":"Basic sword.",
"rarity":"Common",
"value":200,
"abilities":{"Slash":1}
},
{"name":"Spear",
"type":"primary",
"desc":"Basic weapon.",
"rarity":"Common",
"value":200,
"abilities":{"Pierce":1}
},
]
def getToolValues():
return [
{"name": "rifle", "value" : 400, "desc":"`^hunt` to get animals!"},
{"name": "fishpole", "value" : 100, "desc":"`^fish` to catch fish!"},
{"name":"pickaxe", "durability":59, "fortune":1, "craft":{"wood":5}, "value":25, "desc":"cheap mining"},
{"name":"iron pickaxe", "durability":250, "fortune":2, "craft":{"wood":2, "iron":3}, "value":25, "desc":"better mining"},
{"name":"gold pickaxe", "durability":33, "fortune":4, "craft":{"wood":2, "gold":3}, "value":115, "desc":"fine mining"},
{"name":"diamond pickaxe", "durability":1562, "fortune":4, "craft":{"wood":2, "diamond":3}, "value":13010, "desc":"best mining"},
{"name":"axe", "durability":59, "fortune":1, "craft":{"wood":4}, "value":29, "desc":"Chop wood"},
{"name":"iron axe", "durability":250, "fortune":2, "craft":{"wood":2, "iron":3}, "value":25, "desc":"Chop more wood"},
{"name":"gold axe", "durability":33, "fortune":4, "craft":{"wood":2, "gold":3}, "value":115, "desc":"Chop lots of wood"},
{"name":"diamond axe", "durability":1562, "fortune":4, "craft":{"wood":2, "diamond":3}, "value":13010, "desc":"Chop even more wood"},
{"name":"hoe", "durability":59, "fortune":1, "craft":{"wood":2}, "value":10, "desc":"Farm stuff idk"},
{"name":"iron hoe", "durability":250, "fortune":2, "craft":{"wood":2, "iron":2}, "value":20, "desc":"Farm stuff idk"},
{"name":"gold hoe", "durability":32, "fortune":4, "craft":{"wood":2, "gold":2}, "value":80, "desc":"Farm stuff idk"},
{"name":"diamond hoe", "durability":1561, "fortune":4, "craft":{"wood":2, "diamond":2}, "value":8810, "desc":"Farm stuff idk"},
]
def getFarmItems():
return [
{"name":"uncommon fish", "value":10, "desc":"cheap fish to sell"},
{"name":"common fish", "value":20, "desc":"a mediocre fish"},
{"name":"rare fish", "value":50, "desc":"high quality fish"},
{"name":"legendary fish", "value":150, "desc":"very valuable fish"},
{"name":"mouse", "value":10, "desc":"idk why someone would even bother"},
{"name":"rabbit", "value":50, "desc":"tste great in stew"},
{"name":"deer", "value":150, "desc":"sells well"},
{"name":"bigfoot", "value":1000, "desc":"make some mulah"},
{"name":"coal", "value":1, "desc":"non renewable energy source"},
{"name":"iron", "value":5, "desc":"for what"},
{"name":"gold", "value":35, "desc":"terrible durability"},
{"name":"diamond", "value":4400, "desc":"sells for a lot"},
{"name":"ruby", "value":10000, "desc":"One of the most precious things in this world"},
{"name":"wheat", "value":10, "desc":"carbs"},
{"name":"beetroot", "value":20, "desc":"why do people eat this"},
{"name":"melon", "value":50, "desc":"mmm"},
{"name":"pumpkin", "value":150, "desc":"pumpkin pie tastes great"},
{"name":"wood", "value":5, "desc":"profits pile up"},
]
def getPcItems():
return [
{"name":"4gbRam", "type":"ram", "value": 20,"desc":"Use this for your PC!","power":0,"space":0, "rspace": 4000, "synthesis":0, "consumption":10},
{"name":"8gbRam", "type":"ram", "value": 50, "desc":"Reasonable upgrade!","power":0,"space":0, "rspace": 8000, "synthesis":0, "consumption":10},
{"name":"16gbRam", "type":"ram", "value": 100, "desc":"Do you really need this?","power":0,"space":0, "rspace": 16000, "synthesis":0, "consumption":10},
{"name":"32gbRam", "type":"ram", "value": 200, "desc":"Thats overkill man, but you do you ig.","space":0,"power":0, "rspace": 32000, "synthesis":0, "consumption":10},
{"name":"i5","type":"cpu", "value": 160, "desc":"A perfect cpu- if you are on a budget","space":0,"rspace":0, "power":1500 , "synthesis":0, "consumption":250},
{"name":"i7","type":"cpu", "value": 250, "desc":"Great for upper middle range machines!","space":0, "power":2000,"rspace":0, "synthesis":0, "consumption":250 },
{"name":"i9","type":"cpu", "value": 370, "desc":"A great gaming cpu overall.","space":0, "power":2500,"rspace":0, "synthesis":0, "consumption":250 },
{"name":"threadripper","type":"cpu", "value": 3000, "desc":"An excellent cpu that will never know pain.","space":0, "power":4000,"rspace":0, "synthesis":0, "consumption":280 },
{"name":"xeon","type":"cpu", "value": 10000, "desc":"For NASA computers", "power":10000,"space":0,"rspace":0, "synthesis":0, "consumption":350},
{"name":"512SSD","type":"storage", "value": 70, "desc":"Great storage for a decent machine!","rspace":0,"power":0, "synthesis":0, "space": 512000, "consumption":10},
{"name":"1TBSSD","type":"storage", "value": 100, "desc":"This should be enough for most people","rspace":0,"power":0, "synthesis":0, "space": 1000000, "consumption":10 },
{"name":"4TBSSD","type":"storage", "value": 500, "desc":"enough storage for your homework folder","rspace":0,"power":0, "synthesis":0, "space": 4000000, "consumption":10 },
{"name":"1660ti","type":"gpu", "value": 280, "desc":"entry level gpu","space":0, "power":1500,"rspace":0, "synthesis":0,"consumption":120 },
{"name":"1080ti","type":"gpu", "value": 1074, "desc":"Good for mid range machines","space":0, "power":2000,"rspace":0, "synthesis":0, "consumption":250 },
{"name":"2080ti","type":"gpu", "value": 1376, "desc":"imagine using a 20 series","space":0, "power":2500,"rspace":0, "synthesis":0, "consumption":275 },
{"name":"3080ti","type":"gpu", "value": 3000, "desc":"Scalper price!", "space":0, "power":6000,"rspace":0, "synthesis":0, "consumption":350 },
{"name":"650watt","type":"psu", "value": 5000, "desc":"scalper price!","space":0,"power":0, "synthesis":650,"rspace":0, "consumption":0 },
{"name":"750watt","type":"psu", "value": 5000, "desc":"scalper price!","space":0,"power":0, "synthesis":750,"rspace":0, "consumption":0 },
{"name":"850watt","type":"psu", "value": 5000, "desc":"scalper price!","space":0,"power":0, "synthesis":850,"rspace":0, "consumption":0 },
{"name":"900watt","type":"psu", "value": 5000, "desc":"scalper price!","space":0,"power":0, "synthesis":900,"rspace":0, "consumption":0 },
{"name":"motherboard","type":"board", "value": 100, "desc":"build a pc.","space":0,"power":0, "synthesis":0,"rspace":0, "consumption":0 }
]
def getGameItems():
return [
{"name":"Minecraft", "genre":["adventure", "creativity"],"space":1500, "value":26, "desc": "anything can run Minecraft!", "lpincrease":30, "recommendedspecs":{"totalram":8000, "power":1500}},
{"name":"Fortnite", "genre":["fps"],"space":49000, "value":0, "desc": "How much lp were you expecting for fortnite?", "lpincrease":5, "recommendedspecs":{"totalram":8000, "power":2500}},
{"name":"Valorant", "genre":["fps"],"space":14400, "value":0, "desc": "spend 80% of the game spectating.", "lpincrease":25, "recommendedspecs":{"totalram":8000, "power":3000}},
{"name":"Terraria", "genre":["adventure", "creativity"],"space":100, "value":5, "desc": "A great friend of Mc", "lpincrease":20, "recommendedspecs":{"totalram":8000, "power":1500}},
{"name":"Microsoft Flight simulator", "genre":["creativity"],"space":150000, "value":60, "desc": "You probably cant run this.", "lpincrease":40, "recommendedspecs":{"totalram":16000, "power":5000}},
{"name":"Crysis 3", "genre":["adventure"],"space":17000, "value":5, "desc": "Your pc simply cant run this.", "lpincrease":50, "recommendedspecs":{"totalram":32000, "power":7800}},
{"name":"League of Legends", "genre":["strategy"],"space":22000, "value":0, "desc": "Dont do it.", "lpincrease":-50, "recommendedspecs":{"totalram":8000, "power":2800}}
]
def getGameWords():
return [
{"name": "Minecraft", "words":["block", "redstone", "blockhit", "endcrystal"]},
{"name": "Fortnite", "words":["build", "ninja", "virgin", "clap"]},
{"name": "Valorant", "words":["hipfire", "slow", "spectator", "Operator"]},
{"name": "Terraria", "words":["Terraria", "cheap", "fun", "pewdiepie"]},
{"name": "Microsoft Flight Simulator", "words":["plane", "aviation", "pilot", "graphics"]},
{"name": "Crysis 3", "words":["Block", "redstone", "blockhit", "endcrystal"]},
{"name": "League of Legends", "words":["virgin", "discordmod", "glasses", "asian"]},
]
def getEnemyList():
return [
{"name":"Acnologia",
"health":5000,
"strength":800,
"defense":400,
"intelligence":1000,
"mana":1000,
"image":"https://static.wikia.nocookie.net/vsbattles/images/7/71/New_Human_Acnologia_Render.png/revision/latest/scale-to-width-down/400?cb=20200704092623",
"size":((160, 199)),
"paste":((468,125)),
"abilities":{"Fire Ball":1,"Absorb":1,"vaccine":1}
}
]
def getClassDict():
return [
{"class":"warrior",
"desc":"Warrior class. Great all around class.",
"stats":{"strength":50, "defense":50, "intelligence":30, "sense":20, "health":100, "CurrentHealth":100},
"ability":"Rage",
"abilitydesc":"Increase attack damage by 50%"},
{"class":"assassin",
"desc":"Assassin class. deadly damage output, low defense.",
"stats":{"strength":110, "defense":15, "intelligence":30, "sense":50, "health":80, "CurrentHealth":100},
"ability":"stealth",
"abilitydesc":"Become invisible! All attacks will deal full damage, ignoring opponents' defense stat."},
{"class":"Mage",
"desc":"Mage class. Uses movie science",
"stats":{"strength":40, "defense":30, "intelligence":100, "sense":60, "health":100, "CurrentHealth":100},
"ability":"Fire ball",
"abilitydesc":"Send a fire ball at your enemies!"},
{"class":"Healer",
"desc":"Healer class. Can heal. A lot.",
"stats":{"strength":40, "defense":50, "intelligence":80, "sense":30, "health":150, "CurrentHealth":150},
"ability":"Heal!",
"abilitydesc":"50% HP boost!"}
]
def getEffectDict():
return [
{"name":"Bleed","type":"Physical", "category":["health"], "AffectsSender":False, "value":95, "length":4, "ValSet":False},
{"name":"Defenseless","type":"Physical", "category":["defense"], "AffectsSender":False, "value":10, "length":3, "ValSet":True},
{"name":"Regeneration","type":"Physical", "category":["health"], "AffectsSender":False, "value":115, "length":4, "ValSet":True},
{"name":"Amaterasu","type":"Magic", "category":["health"], "AffectsSender":False, "value":80, "length":1000, "ValSet":False},
{"name":"Susanoo","type":"Magic", "category":["defense"], "AffectsSender":True, "value":1000, "length":1000, "ValSet":False},
]
def getBackgroundList():
return [
{"name":"house.jpg", "paste":(378,167), "size":(377,467)},
{"name":"nightsky.jpg", "paste":(60,82), "size":(195,279)},
{"name":"macd.jpg", "paste":(72,6), "size":(204,310)},
{"name":"OliveGarden.jpg", "paste":(133,155), "size":(203,310)},
{"name":"redlobster.jpg", "paste":(213,77), "size":(191,254)}
]
def getRestaurants():
return [
{
"name":"olive garden",
"menu":{
"Chicken and Shrimp Carbonara":27,
"Chicken Parmigiana":23,
"Shrimp Alfredo":26,
"Chicken Marsala":24,
"Cheese Ravioli":19,
"Herb Grilled Salmon":29,
"6oz sirloin":25
},
"background":"OliveGarden.jpg",
"img":"image",
"waiter":"OliveGardenWaiter.jpg"
},
{
"name":"Red Lobster",
"menu":{
"wild caught flounder":10,
"Shrimp Linguini Alfredo":11,
"Lobster pizza":11,
"clam chowder":5,
"classic caesar salad":10
},
"background":"redlobster.jpg",
"img":"image",
"waiter":"RedLobsterWaiter.jpg"
},
{
"name":"mcdonalds",
"menu":{
"bigmac":6,
"QuarterPounder":3,
"Bacon Clubhouse Burger":4,
"fillet-o-fish":3,
"happy meal":4
},
"background":"macd.jpg",
"img":"image",
"waiter":"McdonaldsWaiter.jpg"
}
]
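# Per-type lines for the restaurant date; "{author}", "{restaurant}" and
# "{order}" are format placeholders filled in by the calling command.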
def getDateTalkMap():
    # Same date dialogue for every type; only "Sweet" addresses the player as
    # "darling" instead of the {author} placeholder when asking what to order.
    datemap = []
    for typename in ["Tsundere", "Dandere", "Kuudere", "Sadodere", "Kamidere", "Sweet", "Yandere"]:
        who = "darling" if typename == "Sweet" else "{author}"
        datemap.append({
            "typename": typename,
            "invite": ["{author}! Im hungry! lets go eat!"],
            "react": ["hmm, lets eat at {restaurant}!"],
            "whattoeat": ["hmm, I'll order the {order}!, what will you have, " + who + "?"],
        })
    return datemap
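# Conversation graphs for the talk command, one per girlfriend type. Nodes are
# keyed by "action", each node's "map" appears to list the follow-up choices
# offered to the player, and "{0}" is presumably formatted with their name.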
def getTalkMap():
    # All seven girlfriend types use an identical conversation graph; only the
    # header node's "typename" differs, so the shared nodes are defined once
    # and deep-copied per type instead of being repeated seven times.
    import copy
    shared = [
        {"map":["sad", "scared", "happy", "angry", "horny"], "action":"Im not feeling that way","img":"image", "response":["Im sorry, how are you feeling right now?"], "background":"house.jpg"},
        {"map":["accept invitation"], "action":"horny","img":"embarrassed", "response":["ohh, thats what you were feeling. Thats ok, I can help you out with that ;)"], "background":"house.jpg"},
        {"map":["leave","invite her to go do something"], "action":"end","img":"image", "response":["see you, {0}! I love you!"], "background":"house.jpg"},
        {"map":["No, Im fine", "Im not feeling that way"], "action":"sad","img":"image", "response":["It seems like you are sad. is that right? Thats too bad! is there anything I can do?"], "background":"house.jpg"},
        {"map":["lie on lap","dont lie on lap"], "action":"No, Im fine","img":"image", "response":["I cant give much, but I will support you with all Ive got! come here! *motions to rest on lap*"], "background":"house.jpg"},
        {"map":["leave","lie on lap"], "action":"dont lie on lap","img":"image", "response":["come on, just for a little while? come here! *motions to rest on lap*"], "background":"house.jpg"},
        {"map":["end"], "action":"lie on lap","img":"image", "response":["Hey. I know you can do it. I love you so much. That will never change. \n*You are my sunshine, My only sunshine\n You make me happy when skies are gray!\n You'll never know, dear, how much I love you!\n please dont take my sunshine away!*\n Did you like my voice? I hope so! *smooch*"], "background":"house.jpg"},
        {"map":["hug","kiss","Im really thankful for you!", "Im not feeling that way"], "action":"happy","img":"image", "response":["Thats amazing! im so happy for you!"], "background":"house.jpg"},
        {"map":["hug", "kiss", "end"], "action":"Im really thankful for you!","img":"image", "response":["aww, I love you so much! of course Id support you!"], "background":"house.jpg"},
        {"map":["end","kiss","invite her to go do something"], "action":"hug","img":"image", "response":["hmmm? You want a hug? of course!! *squeezes*"], "background":"house.jpg"},
        {"map":["end","invite her to go do something"], "action":"kiss","img":"image", "response":["*mwah* I'll see you around! I love you!"], "background":"house.jpg"},
        # Two nodes answer "Im really thankful for you!"; both are kept, since
        # lookups scan this list in order.
        {"map":["kiss"], "action":"Im really thankful for you!","img":"image", "response":["hey. I care about you!! Its only normal.."], "background":"house.jpg"},
        {"map":["walk away","lie on lap", "Im not feeling that way"], "action":"angry","img":"image", "response":["Hey. Im not sure if you are in the mood, you seem mad, or annoyed, but wanna rest on my lap?"], "background":"house.jpg"},
        {"map":["go with her", "dont follow"], "action":"walk away","img":"image", "response":["Hey I know just the thing! Follow me!"], "background":"house.jpg"},
        {"map":["end"], "action":"dont follow","img":"image", "response":["all right. I understand, Ill give you some time. If you wanna talk to me about anything, Im always available to you!"], "background":"house.jpg"},
        {"map":["leave","look at stars"], "action":"go with her","img":"image", "response":["this is the night sky! It looks nice, right? You can relax here. I find it nice gazing at the stars"], "background":"nightsky.jpg"},
        {"map":["end"], "action":"leave","img":"image", "response":["Im always ready to talk if you need me. I love you! bye!"], "background":"house.jpg"},
        {"map":["end"], "action":"look at stars","img":"image", "response":["Its nice right? Ill leave you be for now."], "background":"nightsky.jpg"},
        {"map":["end", "Im not feeling that way"], "action":"scared","img":"image", "response":["It sounds like you are scared. I know you are strong. You are also smart! if you cant handle it on your own, find someone to help you! You shouldnt always try to do things on your own!"], "background":"house.jpg"},
        {"map":["comfortgf"], "action":"sadgf","img":"sad", "response":["{0}, im feeling really sad! I dont like this! do something!"], "background":"house.jpg"},
        {"map":["gaming", "movies", "netflix", "horny"], "action":"invite her to go do something","img":"angry", "response":["What do you want to do together? Im... im open to anything!"], "background":"house.jpg"},
        {"map":["accept invitation"], "action":"comfortgf1","img":"embarrassed", "response":["{0}! I.. I wanna f***. Im feeling horny af rn."], "background":"house.jpg"},
        {"map":["gaming"], "action":"comfortgf2","img":"image", "response":["I.. I wanna game."], "background":"house.jpg"},
        {"map":["netflix"], "action":"comfortgf3","img":"image", "response":["I.. I wanna watch netflix. "], "background":"house.jpg"},
        {"map":["movies"], "action":"comfortgf4","img":"image", "response":["I.. I wanna watch a movie."], "background":"house.jpg"},
        {"map":["comfortgf1", "comfortgf2", "comfortgf3","comfortgf4"], "action":"comfortgf","img":"angry", "response":["Hmm, I think I know what will cheer me up!"], "background":"house.jpg"},
        {"map":["great", "Im not feeling that way"], "action":"happygf","img":"image", "response":["Hey! How are you doing?"], "background":"house.jpg"},
        {"map":["yeah sure what?"], "action":"great","img":"embarrassed", "response":["I wanna go do something with you..."], "background":"house.jpg"},
        {"map":["gaming"], "action":"gaming","img":"image", "response":["{0}! Lets play a game! I havent played with you in forever!!"], "background":"house.jpg"},
        {"map":["movies"], "action":"movies","img":"image", "response":["{0}! Lets watch a movie!"], "background":"house.jpg"},
        {"map":["netflix"], "action":"netflix","img":"image", "response":["{0}! Lets watch netflix"], "background":"house.jpg"},
        {"map":["kiss","hug"], "action":"attentionwant","img":"embarrassed", "response":["I want attention!"], "background":"house.jpg"},
        {"map":["end", "Im not feeling that way"], "action":"scaredgf","img":"angry", "response":["Im not sure how im gonna pay the rent."], "background":"house.jpg"},
        {"map":["end", "Im not feeling that way"], "action":"angrygf","img":"angry", "response":["Im not having a good day. I just wanna go to sleep."], "background":"house.jpg"},
    ]
    talkmaps = []
    for typename in ["Sweet", "Tsundere", "Yandere", "Dandere", "Sadodere", "Kuudere", "Kamidere"]:
        # Header node for this type; its "response" is a one-element list,
        # matching every other node in the graph.
        header = {"typename":typename, "action":"none","img":"image", "response":["see you, {0}! I love you!"], "background":"house.jpg"}
        talkmaps.append([header] + copy.deepcopy(shared))
    return talkmaps
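# NSFW scene dialogue per type: each action maps to a [first-time, repeat]
# pair of lines, "climax" closes the scene, and strings like "againinsert"
# and "again pin dowN" read as sentinels matched elsewhere rather than
# display text.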
def getBoinkResponse():
return [
{"typename":"Tsundere",
"start":["Hello!", "again?"],
"kiss":["..thanks! You are really good!", "aww, I love you too!"],
"pin down":["eh? What are you doing?", "again pin dowN"],
"fondle oppai":["*oh* t. That feels really good!", "again? I dont mind though..."],
"suck oppai":["*ahh* How do you like my boobs?", "You really like my boobs, dont you.."],
"finger vegana":["stop.. im really sensitive there!", "I think I might reach my limit! Its amazing!"],
"lick vegana":["How does it taste?", "Youre a greedy boy, {0}, You keep coming back for more, huh?"],
"bite":["awww", "What do you think of my skin?"],
"insert pp":["oh! Its so big!, It feels like heaven!", "againinsert"],
"climax": "*That felt amazing. I love you so so much. I...\n I want 3 kids."},
{"typename":"Yandere",
"start":["What do you plan to do to me today? ;)", "again?"],
"kiss":["huh, feeling horny are you?", "I never get tired of kissing you <3"],
"pin down":["Oh, its new seeing you with the initiative.. I like it!", "again pin dowN"],
"fondle oppai":["These tits are yours. Do you think they are bouncy?", "You really like my tits huh?"],
"suck oppai":["Please feel free to suck on my milkers anytime,", "Came back for more huh?"],
"finger vegana":["That feels sooo good", "This feels great.."],
"lick vegana":["Im sensitive there, but go on. How does this fresh pussy taste, {0}?", "My pussy tasted so good, you came back for more, huh?"],
"bite":["aww, I love you too!", "bite me moree"],
"insert pp":["oh my! You feel even better than I imagined!! I cant tell you how long ive been waiting for this!", "againinsert"],
"climax": "That felt great Lets do this more, and more and more!!!!!"},
{"typename":"Dandere",
"start":["..oh hi!", "again?"],
"kiss":[".. *blushes* thank you..", "I love you too.."],
"pin down":[".. what are you doing?", "again pin dowN"],
"fondle oppai":["oh my.. that feels so good.", "*mph* im sensitive."],
"suck oppai":["that feels so good! I really like this!", "keep going!"],
"finger vegana":["..im sensitive there! I.. might not last long", "*im really sensitive there, {0}-kun"],
"lick vegana":["{0}-kun is licking my..!", "it feels great!"],
"bite":["i want to bite you too!", "let me bite you! *bites back*"],
"insert pp":["i..its so big!", "againinsert"],
"climax": "{0}-kun, that felt amazing!"},
{"typename":"Kuudere",
"start":["Hello.", "again?"],
"kiss":["continue,", "I like your lips."],
"pin down":["Pinning me down now?", "again pin dowN"],
"fondle oppai":["You like these milkers?", "They are bouncy arent they?"],
"suck oppai":["I like this feeling. Keep sucking", "coming back to my milkers, You must like them?"],
"finger vegana":["Im sensitive there. I might come!", "I really like that!"],
"lick vegana":["oh, This feels great", "amazing!!!"],
"bite":["marking me huh? thats pretty kinky.", "*bites back*"],
"insert pp":["its so big!!", "againinsert"],
"climax": "You are great {0}, I love you so much!"},
{"typename":"Sadodere",
"start":["Oh hey, {0}", "again?"],
"kiss":["hmm??? arent you taking initiative!", "these lips really turn you on huh?"],
"pin down":["I never knew this part of you, {0}!", "again pin dowN"],
"fondle oppai":["You go for my tits huh? Pervert!!!!!! hahahahaha, im joking, Go on,", "are my tits that bouncy?"],
"suck oppai":["ara ara, how do my tits taste?", "You like that dont you?"],
"finger vegana":["Is my pussy wetter than you imagined?", "hahaha! It feels great!!"],
"lick vegana":["How does this pussy taste?", "damn, it tastes good huh?"],
"bite":["ooh, {0} is marking me as his! hahaha pervert!! but.. go on. I like it.", "*smacks*"],
"insert pp":["you finally took it out!", "againinsert"],
"climax": "Hey, {0}, youre not that bad. I.. I want 4 kids."},
{"typename":"Sweet",
"start":["oh hello {0}-kun!", "again?"],
"kiss":["huh, you really are taking initiative today!! <3", "I love you so so much!"],
"pin down":["*ehhh?* wha.. what are you doing {0}-kun?\n aha! I see how it is, go on!", "again pin dowN"],
"fondle oppai":["How do these feel? are they bouncy?", "that feels great!"],
"suck oppai":["{0}-kun, you really like my boobs, dont you?", "*mph* keep sucking!"],
"finger vegana":["{0}-kun.... Im really sensitive there!", "ah.. stop, I might come<3"],
"lick vegana":["ohh gosh, thats amazing!", "You came back for more huh? Does my pussy taste that good to you?"],
"bite":["*ahh*, I love you too! *bites back*", "*ahhh*"],
"insert pp":["Its so big!!!! Im so happy! Its even better than I thought!!!!", "againinsert"],
"climax": "You are amazing {0}-kun. I love you so so so much! I wanna be with you forever! I wanna grow old together with you, {0}-kun!"},
{"typename":"Kamidere",
"start":["Oh, hello, {0}", "again?"],
"kiss":["*mph* haha, nice!", "I love you too <3"],
"pin down":["*hmmm?* what are you planning on doing? <3", "again pin dowN"],
"fondle oppai":["*ohh* How do me breasts feel? are they satisfactory?", "You really like my breasts dont you?"],
"suck oppai":["hmm, keep going <3", "oh god, this feels soo good."],
"finger vegana":["I am sensitive there, {0}", "You really want me to orgasm huh?"],
"lick vegana":["How does this fresh taint taste? Is is salty? <3", "mmm, coming back for more huh?"],
"bite":["awww", "Again?"],
"insert pp":["Its.. bigger than I expected..", "againinsert"],
"climax": "That felt great, {0}, I cant wait to spend time with you again <3"},
]
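# Core girlfriend-type definitions: a one-letter id plus canned responses for
# texting (ordered worst to best), movies, netflix, hugging, kissing, and a
# marriage proposal; "{0}" is presumably formatted with the player's name.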
def getGfTypes():
return [
{"typename":"Tsundere", "letter":"a","textresponse": ["You are really bad at that, you know?", "That was fine, I guess.", "That was... Nice. t...tthank you."], "movieresponse": "thanks for taking me out!", "netflixresponse":["That show sucked lmao","that show was ok","Netflix is fun with you!"], "hugresponse":"I love you too! *squeezes*", "kissresponse": "... that was sudden. Youre a great kisser. I wouldnt mind another one <3", "proposeresponse": "YESSS!! YESS I LOVE YOU SOO MUCH {0}!!!"},
{"typename":"Yandere", "letter":"b", "textresponse": ["maybe you should try harder? I will support you in any way I can.", "Thank you for the text.", "Thank you for the text. ily very much." ], "movieresponse": "I want to see more movies with you!", "netflixresponse":["That show sucked lmao","that show was ok","Netflix is fun with you!"], "hugresponse":"Dont move. I wanna stay like this for a few more hours.", "kissresponse": "stop. Dont leave. Kiss me again. Again. And again...", "proposeresponse": "of course Ill marry you!! i want to spend all my time with you! {0}!!!"},
{"typename":"Dandere", "letter":"c", "textresponse": [".. thanks, but.. please try harder next time!","...I appreciate the text.", "Thank you for the text... I love you too."], "movieresponse": "Thank... you.. for taking me out!" , "netflixresponse":["That show sucked lmao","that show was ok","Netflix is fun with you!"], "hugresponse":"T.. thank you.", "kissresponse": "...thanks.", "proposeresponse": ".. of course!!!! I love you so much, {0}!!!"},
{"typename":"Kuudere", "letter":"d", "textresponse": ["That was terrible.", "Decent at best.", "This is great. I love you very much."], "movieresponse": "That was a good movie.", "netflixresponse":["That show sucked lmao","that show was ok","Netflix is fun with you!"], "hugresponse":"Squeeze me more. i like this feeling.", "kissresponse": "Kiss me again. I like that feeling", "proposeresponse": "marry you? yeh sure ig. I guess you are now my fiance, {0}!!!"},
{"typename":"Sweet", "letter":"e", "textresponse": ["Thank you! but try a little bit better next time?", "Thank you! I appreciate what you do!!", "This is amazing!!! Thank you! ily so so much!"], "movieresponse": "woow! that was great! we should do this more often!!", "netflixresponse":["That show sucked lmao","that show was ok","Netflix is fun with you!"], "hugresponse":"aww thanks! I love you too!! *squeezes* I dont ever want to lose you!", "kissresponse": "... that was sudden. Youre a great kisser. I wouldnt mind another one <3 I love you so much!", "proposeresponse": "YES! Of course I want to marry you! I want to spend time with you, Have kids, Grow old together. I love you so much, {0}!!!"},
{"typename":"Sadodere", "letter":"f", "textresponse": ["You are really bad at texting!! I find it amusing.", "That was a decent text! Only Decent though.", "Good job! I am satisfied with that."], "movieresponse": "Isnt that something? Taking your girlfriend out to watch a movie.", "netflixresponse":["That show sucked lmao","that show was ok","Netflix is fun with you!"], "hugresponse":"huh? youre hugging me? Fine. Ill allow it. Pervert.", "kissresponse": ".. AH.. AHAHAHA did you just kiss me? pervert <3", "proposeresponse": "Marry you? Haha, Of course. I love you, {0} <3"},
{"typename":"Kamidere", "letter":"g", "textresponse": ["Your texting skill is poor; It can be improved though.", "That was good effort. However, your text was only decent.", "Excellent. I appreciate it.❤️"], "movieresponse": "Thank you for the invitation. I greatly appreciate it❤️", "netflixresponse":["That show sucked lmao","that show was ok","Netflix is fun with you!"], "hugresponse":"Thank you. I love your embrace.", "kissresponse": "Youre great at that <3.", "proposeresponse": "{0}. Regarding your marriage proposal, I gratefully accept. words cant describe how much you mean to me. I want to spend the rest of my life with you<3."},
]
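# Complaint lines for disliked game genres; only "strategy" is defined so far.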
def getTypeComplaint():
return [
{"typename": "Tsundere",
"strategy": "I dont really like strategy. but I guess its fine."},
{"typename": "Yandere",
"strategy": "strategy isnt my forte. It isnt necessary either. I know everything about you already <3"},
{"typename": "Dandere",
"strategy": "...I would prefer another genre.."},
{"typename": "Kuudere",
"strategy": "strategy isnt fun."},
{"typename": "Sweet",
"strategy": "I really appreciate the thought, but I think we could do another genre?"},
{"typename": "Sadodere",
"strategy": "i dont like strategy. Its gross."},
{"typename": "Kamidere",
"strategy": "I dont enjoy strategy games. They create an uptight atmosphere, that isnt ideal for our relationship."},
]
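# Reactions to a gaming session, keyed by how well it went ("poor", "medium",
# "good"). The last four types currently share identical lines.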
def getGfGamingResponse():
return [
{"typename":"Tsundere", "poor":"That wasnt really fun.", "medium": "I had a good time i guess, but thats to be expected! its a game after all.", "good":"Again! Lets play again! That was really nice!"},
{"typename":"Yandere", "poor":"I will try to do better next time.", "medium": "That was mediocre at best. Developers are terrible!", "good":"That was amazing. please get more love points so you can do me <3."},
{"typename":"Dandere", "poor":"I.. think we should try again?", "medium": "that was fine!", "good":"I... really enjoyed that! Lets do it again soon?"},
{"typename":"Kuudere", "poor":"You are really bad! Its alright though.", "medium": "That was ok i guess, You arent really the best at this game are you?", "good":"You are pretty good actually."},
{"typename":"Sweet", "poor":"You are really bad! Its alright though.", "medium": "That was ok i guess, You arent really the best at this game are you?", "good":"You are pretty good actually."},
{"typename":"Sadodere", "poor":"You are really bad! Its alright though.", "medium": "That was ok i guess, You arent really the best at this game are you?", "good":"You are pretty good actually."},
{"typename":"Kamidere", "poor":"You are really bad! Its alright though.", "medium": "That was ok i guess, You arent really the best at this game are you?", "good":"You are pretty good actually."},
]
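# Praise lines per game genre. Only "strategy" carries per-type flavor text;
# every other genre shares one line across all seven types.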
def getTypeGenrePraise():
    shared = {
        "horror":"That wasnt scary at all!",
        "fps":"I love FPS games!",
        "creativity":"I think Creativity games are the best!",
        "adventure":"I think Adventure games are the best!",
        "animation":"The animation was great! I think the creators did an amazing job dont you think?",
        "action":"Action is great!!",
    }
    strategy = {
        "Tsundere":"I really like strategy!",
        "Yandere":"I love this uptight atmosphere.",
        "Dandere":"...I like this genre!",
        "Kuudere":"strategy is fun.",
        "Sweet":"woow! this is really fun! strategy is really fun!!",
        "Sadodere":"strategy. That sounds so much like you!",
        "Kamidere":"I enjoy strategy. I think its incredibly vital to act logically in a relationship.",
    }
    # Field order per entry: typename, strategy, then the shared genre lines.
    return [{"typename": t, "strategy": s, **shared} for t, s in strategy.items()]
def getTypePraise():
return [
{"typename": "Tsundere", "text": "we should text more often.. I care about you a lot.", "gaming":"I really like playing games!", "movies":"I love movies. ", "relaxing":"I love this quality time with you!"},
{"typename": "Yandere", "text": "Lets text more! I want to know everything about you<3", "gaming":"Gaming is incredibly fun with you. We should do this more often.", "movies":"I love movies. ", "relaxing":"I love this quality time with you!"},
{"typename": "Dandere", "text": "...lets do this more often?", "gaming":"I.. really enjoyed that!! maybe we could play more often?", "movies":"I love movies. ", "relaxing":"I love this quality time with you!"},
{"typename": "Kuudere", "text": "Text me more often.", "gaming":"That was fun. We will play more often from now on.", "movies":"I love movies. ", "relaxing":"I love this quality time with you!"},
{"typename": "Sweet", "text": "I love the text! Thank you for keeping me in touch!", "gaming":"wooW! Im so happy we could play games together! Im glad you remembered that I like gaming!", "movies":"I love movies. I love you so much!! ", "relaxing":"I love this quality time with you!"},
{"typename": "Sadodere", "text": "I found that satisfactory! dont get any weird ideas, though!", "gaming":"I wonder how you knew I like gaming? Pervert!!", "movies":"I love movies. ", "relaxing":"I love this quality time with you!"},
{"typename": "Kamidere", "text": "I found that enjoyable. Texting is in fact, the most practical form of communication. I appreciate you.", "gaming":"I found that enjoyable. Thank you for this. We should play more often.<3", "movies":"I love movies. ", "relaxing":"I love this quality time with you!"},
]
def getGFimage(p:discord.Member,emotion:str="image"):
    emotions=["embarrassed", "horny","surprised","climax", "image", "bed", "angry", "fear", "sad", "dissapointed"]
    gfval = mulah.find_one({"id":p.id}, {"gf"})["gf"]
    emotion = emotion.lower()
    if emotion not in emotions:
        raise noImageError(p)
    # Fall back to the generic "image" art when none exists for this emotion
    image = gfval.get(emotion, gfval.get("image"))
    if image is None:
        raise noImageError(p)
    return image
def addIrrelevantWarning(em:discord.Embed):
em.add_field(name="Irrelevance warning.", value="It appears you are going off topic. Dont.")
openai.organization = "org-6cx7PCsPB7dbTOcOu2oI6nYX"
# The API key is read from the environment; a live secret should never be committed to source
openai.api_key = os.environ["OPENAI_API_KEY"]
def gpt3Classification(query, examples, labels):
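    # Uses OpenAI's legacy Classification endpoint (since removed from the API):
    # `search_model` ranks the labeled examples by relevance to the query, and
    # `model` then chooses the best-fitting label from `labels`.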
a=openai.Classification.create(
search_model="ada",
model="curie",
examples=examples,
query=query,
labels=labels,
)
return a["label"]
def classifyGFText(prompt):
labels = ["good","bad","decent"]
examples = [
["I love you", "good"],
["Why dont you do this correctly?", "bad"],
["where do you want to eat?", "decent"],
["Im doing fine.", "decent"],
["You are so pretty", "good"],
["you look fat", "bad"]
]
return gpt3Classification(prompt, examples=examples, labels=labels)
def classifyGFBoinking(prompt):
labels = [
"kiss",
"hug",
"breast groping",
"pinning down",
"about to climax",
"climax",
"filler",
"irrelevant",
]
examples = [
["*kisses passionately", "kiss"],
["*touches lips*", "kiss"],
["your lips are great", "kiss"],
["*pulls you closer*", "hug"],
["*hugs you tightly*","hug"],
["Hug me really really tight", "hug"],
["your tits are great", "breast groping"],
["your boobs are the best", "breast groping"],
["*grabs breasts*", "breast groping"],
["*pins down*", "pinning down"],
["*pushes you down*", "pinning down"],
["im cumming", "climax"],
["OHHHHH IM COMING", "climax"],
["*cums*", "climax"],
["*nuts inside*", "climax"],
["Im nutting", "climax"],
["im about to cum", "about to climax"],
["im going to nut", "about to climax"],
["IM GOING TO CUM", "about to climax"],
["Im going to climax", "about to climax"],
["oooh, im about to cum", "about to climax"],
["takes you to the store", "irrelevant"],
["How are your grades", "irrelevant"],
["What window issues are you having", "irrelevant"],
["Valorant is hard", "irrelevant"],
["gunfight in afghanistan", "irrelevant"],
["Spiderman is terrible, dont you think", "irrelevant"],
["ok", "filler"],
["lets do that then", "filler"],
["take your time", "filler"],
["im sorry i guess", "filler"],
["I love you too", "filler"],
["sure", "filler"]
]
return gpt3Classification(prompt, examples=examples, labels=labels)
def classifyGFTalking(prompt):
labels = [
"kiss",
"hug",
"filler",
"over",
"nsfw"
]
examples = [
["*kisses passionately", "kiss"],
["*touches lips*", "kiss"],
["your lips are great", "kiss"],
["*pulls you closer*", "hug"],
["*hugs you tightly*","hug"],
["Hug me really really tight", "hug"],
["ok", "filler"],
["lets do that then", "filler"],
["take your time", "filler"],
["im sorry i guess", "filler"],
["I love you too", "filler"],
["sure", "filler"],
["yeah thats prettty cool", "filler"],
["I hate when that happens", "filler"],
["yeah that sucks", "filler"],
["your boobs are the best", "nsfw"],
["*grabs breasts*", "nsfw"],
["*pins down*", "nsfw"],
["*pushes you down*", "nsfw"],
["im cumming", "nsfw"],
["OHHHHH IM COMING", "nsfw"],
["*cums*", "nsfw"],
["*nuts inside*", "nsfw"],
["Im nutting", "nsfw"],
["im about to cum", "nsfw"],
["im going to nut", "nsfw"],
["IM GOING TO CUM", "nsfw"],
["Im going to climax", "nsfw"],
["oooh, im about to cum", "nsfw"],
["bye", "over"],
["cya", "over"],
["Ill see you soon!", "over"],
["I got to go!", "over"],
["bye","over"]
]
return gpt3Classification(prompt, examples=examples, labels=labels)
def getModel(id):
gf = mulah.find_one({"id":id}, {"gf"})["gf"]
gftype = gf["type"]
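    # Each girlfriend personality type maps to its own fine-tuned Curie model,
    # so completions stay in character for that archetype.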
gfdict = {
"Tsundere":"curie:ft-sentientproductions-2021-12-29-17-58-33",
"Yandere":"curie:ft-sentientproductions-2021-12-29-18-01-46",
"Dandere":"curie:ft-sentientproductions-2021-12-29-18-05-49",
"Kuudere":"curie:ft-sentientproductions-2021-12-29-18-08-10",
"Sweet":"curie:ft-sentientproductions-2021-12-29-18-10-57",
"Sadodere":"curie:ft-sentientproductions-2021-12-29-18-13-08",
"Kamidere":"curie:ft-sentientproductions-2021-12-29-18-15-14"
}
return gfdict[gftype]
def classifyGFEmotion(prompt, filterNSFW=True):
nsfw = ["horny","bed","climax"]
examples = [
["sighs", "dissapointed"],
["Why are you like this? Its so annoying", "angry"],
["I love you", "image"],
["that was unexpected", "surprised"],
["stop... you are embarrassing me", "embarrassed"],
["Every time I want to help you, you push me away. It makes me sad.", "sad"],
["thats pretty scary", "fear"],
["You have a nice cock", "horny"],
["put it in me", "horny"],
["ahhh, you are really good at doing this!", "horny"],
["that was great. I want to marry you.", "bed"],
["that was great. we should fuck more often", "bed"],
["AHHHHHHHHhhhhhhhhhhhhhh im coming", "climax"],
["Im cumming!!", "climax"],
["oooohhhhhhh im climaxing!", "climax"]
]
labels = [
"dissapointed",
"angry",
"image",
"surprised",
"embarrassed",
"sad",
"fear",
"horny",
"bed",
"climax"
]
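    # When NSFW filtering is on, drop the NSFW few-shot examples and labels so
    # the classifier can never return an NSFW emotion.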
if filterNSFW:
examples = [x for x in examples if x[1] not in nsfw]
labels = [x for x in labels if x not in nsfw]
return gpt3Classification(
query=prompt,
examples=examples,
labels=labels
)
def getGFresponse(prompt,person:discord.Member):
model = getModel(person.id)
background_prompt = chat.getprompt(person)
girlfriend = mulah.find_one({"id":person.id}, {"gf"})["gf"]
final = gpt3completion(
background_prompt+"\n%s:%s\n%s:"%(person.display_name, prompt, girlfriend["name"]),
model,
person.display_name,
girlfriend["name"]
)
return final
def gpt3completion(prompt, model, you,gf):
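    # The stop sequences end the completion at a newline or at either speaker's
    # name tag, so the model returns a single in-character reply instead of
    # writing both sides of the dialogue.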
openai.Engine.retrieve("davinci")
z = openai.Completion.create(
prompt=prompt,
model=model,
temperature=0.9,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0.6,
stop=["\n", "%s:"%(you), "%s:"%(gf)]
)
return z["choices"][0]["text"]
class chat(object):
def __init__(self, chatlog, you, other, model):
self.chatlog = chatlog
self.you = you
self.other = other
self.model = model
def ask(self,question):
response = openai.Completion.create(
model=self.model,
prompt=self.chatlog +f"\n{self.you}:" +question + f"\n{self.other}:",
temperature=0.9,
max_tokens=150,
top_p=1,
frequency_penalty=0,
presence_penalty=0.6,
stop=["\n", f"{self.you}:", f"{self.other}:"]
)
answer = response["choices"][0]["text"]
self.chatlog += f"\n{self.you}:{question}"+ f"\n{self.other}:{answer}"
return answer
@staticmethod
def getprompt(user:discord.Member):
#{
# "kisses":0,#
# "boinks":0,#
# "dates":0,#
# "hugs":0,#
# "games":0,#
# "text":0,#
# "netflix":0,#
# "movies":0,
# "start": date.today().strftime("%B %d, %Y"),
#}},
gf = mulah.find_one({"id":user.id}, {"gf"})["gf"]
gfdata = mulah.find_one({"id":user.id}, {"gfdata"})["gfdata"]
status = "lover"
if gf["tier"] == 4:
status = "fiance"
prompt = "The following is a conversation between a %s girl whose name is %s and her %s, whose name is %s."%(
gf["type"], gf["name"],status, user.display_name
)
prompt+=" %s has been dating %s since %s. They have kissed %s times, hugged %s times, had sex %s times, played games %s times, texted %s times, watched netlix together %s times, and watched movies %s times"%(
gf["name"], user.display_name, gfdata["start"], gfdata["kisses"], gfdata["hugs"], gfdata["boinks"], gfdata["games"], gfdata["text"], gfdata["netflix"], gfdata["movies"]
)
prompt+=" %s's hobby is %s. Her favorite genre is %s, her least favorite is %s. Her favorite subject is %s."%(
gf["name"], gf["likes"], gf["favorite genre"], gf["dislikes"], gf["favorite subject"]
)
return prompt
def getFunCommands():
return "> `pp`,`roll`,`rate`,`wisdom`, `rickroll`, `yomomma`, `8ball`, `animepic`, `cookie`, `coffee`, `story`"
def getModCommands():
return "> `automod`,`ban`,`kick`,`mute`,`unmute`,`block`,`unblock`,`softban`, `swear`, `announce`,`suggest`, `swearlb`"
def getSolveCommands():
return "> `hangman`, `scramble`"
def getUtilityCommands():
return "> `snipe`, `esnipe`, `poll`, `timer`,`clean`, `choose`,`userinfo`,`serverinfo`,`channellinfo`,`permissions`"
def getGamesCommands():
return "> `mcstatus`, `mcskin`"
def getVcCommands():
return "> `p`,`pause`,`leave`,`resume`,`stop`"
def getMathCommands():
return "> `gcf`,`points`, `simplify`, `herons`, `hardsolve`, `softsolve`"
def getWebCommands():
return "> `question`, `imdb`reddit (group):`sub`,`reset`,`set` "
def getLevelCommands():
return "> `rank`, `ranklb`"
def getEconomyCommands():
return "> `rob`,`work`,`profile`,`worklist`,`apply`,`fish`,`hunt`,`mine`,`farm`,`chop`,`sell`,`craft`,`upgradepoint`,`send`,`achievement`,`achievement`,`balance`,`richlb`,`shop`,`use`, `give`,`gamestats`,`dep`,`withdraw`,`buy`,`inv`,`beg`,`watchlist`,`clearwatchlist` pc (group):`build`,`stats`,`addram`,`install`,`dismantle`,`play`"""
def getGfCommands():
return "> `getgf`,`gfstats`,`breakup`, gf (group):`image`,`netflix`,`hug`,`kiss`,`boink`,`propose`,`date`,`movies`,`text`,`gaming`,`talk`"
def getImageCommands():
return "> `avatar`,`animeface`,`caption`,`ddlc`,`blurpify`,`phcomment`,`toxicity`,`weebify`,`tweet`,`nichijou`,`threats`,`bodypillow`,`baguette`,`deepfry`,`clyde`,`ship`,`lolice`,`fact`,captcha`,`trash`,`whowouldwin`,`awooify`,`changemymind`,`magik`,`jpeg`,`gif`,`cat`,`dog`,`iphonex`,`kannagen`,`minesweeper`,`wanted`,`abouttocry`,`animepic`"
def getDuelsCommands():
return "> `duel`,`equip`,`upgrade`,`begin`"
def getSettingsCommands():
return "> `settings`,config (group): `badword`,`announcement`,`suggestion`,`setprefix`"
##-------------------------------------------------------------------ASYNC FUNCTS
async def Imdb(ctx, moviee):
await ctx.trigger_typing()
moviesDB=IMDb()
movies = moviesDB.search_movie(moviee)
print(movies)
movieID = movies[0].getID()
movie = moviesDB.get_movie(movieID)
yt = YoutubeSearch(str(movie)+" trailer", max_results=1).to_json()
yt_id = str(json.loads(yt)['videos'][0]['id'])
yt_url = 'https://www.youtube.com/watch?v='+yt_id
newyt = YoutubeSearch(str(movie)+" opening", max_results=1).to_json()
newytid = str(json.loads(newyt)['videos'][0]['id'])
    thumbnail_url = "https://img.youtube.com/vi/%s/maxresdefault.jpg"%(newytid)
    try:
        embed = discord.Embed(title = "%s, (%s)"%(movie, movie["year"]),url = yt_url,description = " Genre:%s"%(movie["genres"]), color = ctx.author.color)
    except KeyError:
        embed = discord.Embed(title = "%s"%(movie),url = yt_url,description = " Genre:%s"%(movie["genres"]), color = ctx.author.color)
    try:
        embed.add_field(name = "Synopsis:", value = "%s"%(str(moviesDB.get_movie_synopsis(movieID)["data"]["plot"][0])))
    except (KeyError, IndexError):
        pass
    embed.set_image(url = thumbnail_url)
    embed.add_field(name = "Trailer", value = yt_url, inline=False)
    listofdirectories = ["rating"]
    for x in listofdirectories:
        try:
            embed.add_field(name = x, value = "%s"%(movie[x]))
        except KeyError:
            pass
    try:
        embed.add_field(name= "Episodes:", value = "%s"%(moviesDB.get_movie_episodes(movieID)["data"]["number of episodes"]))
    except Exception:
        pass
    return [embed, movie]
def syntax(command):
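    # Builds a usage string for a command: parameters whose default is NoneType
    # are optional and rendered as [name]; required ones are rendered as <name>.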
cmd_and_aliases = "|".join([str(command), *command.aliases])
params = []
for key, value in command.params.items():
if key not in ("self", "ctx"):
params.append(f"[{key}]" if "NoneType" in str(value) else f"<{key}>")
params = " ".join(params)
return f"```{cmd_and_aliases} {params}```"
def noEmbedSyntax(command):
cmd_and_aliases = "|".join([str(command), *command.aliases])
params = []
for key, value in command.params.items():
if key not in ("self", "ctx"):
params.append(f"[{key}]" if "NoneType" in str(value) else f"<{key}>")
params = " ".join(params)
return f"{cmd_and_aliases} {params}"
def getPrefix(id):
return DiscordGuild.find_one({"id":id}, {"prefix"})["prefix"]
async def ChoiceEmbed(self, ctx, choices:list, TitleOfEmbed:str, ReactionsList=['1️⃣', '2️⃣', '3️⃣', '4️⃣','5️⃣','6️⃣','7️⃣','8️⃣','9️⃣','🔟'],p:discord.Member=None,EmbedToEdit=None):
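    # Renders `choices` as numbered emoji reactions on an embed and waits up to
    # 60 seconds for the target member to react; returns [chosen value, message]
    # so callers can keep editing the same embed. Falls back to a two-page
    # layout with ⬅️/➡️ arrows when there are more choices than reaction emojis.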
count = 0
reactionlist = []
emptydict = {}
finalstr = ""
if len(choices)<=len(ReactionsList):
for x in choices:
emptydict[ReactionsList[count]]=x
reactionlist.append(ReactionsList[count])
finalstr+="%s %s\n"%(ReactionsList[count], x)
count+=1
embed = discord.Embed(title = TitleOfEmbed, description = finalstr, color = ctx.author.color)
        if EmbedToEdit!=None:
            await EmbedToEdit.edit(embed=embed)
            await EmbedToEdit.clear_reactions()
            for x in reactionlist:
                await EmbedToEdit.add_reaction(x)
            # Reuse the edited message for the reaction check below
            ThisMessage = EmbedToEdit
        else:
            ThisMessage = await ctx.channel.send(embed=embed)
            for x in reactionlist:
                await ThisMessage.add_reaction(x)
if not p:
p=ctx.author
def check(reaction, user):
return user==p and str(reaction.emoji) in reactionlist and reaction.message == ThisMessage
        try:
            confirm = await self.client.wait_for('reaction_add',check=check, timeout = 60)
            rawreaction = str(confirm[0])
            if EmbedToEdit!=None:
                return [emptydict[rawreaction], EmbedToEdit]
            else:
                return [emptydict[rawreaction], ThisMessage]
        except asyncio.TimeoutError:
            await ctx.channel.send("You took too long! I guess we arent doing this.")
    else:
        chosen=False
        pgnum=1
        all_choices = choices
        while chosen==False:
            # Rebuild the page state on every pass so flipping pages works
            count = 0
            reactionlist = []
            emptydict = {}
            finalstr = ""
            if pgnum==1:
                page_choices = all_choices[0:9]
            else:
                page_choices = all_choices[9:len(all_choices)]
            for x in page_choices:
                emptydict[ReactionsList[count]]=x
                reactionlist.append(ReactionsList[count])
                finalstr+="%s %s\n"%(ReactionsList[count], x)
                count+=1
            embed = discord.Embed(title = TitleOfEmbed, description = finalstr, color = ctx.author.color)
            if EmbedToEdit!=None:
                await EmbedToEdit.edit(embed=embed)
                await EmbedToEdit.clear_reactions()
                for x in reactionlist:
                    await EmbedToEdit.add_reaction(x)
                ThisMessage = EmbedToEdit
            else:
                ThisMessage = await ctx.channel.send(embed=embed)
                for x in reactionlist:
                    await ThisMessage.add_reaction(x)
                # Later page flips edit this message in place
                EmbedToEdit = ThisMessage
            await ThisMessage.add_reaction("➡️")
            await ThisMessage.add_reaction("⬅️")
            if not p:
                p=ctx.author
            def check(reaction, user):
                # Accept the page-turn arrows as well as the numbered choices
                return user==p and str(reaction.emoji) in reactionlist + ["➡️", "⬅️"] and reaction.message == ThisMessage
            try:
                confirm = await self.client.wait_for('reaction_add',check=check, timeout = 60)
                rawreaction = str(confirm[0])
                if rawreaction=="➡️":
                    pgnum = min(pgnum+1, 2)
                elif rawreaction=="⬅️":
                    pgnum = max(pgnum-1, 1)
                else:
                    if EmbedToEdit!=None:
                        return [emptydict[rawreaction], EmbedToEdit]
                    else:
                        return [emptydict[rawreaction], ThisMessage]
            except asyncio.TimeoutError:
                await ctx.channel.send("You took too long! I guess we arent doing this.")
                return
async def AddChoices(self, ctx, choices:list, MessageToAddTo, p:discord.Member=None):
for x in choices:
await MessageToAddTo.add_reaction(x)
if p==None:
p=ctx.author
def check(reaction, user):
return user==p and str(reaction.emoji) in choices and reaction.message == MessageToAddTo
    try:
        confirm = await self.client.wait_for('reaction_add',check=check, timeout = 60)
        print("Yes, This check worked")
        return str(confirm[0])
    except asyncio.TimeoutError:
        await ctx.channel.send("You took too long!")
        return "Timeout"
class missingItem(commands.CommandError):
def __init__(self, user, missingItem):
self.user=user
self.missingItem=missingItem
def hasItem(itemToCheckFor):
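    # Command check: verifies the invoking user owns `itemToCheckFor` before the
    # command runs. A "pc" is detected as any inventory entry with a "parts"
    # key; everything else goes through InvCheck.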
def predicate(ctx):
if itemToCheckFor.lower()=="pc":
inv = mulah.find_one({"id":ctx.author.id}, {"inv"})["inv"]
for x in inv:
if "parts" in x.keys():
return True
raise missingItem(ctx.author, itemToCheckFor)
elif InvCheck(ctx.author,itemToCheckFor):
return True
else:
raise missingItem(ctx.author, itemToCheckFor)
return commands.check(predicate)
async def StoryEmbed(self, ctx, embedict:list):
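    # Steps through a list of embed definitions like a slideshow: each page
    # (optionally with an attached image file) is shown until the author reacts
    # with ▶️, then the next page replaces it in the same message.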
complete = False
count = 0
while complete == False:
if count==len(embedict):
complete = True
break
currentembed = embedict[count]
embed = discord.Embed(title = currentembed["title"], description = currentembed["description"] ,color =ctx.author.color)
try:
if "file" in currentembed.keys():
await editthis.edit(embed=embed, file = discord.File(currentembed["file"]))
else:
await editthis.edit(embed=embed)
except:
if "file" in currentembed.keys():
editthis = await ctx.channel.send(embed=embed, file = discord.File(currentembed["file"]))
else:
editthis = await ctx.channel.send(embed=embed)
await editthis.add_reaction("▶️")
def check(reaction,userr):
return userr==ctx.author and str(reaction.emoji)=="▶️" and reaction.message==editthis
        try:
            confirm = await self.client.wait_for('reaction_add', check=check, timeout = 60)
            await editthis.clear_reactions()
            count+=1
        except asyncio.TimeoutError:
            await editthis.edit(embed=discord.Embed(title = "You took too long", color = ctx.author.color))
            return
| [
"boinks",
" PLACEHOLDER's hobby is PLACEHOLDER. Her favorite genre is PLACEHOLDER, her least favorite is PLACEHOLDER. Her favorite subject is PLACEHOLDER.",
"name",
" %s has been dating %s since %s. They have kissed %s times, hugged %s times, had sex %s times, played games %s times, texted %s times, watched netlix together %s times, and watched movies %s times",
"The following is a conversation between a %s girl whose name is %s and her %s, whose name is %s.",
"\nAI:",
"netflix"
] |
2024-01-10 | jbdamask/wkid-smaaht | chat_with_docs~rag_fusion.py | from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from .prompt import QUESTION_VARIANT_PROMPT
class RagFusion:
"""Class for generating multiple questions from an example question, querying a vectorstore,
and ranking the results."""
def __init__(self, openai_api_key, vectordb):
"""
Initialize the RagFusion class.
Args:
openai_api_key (str): The API key for OpenAI.
vectordb (object): The vector database object.
"""
self.OPENAI_API_KEY = openai_api_key
self.embeddings = OpenAIEmbeddings(openai_api_key = openai_api_key)
self.db = vectordb
def generate_question_variants(self, question):
"""
Generate multiple variants of a given question.
Args:
question (str): The original question.
Returns:
list: A list of question variants.
"""
llm = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0.6, openai_api_key=self.OPENAI_API_KEY, streaming=True)
formatted_template = QUESTION_VARIANT_PROMPT.format_messages(text=question)
results = llm(formatted_template)
return results.content.split('\n')
def db_lookup(self, question, search_kwargs=""):
"""
Perform a direct lookup against the vectorstore.
Args:
question (str): The question to search for.
search_kwargs (str, optional): Additional search parameters. Defaults to "".
Returns:
list: A list of search results with relevance scores.
"""
if not question:
return
emb = self.embeddings.embed_query(question)
# Note that this method is a LangChain-specific method for Chromadb. Will need to change if I start supporting other vectorstores
return self.db.similarity_search_by_vector_with_relevance_scores(emb, k=4, search_kwargs=search_kwargs)
def reciprocal_rank_fusion(self, search_results_dict, k=60):
"""
Perform a Reciprocal Rank Fusion (RRF) on the search results. RRF
combines the ranks of the results from multiple search queries to produce a single
ranking list.
The choice of 60 as a constant in the Reciprocal Rank Fusion (RRF) formula is somewhat
arbitrary and is often used as a default value in Information Retrieval tasks.
It's a balance between giving enough weight to high-ranked items and not overly penalizing
lower-ranked ones.
Args:
search_results_dict (dict): The search results dictionary.
k (int, optional): The rank parameter. Defaults to 60.
Returns:
dict: A dictionary of fused scores.
"""
fused_scores = {}
doc_ranks = {}
for query, search_results_list in search_results_dict.items():
fused_scores[query] = {}
# Filter out any empty elements that have crept in
if search_results_list is None:
continue
for doc, score in sorted(search_results_list, key=lambda x: x[1], reverse=True):
doc_name = doc.metadata['filename']
doc_ranks[doc_name] = doc_ranks.get(doc_name, 0) + 1
rank = doc_ranks[doc_name]
fused_scores[query][doc_name] = 1 / (rank + k)
fused_scores[query] = {doc: score for doc, score in sorted(fused_scores[query].items(), key=lambda x: x[1], reverse=True)}
return fused_scores | [] |
2024-01-10 | jbdamask/wkid-smaaht | chat_with_docs~prompt.py | from langchain.prompts import PromptTemplate
CONCISE_SUMMARY_MAP_PROMPT_TEMPLATE = """Write a concise summary of the following:
"{text}"
CONCISE SUMMARY:"""
CONCISE_SUMMARY_MAP_PROMPT = PromptTemplate(
template=CONCISE_SUMMARY_MAP_PROMPT_TEMPLATE,
input_variables=["text"]
)
CONCISE_SUMMARY_COMBINE_PROMPT_TEMPLATE = """Write a concise, comprehensive summary of the following:
"{text}"
Also provide up to five suggested follow-up questions as a bulleted list. Only include questions that are
likely answerable by the text and are not already answered in the summary you provided.
CONCISE SUMMARY:"""
CONCISE_SUMMARY_COMBINE_PROMPT = PromptTemplate(
template=CONCISE_SUMMARY_COMBINE_PROMPT_TEMPLATE,
input_variables=["text"]
)
CONCISE_SUMMARY_PROMPT_TEMPLATE = """Write a concise, comprehensive summary of the following:
"{text}"
CONCISE SUMMARY:"""
CONCISE_SUMMARY_PROMPT = PromptTemplate(
template=CONCISE_SUMMARY_PROMPT_TEMPLATE,
input_variables=["text"]
)
from langchain.prompts import ChatPromptTemplate
from langchain.prompts.chat import SystemMessage, HumanMessagePromptTemplate
QUESTION_VARIANT_PROMPT = ChatPromptTemplate.from_messages(
[
SystemMessage(
content=(
"""Generate five variants of the following text that can be used as prompts for vectorstore lookup.
Maintain the theme of the original. Do not number variants in your output. Output must be separated by newlines."""
)
),
HumanMessagePromptTemplate.from_template("{text}"),
]
) | [
"{text}",
"Write a concise, comprehensive summary of the following:\n\n\n\"{text}\"\n\nAlso provide up to five suggested follow-up questions as a bulleted list. Only include questions that are \nlikely answerable by the text and are not already answered in the summary you provided.\n\nCONCISE SUMMARY:",
"Write a concise, comprehensive summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:",
"Generate five variants of the following text that can be used as prompts for vectorstore lookup. \nMaintain the theme of the original. Do not number variants in your output. Output must be separated by newlines.",
"Write a concise summary of the following:\n\n\n\"{text}\"\n\n\nCONCISE SUMMARY:"
] |
2024-01-10 | jbdamask/wkid-smaaht | localapp.py |
# Borrowed heavily from
# https://learn.deeplearning.ai/chatgpt-building-system
# https://github.com/alex000kim/slack-gpt-bot
import os
import openai
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
from pprint import pprint
from src.logger_config import get_logger
import json
from localsrc.utils import (N_CHUNKS_TO_CONCAT_BEFORE_UPDATING, OPENAI_API_KEY,
SLACK_APP_TOKEN, SLACK_BOT_TOKEN, WAIT_MESSAGE,
MAX_TOKENS, DEBUG, prompt,
get_slack_thread, set_prompt_for_user_and_channel, generate_image,
num_tokens_from_messages, process_conversation_history,
update_chat, moderate_messages, get_completion_from_messages,
prepare_payload, get_conversation_history, #process_message,
search_and_chat,
summarize_web_page, summarize_file, register_file, doc_q_and_a) # added imports here
# Configure logging
logger = get_logger(__name__)
# Set the Slack App bot token
app = App(token=SLACK_BOT_TOKEN)
### SLACK EVENT HANDLERS ###
# Slack slash command to return list of all available system prompts
@app.command("/prompts")
def list_prompts(ack, respond):
ack()
p = prompt.list_prompts()
respond(f"{', '.join(p)}")
# Slack slash command to return message associated with a particular system prompt
@app.command("/get_prompt")
def show_prompt(ack, respond, command):
ack()
try:
respond(f"{prompt.get_prompt(command.get('text'))}")
except Exception as e:
respond(f"No such system prompt exsists")
# Slack slash command to change system message. This lets users steer Wkid Smaaht at runtime
@app.command("/set_prompt")
def set_prompt(ack, respond, command):
ack()
logger.info(f"{command.get('user_id')} : {command.get('user_name')}")
if(prompt.get_prompt(command.get('text'))) is not None:
set_prompt_for_user_and_channel(command.get('user_id'), command.get('channel_id'), command.get('text'))
respond(f"Ok, from now on I'll be {command.get('text')}")
else:
respond(f"{command.get('text')} is not a valid prompt key. Type /prompts to see a list of available system prompts")
@app.command("/generate_image")
def make_image(ack, respond, command):
ack({ "response_type": "in_channel", "text": "Command deprecated. Just type @W'kid Smaaht :pix <your text> instead"})
# Listens to incoming messages that contain "hello"
# To learn available listener arguments,
# visit https://slack.dev/bolt-python/api-docs/slack_bolt/kwargs_injection/args.html
# @app.message("hello")
# def message_hello(message, say):
# say(f"Hey there <@{message.get('user')}>!")
# Process direct messages
@app.event("message")
def handle_message_events(body, context, logger):
if is_it_bot(body):
return
# event_router(body, context)
# logger.debug(body)
event = body.get('event')
if event is None:
logger.error("Expected event object in Slack body")
logger.info(body)
return False
# Do nothing if this is a post by the bot
if is_it_bot(body):
logger.debug('Is bot message')
return
bot_user_id = body.get('authorizations')[0]['user_id'] if 'authorizations' in body else context.get('bot_user_id')
channel_id = event.get('channel')
# If the event is from a DM, go ahead and process
if channel_id.startswith('D'):
pass
# If it's an app_mention, this will be handled by Slack's @app.event("app_mention") listener.
# Return so we don't process twice
# elif f"<@{bot_user_id}>" in event.get('text'):
elif 'text' in event and f"<@{bot_user_id}>" in event.get('text'):
return
# If it's neither a DM nor an app_mention, then this is none of our business. Return immediately
else:
return
logger.debug("Processing DM message")
# Check if the event has a subtype
if 'subtype' in body['event']:
# If the subtype is 'file_share', do something
if body['event']['subtype'] == 'file_share':
deal_with_file(body, context)
else:
process_event(body, context)
# Process app mention events
@app.event("app_mention")
def command_handler(body, context):
# event_router(body, context)
event = body.get('event')
if event.get('files') is not None:
deal_with_file(body, context)
else:
process_event(body, context)
# Checks to see if a post is from the W'kid Smaaht bot.
# If so, we don't process
def is_it_bot(body):
if 'message' in body:
b = body.get('message[bot_id]')
if b is not None:
return True
else:
return False
# Processes file upload
def deal_with_file(body, context):
event = body.get('event')
channel_id=event.get('channel')
thread_id = event.get('thread_ts') if event.get('thread_ts') is not None else event.get('ts')
slack_resp = app.client.chat_postMessage(
channel=channel_id,
thread_ts=thread_id,
# text="Ah, I see you uploaded a file. Give me a minute to summarize it for you."
text="Registering doc..."
)
reply_message_ts = slack_resp.get('message', {}).get('ts')
filepath = body['event']['files'][0]['name']
register_file(body['event']['files'][0], channel_id, thread_id)
response = "What would you like to do with this? You can ask me to summarize it or ask questions"
update_chat(app, channel_id, reply_message_ts, response)
# Where the magic happens
def process_event(body, context):
logger.info("process_event() body object:)")
logger.info(body)
logger.debug("process_event() context object:)")
logger.debug(context)
event = body.get('event')
if event is None:
return False
bot_user_id, channel_id, thread_ts, user_id, command_text = prepare_payload(body, context)
if (command_text==WAIT_MESSAGE) or (command_text.startswith("/")):
# No processing needed if the message was generated by this bot or is a Slack slash command
return
if command_text == '':
app.client.chat_postMessage(
channel=channel_id,
thread_ts=thread_ts,
text=f"How can I help you today?"
)
return
slack_resp = app.client.chat_postMessage(
channel=channel_id,
thread_ts=thread_ts,
text=WAIT_MESSAGE
)
reply_message_ts = slack_resp.get('message', {}).get('ts')
conversation_history = get_conversation_history(app, channel_id, thread_ts)
if conversation_history is None:
slack_resp = app.client.chat_postMessage(
channel=channel_id,
thread_ts=thread_ts,
text="Sorry. Slack had a problem processing this message"
)
return
messages = process_conversation_history(conversation_history, bot_user_id, channel_id, thread_ts, user_id)
num_tokens = num_tokens_from_messages(messages)
# TODO: CAN I CONVERT SOME OF THESE TO OPENAI FUNCTIONS?
# https://platform.openai.com/docs/guides/gpt/function-calling
if command_text.startswith(":pix "):
image_text = command_text.replace(":pix ", "").strip()
if image_text:
update_chat(app, channel_id, reply_message_ts, "Generating your image...just a sec")
try:
response = generate_image(image_text)
except Exception as e:
                logger.error(e)
                update_chat(app, channel_id, reply_message_ts, f"Sorry. Error generating image: {e}")
return
app.client.chat_postMessage(
channel=channel_id,
thread_ts=thread_ts,
text=".", # Used to suppress Slack warnings about not including text in the post
blocks=[
{
"type": "image",
"title": {
"type": "plain_text",
"text": image_text,
"emoji": True
},
"image_url": response['blocks'][0]['image_url'],
"alt_text": image_text
}
]
)
else:
update_chat(app, channel_id, reply_message_ts, "You need to provide some text for me to generate an image. For example, A cat eating ice cream.")
elif command_text.startswith(":snc "):
update_chat(app, channel_id, reply_message_ts, "Let me do a bit of research and I'll get right back to you.")
text = command_text.replace(":snc ", "").strip()
response = search_and_chat(messages, text)
update_chat(app, channel_id, reply_message_ts, response)
elif command_text.startswith(":websum "):
update_chat(app, channel_id, reply_message_ts, "I'll try to summarize that page. This may take a minute (literally).")
url = command_text.replace(":websum ", "").split("|")[0].replace("<","").replace(">","").strip()
response = summarize_web_page(url)
update_chat(app, channel_id, reply_message_ts, response)
elif command_text.startswith(":listfiles"):
update_chat(app, channel_id, reply_message_ts, "Listing available files")
# response = list_files_in_thread(channel_id, thread_ts)
files = [message.get('files') for message in conversation_history.data.get('messages') if 'files' in message]
chat_dict = {}
# If you're not sure whether the tuple key exists yet
if (channel_id, thread_ts) not in chat_dict:
chat_dict[(channel_id, thread_ts)] = []
id = 1
for inner_list in files:
for item in inner_list:
chat_dict[(channel_id, thread_ts)].append({
"id": id,
"name": item['name'],
"type": item['filetype'],
"url_private": item['url_private']
})
id += 1
if not chat_dict.get((channel_id, thread_ts), []):
response = "No files in this thread"
else:
response = json.dumps({str(key): value for key, value in chat_dict.items()})
update_chat(app, channel_id, reply_message_ts, response)
elif command_text.startswith(":summarize"):
update_chat(app, channel_id, reply_message_ts, "Give me a minute to summarize this for you.")
# reply_message_ts = slack_resp.get('message', {}).get('ts')
files = [message.get('files') for message in conversation_history.data.get('messages') if 'files' in message]
# Get the most recent file
most_recent_file = files[-1] if files else None
if not most_recent_file:
response = "No files found in this thread"
else:
response = summarize_file(most_recent_file[0].get('name'), app, channel_id, thread_ts, reply_message_ts)
update_chat(app, channel_id, reply_message_ts, response)
elif command_text.startswith(":qa "):
question = command_text.replace(":qa ", "").strip()
update_chat(app, channel_id, reply_message_ts, "Asking questions is a great way to learn! Give me a sec...")
# slack_resp = app.client.chat_postMessage(
# channel=channel_id,
# # thread_ts=thread_ts,
# tread_ts = reply_message_ts,
# text="Asking questions is a great way to learn! Give me a sec..."
# )
# reply_message_ts = slack_resp.get('message', {}).get('ts')
files = [message.get('files') for message in conversation_history.data.get('messages') if 'files' in message]
# Get the most recent file
most_recent_file = files[-1] if files else None
if not most_recent_file:
response = "No files found in this thread"
else:
file = most_recent_file[0]
# TODO I'M NOT LIKING HOW METADATA IS FORMATTED. COMMENT OUT UNTIL I HAVE A BETTER IDEA
# txt, blks = doc_q_and_a(file.get('name'), channel_id, thread_ts, question)
response = doc_q_and_a(file.get('name'), channel_id, thread_ts, question)
# TODO I'M NOT LIKING HOW METADATA IS FORMATTED. COMMENT OUT UNTIL I HAVE A BETTER IDEA
# if blks is not None:
# app.client.chat_update(
# channel=channel_id,
# ts=reply_message_ts,
# blocks=blks
# )
# else:
# update_chat(app, channel_id, reply_message_ts, txt)
app.client.chat_update(
channel=channel_id,
ts=reply_message_ts,
blocks=response
)
else:
try:
openai_response = get_completion_from_messages(messages)
logger.debug("DEBUG: Got response from OpenAI: ", type(openai_response))
chunk_n_update(openai_response, app, channel_id, reply_message_ts)
except Exception as e:
logger.error(f"Error: {e}")
app.client.chat_postMessage(
channel=channel_id,
thread_ts=thread_ts,
text=f"I can't provide a response. Encountered an error:\n`\n{e}\n`"
)
logger.debug("DEBUG: end command_handler")
def chunk_n_update(openai_response, app, channel_id, reply_message_ts):
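    # Stream the OpenAI completion into Slack: buffer N_CHUNKS_TO_CONCAT_BEFORE_UPDATING
    # content deltas between chat_update calls (presumably to stay within Slack's
    # rate limits), then flush the remaining text when finish_reason == 'stop'.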
response_text = ""
ii = 0
for chunk in openai_response:
if chunk.choices[0].delta.get('content'):
ii = ii + 1
response_text += chunk.choices[0].delta.content
if ii > N_CHUNKS_TO_CONCAT_BEFORE_UPDATING:
update_chat(app, channel_id, reply_message_ts, response_text)
ii = 0
elif chunk.choices[0].finish_reason == 'stop':
update_chat(app, channel_id, reply_message_ts, response_text)
response_json = {"response_text": response_text}
logger.info(json.dumps(response_json))
# Start your app
if __name__ == "__main__":
logger.debug("Starting app")
SocketModeHandler(app, SLACK_APP_TOKEN).start()
| [] |
2024-01-10 | jbdamask/wkid-smaaht | wkid_smaaht.py | ###
# SET ENVIRONMENT VARIABLE FOR LOCAL DEVELOPMENT. THIS WILL USE SYSTEM PROMPTS FROM LOCAL FILE.
# FOR PRODUCTION DEPLOYMENT ON AWS, SYSTEM PROMPTS ARE READ FROM DYNAMODB
# export ENV=development
###
from config import get_config
Config = get_config()
import os
import openai
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
from pprint import pprint
from src.logger_config import get_logger
import json
# To test locally, change the next line from src.utils to localsrc.utils
from src.utils import (N_CHUNKS_TO_CONCAT_BEFORE_UPDATING, OPENAI_API_KEY,
SLACK_APP_TOKEN, SLACK_BOT_TOKEN, WAIT_MESSAGE,
MAX_TOKENS, DEBUG, prompt,
get_slack_thread, set_prompt_for_user_and_channel, generate_image,
num_tokens_from_messages, process_conversation_history,
update_chat, moderate_messages, get_completion_from_messages,
prepare_payload, get_conversation_history, #process_message,
search_and_chat,
summarize_web_page, summarize_file, register_file, doc_q_and_a) # added imports here
# Configure logging
logger = get_logger(__name__)
# Set the Slack App bot token
app = App(token=SLACK_BOT_TOKEN)
# Commands
commands = [{'Command': ':pix', 'Description': 'Create image from text using Dall E 2', 'Example': '@W\'kid Smaaht :pix Cat in a flying taco'},
{'Command': ':search', 'Description': 'Search the web', 'Example': '@W\'kid Smaaht :search Recent FDA approvals'},
{'Command': ':webchat', 'Description': 'Automatically summarize a web page and make it available for follow-up questions', 'Example': '@W\'kid Smaaht :webchat https://en.wikipedia.org/wiki/Ramones'},
            {'Command': ':summarize', 'Description': 'Summarize a document that\'s been uploaded to the channel or thread, immediately preceded by @W\'kid Smaaht', 'Example': '@W\'kid Smaaht :summarize'},
{'Command': ':qa', 'Description': 'Ask direct questions about an uploaded document or URL. If URL, you must first run :webchat', 'Example': '@W\'kid Smaaht :qa According to the document I just uploaded, why did the chicken cross the road?'},
]
### SLACK EVENT HANDLERS ###
# Slack slash command to return list of all available system prompts
@app.command("/prompts")
def list_prompts(ack, respond):
ack()
p = prompt.list_prompts()
respond(f"{', '.join(p)}")
# Slack slash command to return message associated with a particular system prompt
@app.command("/get_prompt")
def show_prompt(ack, respond, command):
ack()
try:
respond(f"{prompt.get_prompt(command.get('text'))}")
except Exception as e:
respond(f"No such system prompt exsists")
# Slack slash command to change system message. This lets users steer Wkid Smaaht at runtime
@app.command("/set_prompt")
def set_prompt(ack, respond, command):
ack()
logger.info(f"{command.get('user_id')} : {command.get('user_name')}")
if(prompt.get_prompt(command.get('text'))) is not None:
set_prompt_for_user_and_channel(command.get('user_id'), command.get('channel_id'), command.get('text'))
respond(f"Ok, from now on I'll be {command.get('text')}")
else:
respond(f"{command.get('text')} is not a valid prompt key. Type /prompts to see a list of available system prompts")
@app.command("/generate_image")
def make_image(ack, respond, command):
ack({ "response_type": "in_channel", "text": "Command deprecated. Just type @W'kid Smaaht :pix <your text> instead"})
# Listens to incoming messages that contain "hello"
# To learn available listener arguments,
# visit https://slack.dev/bolt-python/api-docs/slack_bolt/kwargs_injection/args.html
# @app.message("hello")
# def message_hello(message, say):
# say(f"Hey there <@{message.get('user')}>!")
# Process direct messages
@app.event("message")
def handle_message_events(body, context, logger):
if is_it_bot(body):
return
# event_router(body, context)
# logger.debug(body)
event = body.get('event')
if event is None:
logger.error("Expected event object in Slack body")
logger.info(body)
return False
# Do nothing if this is a post by the bot
if is_it_bot(body):
logger.debug('Is bot message')
return
bot_user_id = body.get('authorizations')[0]['user_id'] if 'authorizations' in body else context.get('bot_user_id')
channel_id = event.get('channel')
# If the event is from a DM, go ahead and process
if channel_id.startswith('D'):
pass
# If it's an app_mention, this will be handled by Slack's @app.event("app_mention") listener.
# Return so we don't process twice
# elif f"<@{bot_user_id}>" in event.get('text'):
elif 'text' in event and f"<@{bot_user_id}>" in event.get('text'):
return
# If it's neither a DM nor an app_mention, then this is none of our business. Return immediately
else:
return
logger.debug("Processing DM message")
# Check if the event has a subtype
if 'subtype' in body['event']:
# If the subtype is 'file_share', do something
if body['event']['subtype'] == 'file_share':
deal_with_file(body, context)
else:
process_event(body, context)
# Process app mention events
@app.event("app_mention")
def command_handler(body, context):
# event_router(body, context)
event = body.get('event')
if event.get('files') is not None:
deal_with_file(body, context)
else:
process_event(body, context)
# Checks to see if a post is from the W'kid Smaaht bot.
# If so, we don't process
def is_it_bot(body):
if 'message' in body:
b = body.get('message[bot_id]')
if b is not None:
return True
else:
return False
# Conditional logic to handle Slack events
# def event_router(body, context):
# event = body.get('event', {})
# text = event.get('text', '')
# thread_id=event.get('ts')
# channel_id = event.get('channel', '')
# subtype = event.get('subtype')
# if event.get('type') == 'app_mention':
# # Your code to handle app mentions goes here
# slack_resp = app.client.chat_postMessage(
# channel=channel_id,
# thread_ts=thread_id,
# text=f"Received App mention event of type {event['type']} in {channel_id} SubType: {subtype}"
# )
# elif channel_id.startswith('D') and not text.startswith('<@'):
# # Your code to handle non-mention DMs goes here
# slack_resp = app.client.chat_postMessage(
# channel=channel_id,
# thread_ts=thread_id,
# text=f"Received DM event of type {event['type']} in {channel_id} SubType: {subtype}"
# )
# Processes file upload
def deal_with_file(body, context):
event = body.get('event')
channel_id=event.get('channel')
thread_id = event.get('thread_ts') if event.get('thread_ts') is not None else event.get('ts')
slack_resp = app.client.chat_postMessage(
channel=channel_id,
thread_ts=thread_id,
# text="Ah, I see you uploaded a file. Give me a minute to summarize it for you."
text="Registering doc..."
)
reply_message_ts = slack_resp.get('message', {}).get('ts')
filepath = body['event']['files'][0]['name']
register_file(body['event']['files'][0], channel_id, thread_id)
response = "What would you like to do with this? You can ask me to summarize it by typing :summarize, or ask questions by typing :qa."
update_chat(app, channel_id, reply_message_ts, response)
# Where the magic happens
def process_event(body, context):
logger.info("process_event() body object:)")
logger.info(body)
logger.debug("process_event() context object:)")
logger.debug(context)
event = body.get('event')
if event is None:
return False
bot_user_id, channel_id, thread_ts, user_id, command_text = prepare_payload(body, context)
if (command_text==WAIT_MESSAGE) or (command_text.startswith("/")):
# No processing needed if the message was generated by this bot or is a Slack slash command
return
if command_text == '':
app.client.chat_postMessage(
channel=channel_id,
thread_ts=thread_ts,
text=f"How can I help you today?"
)
return
slack_resp = app.client.chat_postMessage(
channel=channel_id,
thread_ts=thread_ts,
text=WAIT_MESSAGE
)
reply_message_ts = slack_resp.get('message', {}).get('ts')
conversation_history = get_conversation_history(app, channel_id, thread_ts)
if conversation_history is None:
slack_resp = app.client.chat_postMessage(
channel=channel_id,
thread_ts=thread_ts,
text="Sorry. Slack had a problem processing this message"
)
return
messages = process_conversation_history(conversation_history, bot_user_id, channel_id, thread_ts, user_id)
num_tokens = num_tokens_from_messages(messages)
# TODO: CAN I CONVERT SOME OF THESE TO OPENAI FUNCTIONS?
# https://platform.openai.com/docs/guides/gpt/function-calling
if command_text.startswith(":pix "):
image_text = command_text.replace(":pix ", "").strip()
if image_text:
update_chat(app, channel_id, reply_message_ts, "Generating your image...just a sec")
try:
response = generate_image(image_text)
except Exception as e:
                logger.error(e)
                update_chat(app, channel_id, reply_message_ts, f"Sorry. Error generating image: {e}")
return
app.client.chat_postMessage(
channel=channel_id,
thread_ts=thread_ts,
text=".", # Used to suppress Slack warnings about not including text in the post
blocks=[
{
"type": "image",
"title": {
"type": "plain_text",
"text": image_text,
"emoji": True
},
"image_url": response['blocks'][0]['image_url'],
"alt_text": image_text
}
]
)
else:
update_chat(app, channel_id, reply_message_ts, "You need to provide some text for me to generate an image. For example, A cat eating ice cream.")
elif command_text.startswith(":search "):
update_chat(app, channel_id, reply_message_ts, "Let me do a bit of research and I'll get right back to you.")
text = command_text.replace(":search ", "").strip()
response = search_and_chat(messages, text)
update_chat(app, channel_id, reply_message_ts, response)
elif command_text.startswith(":webchat "):
update_chat(app, channel_id, reply_message_ts, "I'll try to summarize that page. This may take a minute (literally).")
url = command_text.replace(":webchat ", "").split("|")[0].replace("<","").replace(">","").strip()
# register_file(url, channel_id, thread_ts)
response = summarize_web_page(url, app=app, channel_id=channel_id, thread_ts=thread_ts, reply_message_ts=reply_message_ts)
update_chat(app, channel_id, reply_message_ts, response)
elif command_text.startswith(":listfiles"):
# FEATURE NOT FULLY IMPLEMENTED YET
update_chat(app, channel_id, reply_message_ts, "Listing available files")
# response = list_files_in_thread(channel_id, thread_ts)
files = [message.get('files') for message in conversation_history.data.get('messages') if 'files' in message]
chat_dict = {}
# If you're not sure whether the tuple key exists yet
if (channel_id, thread_ts) not in chat_dict:
chat_dict[(channel_id, thread_ts)] = []
id = 1
for inner_list in files:
for item in inner_list:
chat_dict[(channel_id, thread_ts)].append({
"id": id,
"name": item['name'],
"type": item['filetype'],
"url_private": item['url_private']
})
id += 1
if not chat_dict.get((channel_id, thread_ts), []):
response = "No files in this thread"
else:
response = json.dumps({str(key): value for key, value in chat_dict.items()})
update_chat(app, channel_id, reply_message_ts, response)
elif command_text.startswith(":summarize"):
update_chat(app, channel_id, reply_message_ts, "Give me a minute to summarize this for you.")
# reply_message_ts = slack_resp.get('message', {}).get('ts')
files = [message.get('files') for message in conversation_history.data.get('messages') if 'files' in message]
# Get the most recent file
most_recent_file = files[-1] if files else None
if not most_recent_file:
response = "No files found in this thread"
else:
response = summarize_file(most_recent_file[0].get('name'), app, channel_id, thread_ts, reply_message_ts)
update_chat(app, channel_id, reply_message_ts, response)
elif command_text.startswith(":qa "):
question = command_text.replace(":qa ", "").strip()
update_chat(app, channel_id, reply_message_ts, "Asking questions is a great way to learn! Give me a sec...")
most_recent_file = ''
wc = ''
file_found = False
webchat_found = False
for message in reversed(conversation_history.data.get('messages')):
if 'files' in message:
files = message.get('files')
most_recent_file = files[-1] if files else None
file_found = True
break
elif 'text' in message:
t = message.get('text')
if ":webchat" in t:
wc = t
webchat_found = True
break
if file_found:
# file = most_recent_file[0]
response = doc_q_and_a(most_recent_file.get('name'), question, app=app, channel_id=channel_id, thread_ts=thread_ts, reply_message_ts=reply_message_ts)
app.client.chat_update(
channel=channel_id,
ts=reply_message_ts,
blocks=response
)
return
elif webchat_found:
# TODO This won't help if Wkid Smaaht is referenced in the middle of a text or along with other user callouts
if wc.startswith('<@'):
wc = wc.split(f"<@{bot_user_id}>")[1].strip()
url = wc.replace(":webchat ", "").split("|")[0].replace("<","").replace(">","").strip()
response = doc_q_and_a(url, question, app=app, channel_id=channel_id, thread_ts=thread_ts, reply_message_ts=reply_message_ts)
app.client.chat_update(
channel=channel_id,
ts=reply_message_ts,
blocks=response
)
return
else:
response = "No files found in this thread"
update_chat(app, channel_id, reply_message_ts, response)
else:
try:
openai_response = get_completion_from_messages(messages)
logger.debug("DEBUG: Got response from OpenAI: ", type(openai_response))
chunk_n_update(openai_response, app, channel_id, reply_message_ts)
except Exception as e:
logger.error(f"Error: {e}")
app.client.chat_postMessage(
channel=channel_id,
thread_ts=thread_ts,
text=f"I can't provide a response. Encountered an error:\n`\n{e}\n`"
)
logger.debug("DEBUG: end command_handler")
def chunk_n_update(openai_response, app, channel_id, reply_message_ts):
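    # Stream the OpenAI completion into Slack: buffer N_CHUNKS_TO_CONCAT_BEFORE_UPDATING
    # content deltas between chat_update calls (presumably to stay within Slack's
    # rate limits), then flush the remaining text when finish_reason == 'stop'.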
response_text = ""
ii = 0
for chunk in openai_response:
if chunk.choices[0].delta.get('content'):
ii = ii + 1
response_text += chunk.choices[0].delta.content
if ii > N_CHUNKS_TO_CONCAT_BEFORE_UPDATING:
update_chat(app, channel_id, reply_message_ts, response_text)
ii = 0
elif chunk.choices[0].finish_reason == 'stop':
update_chat(app, channel_id, reply_message_ts, response_text)
response_json = {"response_text": response_text}
logger.info(json.dumps(response_json))
# Start your app
if __name__ == "__main__":
logger.debug("Starting app")
SocketModeHandler(app, SLACK_APP_TOKEN).start()
| [] |
2024-01-10 | jbdamask/wkid-smaaht | chat_with_docs~lc_file_handler.py | import langchain
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.agents import AgentType, load_tools
from langchain.document_loaders import PyPDFLoader, OnlinePDFLoader, UnstructuredPDFLoader, UnstructuredWordDocumentLoader, UnstructuredFileLoader, WebBaseLoader
# from custom_agent_types import CustomAgentType
import pandas as pd
import abc
import os
import requests
from io import StringIO
from src.logger_config import get_logger
from langchain.vectorstores import Chroma
from langchain.vectorstores import utils
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
# Necessary for ChromaDB to work on Fargate linux instances
__import__('pysqlite3')
import sys
sys.modules['sqlite3'] = sys.modules.pop('pysqlite3')
# Necessary for ChromaDB to work on Fargate linux instances
langchain.verbose = True
# Configure logging
logger = get_logger(__name__)
class Handler(abc.ABC):
first_impression = "What's this file about?"
def __init__(self, file, openai_api_key, slack_bot_token):
        # `file` is either a Slack file dict or a plain string such as a URL
        if not isinstance(file, str):
            _, file_extension = os.path.splitext(file.get('name'))
        else:
            _, file_extension = os.path.splitext(file)
        self.file_type = file_extension.lstrip('.').lower()
self.file = file
self.openai_api_key = openai_api_key
# self.slack_bot_token = slack_bot_token
self.headers = {'Authorization': f'Bearer {slack_bot_token}'}
self.llm = ChatOpenAI(temperature=0,openai_api_key=self.openai_api_key)
# def handle(self, file):
def handle(self):
raise NotImplementedError
@abc.abstractmethod
def instantiate_loader(self, filename):
pass
# TODO - I think I can remove this
def _read_file_content(self, url, SLACK_BOT_TOKEN) :
headers = {'Authorization': f'Bearer {SLACK_BOT_TOKEN}'}
response = requests.get(url, headers=headers)
return response.content
def download_and_store(self):
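        # Pipeline: download the Slack file locally, split it into overlapping
        # 1000-character chunks, tag each chunk with its source filename for
        # later attribution, embed the chunks, and index them in Chroma.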
# headers = {'Authorization': f'Bearer {self.slack_bot_token}'}
url = self.file.get('url_private')
logger.info(url)
filepath = self.download_local_file()
embeddings = OpenAIEmbeddings(openai_api_key = self.openai_api_key)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
self.instantiate_loader(filepath)
documents = self.loader.load()
self.docs = text_splitter.split_documents(documents)
filename = self.file.get('name')
for idx, text in enumerate(self.docs):
self.docs[idx].metadata['filename'] = filename.split('/')[-1]
filtered_docs = utils.filter_complex_metadata(self.docs)
# self.db = Chroma.from_documents(docs, embeddings)
self.db = Chroma.from_documents(filtered_docs, embeddings)
self.delete_local_file(filepath)
# TODO - May or may not want to keep this method
def q_and_a(self, question):
# Assumes an agent has been configured.
result = None
try:
result = self.agent.run(question)
except Exception as e:
logger.error(e)
result = "Sorry, I ran into a problem with the file"
return result
# return self.agent(question) # This invokes the default __call__ method
# Not all filetypes are accessible by LangChain over the web.
# Some need to be downloaded locally
# def _download_local_file(self, headers, directory='downloads'):
def download_local_file(self):
import requests
import uuid
directory='downloads'
url = self.file.get('url_private')
file_type = url.split('.')[-1]
# response = requests.get(url, headers=headers)
response = requests.get(url, headers=self.headers)
# Generate a random UUID
file_uuid = uuid.uuid4()
# Convert the UUID to a string and append the .docx extension
filename = str(file_uuid) + '.' + file_type
# Check if the directory exists and create it if it doesn't
if not os.path.exists(directory):
os.makedirs(directory)
filepath = os.path.join(directory, filename)
with open(filepath, 'wb') as f:
f.write(response.content)
return filepath
# You slob. Clean up after yourself!
def delete_local_file(self, filepath):
if os.path.isfile(filepath):
os.remove(filepath)
class PDFHandler(Handler):
def handle(self):
return f"Handling PDF file: {self.file}"
def instantiate_loader(self, filename):
# self.loader = UnstructuredPDFLoader(filename, mode="elements", metadata_filename=self.file.get('url_private'))
# self.loader = PyPDFLoader(filename, metadata_filename=self.file.get('url_private'))
self.loader = PyPDFLoader(filename)
class DOCXHandler(Handler):
def handle(self):
return f"Handling DOCX file: {self.file}"
def instantiate_loader(self, filename):
self.loader = UnstructuredWordDocumentLoader(filename, mode="elements")
class TxtHandler(Handler):
def handle(self):
return f"Handling txt file: {self.file}"
def instantiate_loader(self, filename):
self.loader = UnstructuredFileLoader(filename, mode="elements")
class WebHandler(Handler):
def handle(self):
return f"Handling web page: {self.file}"
def instantiate_loader(self, filename):
self.loader = WebBaseLoader(filename)
def load_split_store(self):
embeddings = OpenAIEmbeddings(openai_api_key = self.openai_api_key)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
# self.instantiate_loader(self.file.get('url_private'))
self.instantiate_loader(self.file)
documents = self.loader.load()
self.docs = text_splitter.split_documents(documents)
for idx, text in enumerate(self.docs):
self.docs[idx].metadata['filename'] = self.file
# self.docs[idx].metadata['filename'] = self.file.get('name')
filtered_docs = utils.filter_complex_metadata(self.docs)
self.db = Chroma.from_documents(filtered_docs, embeddings)
def read_file(self, url):
logger.info(url)
loader = WebBaseLoader(url)
self.documents = loader.load_and_split()
return self.documents
# TODO - NOT IMPLEMENTED
class PandasWrapperHandler(Handler):
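    # NOTE: depends on create_pandas_dataframe_agent and FORMAT_INSTRUCTIONS,
    # neither of which is imported in this module; wiring those in is part of
    # the TODO above.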
def handle(self):
return f"Wrapping {self.file} in Pandas dataframe"
def _create_agent(self):
self.df.columns = self.df.columns.str.strip()
tools = load_tools(["python_repl"], llm=self.llm)
self.agent = create_pandas_dataframe_agent(
tools=tools,
llm=self.llm,
df=self.df,
verbose=True,
            # These are the only two agent types implemented for pandas at the moment
agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
# agent_type=AgentType.OPENAI_FUNCTIONS,
# max_execution_time=2,
# early_stopping_method="generate",
            format_instructions=FORMAT_INSTRUCTIONS
)
# TODO - NOT IMPLEMENTED
class ExcelHandler(PandasWrapperHandler):
def handle(self):
return f"Handling Excel file: {self.file}"
def read_file(self, url, SLACK_BOT_TOKEN):
file_content = self._read_file_content(url, SLACK_BOT_TOKEN)
self.df = pd.read_excel(file_content, sheet_name=0)
# df.columns = df.columns.str.strip()
# tools = load_tools(["llm-math"], llm=self.llm)
# self.agent = create_pandas_dataframe_agent(
# tools=tools,
# llm=self.llm,
# df=df,
# verbose=True,
# agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
# max_execution_time=1,
# early_stopping_method="generate"
# )
self._create_agent()
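        # self.first_impression is assumed to be defined on the base Handler (an opening question for the file)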
return self.q_and_a(self.first_impression)
# TODO - NOT IMPLEMENTED
class CSVHandler(PandasWrapperHandler):
def handle(self):
return f"Handling CSV file: {self.file}"
def read_file(self, url, SLACK_BOT_TOKEN):
file_content = self._read_file_content(url, SLACK_BOT_TOKEN).decode('utf-8')
self.df = pd.read_csv(StringIO(file_content))
# df.columns = df.columns.str.strip()
# tools = load_tools(["llm-math"], llm=self.llm)
# self.agent = create_pandas_dataframe_agent(tools=tools, llm=self.llm, df=df, verbose=True, agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
# # print(agent.agent.llm_chain.prompt.template)
# # return (agent.run(self.first_impression))
self._create_agent()
return self.q_and_a(self.first_impression)
# self.agent.run("What's the average rate of change from month to month?"))
# TODO - NOT IMPLEMENTED
class JSONHandler(Handler):
def handle(self):
return f"Handling JSON file: {self.file}"
def read_file(self, url, SLACK_BOT_TOKEN):
file_content = self._read_file_content(url, SLACK_BOT_TOKEN)
str_content = file_content.decode('utf-8')
df = pd.read_json(StringIO(str_content))
df.columns = df.columns.str.strip()
agent = create_pandas_dataframe_agent(OpenAI(temperature=0, openai_api_key=self.openai_api_key), df=df, verbose=True, agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION)
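        # NOTE: the agent is created but never stored or returned; this handler is not wired up yet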
# print(agent.agent.llm_chain.prompt.template)
class MarkdownHandler(Handler):
def handle(self):
return f"Handling Markdown file: {self.file}"
def read_file(self):
pass
class HandlerFactory:
# Note - WebHandler isn't in here because this factory is based on
# filetype. Instead, we instantiate it directly if we know we're
# dealing with a web page
handlers = {
"pdf": PDFHandler,
"docx": DOCXHandler,
"txt": TxtHandler,
# "xlsx": ExcelHandler,
# "json": JSONHandler,
# "md": MarkdownHandler,
# "csv": CSVHandler,
}
@classmethod
def get_handler(cls, file, open_api_key, slack_bot_token):
# _, file_extension = os.path.splitext(file)
_, file_extension = os.path.splitext(file.get('name'))
file_type = file_extension.lstrip('.').lower()
        handler_cls = cls.handlers.get(file_type)
        if handler_cls is None:
            raise ValueError(f"No handler for file type {file_type}")
        return handler_cls(file, open_api_key, slack_bot_token)
def create_file_handler(file, openai_api_key, slack_bot_token, webpage=False):
# is_url = 'http://' in file.get('name') or 'https://' in file.get('name')
is_url = 'http://' in file or 'https://' in file
if is_url or webpage:
# file = {'name': file, 'id': file, 'url_private': file}
handler = WebHandler(file, openai_api_key, slack_bot_token)
# elif webpage:
# handler = WebHandler(file, openai_api_key, slack_bot_token)
else:
handler = HandlerFactory.get_handler(file, openai_api_key, slack_bot_token)
return handler
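# Dispatch sketch (hypothetical inputs): Slack file dicts go through the factory,
# while a bare URL string is routed straight to WebHandler.
#
#   h1 = create_file_handler({'name': 'report.pdf', 'url_private': '...'},
#                            openai_api_key, slack_bot_token)   # -> PDFHandler
#   h2 = create_file_handler('https://example.com/page', openai_api_key,
#                            slack_bot_token)                   # -> WebHandler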
class FileRegistry:
def __init__(self):
self.registry = {}
# def add_file(self, filename, channel_id, thread_ts, file_id, private_url, handler, chatWithDoc):
def add_file(self, filename, channel_id, thread_ts, file_id, url_private, handler):
if filename not in self.registry:
self.registry[filename] = {}
key = (channel_id, thread_ts)
if key not in self.registry[filename]:
self.registry[filename][key] = []
self.registry[filename][key].append(
{'file_id': file_id,
'private_url': url_private,
'handler': handler,
# 'chat': chatWithDoc,
})
def get_files(self, filename, channel_id, thread_ts):
key = (channel_id, thread_ts)
if filename in self.registry and key in self.registry[filename]:
return self.registry[filename][key]
else:
return None
def list_files(self, filename):
if filename in self.registry:
return [file for sublist in self.registry[filename].values() for file in sublist]
else:
return None
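# The registry is keyed by filename, then by (channel_id, thread_ts), so the same
# filename is tracked independently per Slack thread. Sketch with hypothetical IDs:
#
#   registry = FileRegistry()
#   registry.add_file('report.pdf', 'C01', '1700000000.0001', 'F123', url, handler)
#   registry.get_files('report.pdf', 'C01', '1700000000.0001')   # -> [{'file_id': 'F123', ...}]
#   registry.get_files('report.pdf', 'C01', '1700000000.0002')   # -> None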
| [] |
2024-01-10 | djordjethai/OpenaiST | prevodilac.py | # functions built around OpenAI Vision and GPT-4 Turbo
# reads mp3, previews mp3, downloads mp3; reads from an image or an image URL, downloads the image description
# reads from txt, pdf and docx; saves to txt, pdf and docx for download, plus mp3 preview and mp3 download
# translation between a range of languages
from openai import OpenAI
import streamlit as st
import os
import io
from myfunc.mojafunkcija import (
st_style,
positive_login,
sacuvaj_dokument,
)
from myfunc.asistenti import (
audio_izlaz,
priprema,
)
import PyPDF2
from langchain.document_loaders import UnstructuredFileLoader
import re
st_style()
client = OpenAI()
version = "19.12.23."
# main program: selects the input document and the languages for translation
def main():
st.markdown(
f"<p style='font-size: 10px; color: grey;'>{version}</p>",
unsafe_allow_html=True,
)
st.subheader("Prevodilac") # Setting the title for Streamlit application
with st.expander("Pročitajte uputstvo"):
st.caption(
"""
### Korisničko Uputstvo za Prevodioca
1. **Ulazni dokument**
- Po potrebi mozete konvertovati sliku ili audio zapis. Inace, mozete ucitati tekstualni dokument u .txt, .pdf ili .docx formatu.
- Odaberite jezik ulaznog dokumenta.
    - Odaberite jezik izlaznog dokumenta.
- Pritisnite Submit
- Učitajte fajl u odabranom formatu koji želite da prevedete. Za sliku i audio videćete i preview.
- Unesite uputstvo za prevodjenje ili prihvatite default opciju.
- Odaberite opciju "Glasovna naracija" po želji.
2. **Čuvanje**
- U levoj bočnoj traci možete sačuvati izlaz u txt, pdf ili docx formatu.
- Tonski zapis možete sačuvati na samom playeru, desno, tri tačke, download.
**Napomena:**
- Za transkribovanje zvučnih zapisa koristi se OpenAI Whisper model. Zvučni zapis mora biti u .MP3 formatu i ne veći od 25Mb.
- Za prevod teksta i citanje sa slika koristi se odgovarajući OpenAI GPT-4 model.
Srećno sa korišćenjem alata za prevodjenje!
"""
)
if "final_content" not in st.session_state:
st.session_state["final_content"] = "Prevod"
st.subheader("Ulazni dokument")
with st.sidebar:
priprema()
with st.form(key="ulaz", clear_on_submit=False):
jezik_izlaza = st.selectbox("izaberite jezik izlaznog dokumenta", ("srpski", "english", "french", "german", "hungarian", "italian", "spanish"))
submit_button = st.form_submit_button(label="Submit")
citaj_tekst(jezik_izlaza)
# sacuvaj_dokument: takes text and a file name, saves it for download in txt, docx and pdf format
# audio_izlaz: takes text and a prompt, output is an mp3 player
# citaj_tekst: reads the text and translates it; takes the output language, output is the translated text
def citaj_tekst(jezik_izlaza):
    client = OpenAI()
st.info("Čita tekst")
uploaded_file = st.file_uploader(
"Izaberite tekst za prevod",
key="upload_file",
type=["txt", "pdf", "docx"],
help = "Odabir dokumenta",
)
if uploaded_file is not None:
with io.open(uploaded_file.name, "wb") as file:
file.write(uploaded_file.getbuffer())
if ".pdf" in uploaded_file.name:
pdf_reader = PyPDF2.PdfReader(uploaded_file)
num_pages = len(pdf_reader.pages)
text_content = ""
for page in range(num_pages):
page_obj = pdf_reader.pages[page]
text_content += page_obj.extract_text()
text_content = text_content.replace("•", "")
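            # collapse spaces between isolated single letters (a common PDF extraction artifact)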
text_content = re.sub(r"(?<=\b\w) (?=\w\b)", "", text_content)
with io.open("temp.txt", "w", encoding="utf-8") as f:
f.write(text_content)
loader = UnstructuredFileLoader("temp.txt", encoding="utf-8")
else:
# Creating a file loader object
loader = UnstructuredFileLoader(file_path=uploaded_file.name, encoding="utf-8")
result = loader.load()
# content = "Zapisnik"
with st.form(key="my_form", clear_on_submit=False):
system_prompt=f"""You are a multi-lingual language expert. You must translate the text to the {jezik_izlaza} language and fix grammar \
and spelling errors but otherwise keep the text as is.
"""
opis = st.text_area(
"Unesite instrukcije za sumarizaciju : ",
system_prompt,
key="prompt_prva",
height=150,
help = "Unos prompta."
)
audio_i = st.checkbox("Glasovna naracija")
opis = f"{opis} {result[0].page_content}"
submit_button = st.form_submit_button(label="Submit")
if submit_button:
with st.spinner("Sačekajte trenutak..."):
response = client.chat.completions.create(
model="gpt-4-1106-preview",
temperature=0,
messages=[
{
"role": "system",
"content": system_prompt
},
{
"role": "user",
"content": opis
}
]
)
content = response.choices[0].message.content
st.session_state["final_content"] = content
if st.session_state.final_content != "Prevod":
            if audio_i:
st.write("Glasovna naracija")
audio_izlaz(st.session_state.final_content)
with st.expander("Sažetak", True):
st.write(st.session_state.final_content) # Displaying the summary
with st.sidebar:
# if st.session_state.final_content !="Prevod":
# st.session_state["final_content"] = content
sacuvaj_dokument(st.session_state.final_content, uploaded_file.name)
# Deployment on Streamlit: login functionality
deployment_environment = os.environ.get("DEPLOYMENT_ENVIRONMENT")
if deployment_environment == "Streamlit":
name, authentication_status, username = positive_login(main, " ")
else:
if __name__ == "__main__":
main() | [
"You are a multi-lingual language expert. You must translate the text to the PLACEHOLDER language and fix grammar and spelling errors but otherwise keep the text as is. \n",
"f\"{opis} {result[0].page_content}"
] |
2024-01-10 | smart-on-fhir/client-py | fhirclient~models~guidanceresponse_tests.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import guidanceresponse
from .fhirdate import FHIRDate
class GuidanceResponseTests(unittest.TestCase):
def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("GuidanceResponse", js["resourceType"])
return guidanceresponse.GuidanceResponse(js)
def testGuidanceResponse1(self):
inst = self.instantiate_from("guidanceresponse-example.json")
self.assertIsNotNone(inst, "Must have instantiated a GuidanceResponse instance")
self.implGuidanceResponse1(inst)
js = inst.as_json()
self.assertEqual("GuidanceResponse", js["resourceType"])
inst2 = guidanceresponse.GuidanceResponse(js)
self.implGuidanceResponse1(inst2)
def implGuidanceResponse1(self, inst):
self.assertEqual(inst.contained[0].id, "outputParameters1")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier[0].system, "http://example.org")
self.assertEqual(inst.identifier[0].value, "guidanceResponse1")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.moduleUri, "http://someguidelineprovider.org/radiology-appropriateness-guidelines.html")
self.assertEqual(inst.occurrenceDateTime.date, FHIRDate("2017-03-10T16:02:00Z").date)
self.assertEqual(inst.occurrenceDateTime.as_json(), "2017-03-10T16:02:00Z")
self.assertEqual(inst.reasonCode[0].text, "Guideline Appropriate Ordering Assessment")
self.assertEqual(inst.requestIdentifier.system, "http://example.org")
self.assertEqual(inst.requestIdentifier.value, "guidanceRequest1")
self.assertEqual(inst.status, "success")
self.assertEqual(inst.text.status, "generated")
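# The example JSON is looked up relative to the working directory unless
# FHIR_UNITTEST_DATADIR points at a directory of FHIR example resources, e.g.
# (path illustrative):
#   FHIR_UNITTEST_DATADIR=/path/to/fhir-examples python -m unittest fhirclient.models.guidanceresponse_tests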
| [] |
2024-01-10 | smart-on-fhir/client-py | fhirclient~models~fhirelementfactory.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
class FHIRElementFactory(object):
""" Factory class to instantiate resources by resource name.
"""
@classmethod
def instantiate(cls, resource_type, jsondict):
""" Instantiate a resource of the type correlating to "resource_type".
:param str resource_type: The name/type of the resource to instantiate
:param dict jsondict: The JSON dictionary to use for data
:returns: A resource of the respective type or `Element`
"""
if "Account" == resource_type:
from . import account
return account.Account(jsondict)
if "AccountCoverage" == resource_type:
from . import account
return account.AccountCoverage(jsondict)
if "AccountGuarantor" == resource_type:
from . import account
return account.AccountGuarantor(jsondict)
if "ActivityDefinition" == resource_type:
from . import activitydefinition
return activitydefinition.ActivityDefinition(jsondict)
if "ActivityDefinitionDynamicValue" == resource_type:
from . import activitydefinition
return activitydefinition.ActivityDefinitionDynamicValue(jsondict)
if "ActivityDefinitionParticipant" == resource_type:
from . import activitydefinition
return activitydefinition.ActivityDefinitionParticipant(jsondict)
if "Address" == resource_type:
from . import address
return address.Address(jsondict)
if "AdverseEvent" == resource_type:
from . import adverseevent
return adverseevent.AdverseEvent(jsondict)
if "AdverseEventSuspectEntity" == resource_type:
from . import adverseevent
return adverseevent.AdverseEventSuspectEntity(jsondict)
if "AdverseEventSuspectEntityCausality" == resource_type:
from . import adverseevent
return adverseevent.AdverseEventSuspectEntityCausality(jsondict)
if "Age" == resource_type:
from . import age
return age.Age(jsondict)
if "AllergyIntolerance" == resource_type:
from . import allergyintolerance
return allergyintolerance.AllergyIntolerance(jsondict)
if "AllergyIntoleranceReaction" == resource_type:
from . import allergyintolerance
return allergyintolerance.AllergyIntoleranceReaction(jsondict)
if "Annotation" == resource_type:
from . import annotation
return annotation.Annotation(jsondict)
if "Appointment" == resource_type:
from . import appointment
return appointment.Appointment(jsondict)
if "AppointmentParticipant" == resource_type:
from . import appointment
return appointment.AppointmentParticipant(jsondict)
if "AppointmentResponse" == resource_type:
from . import appointmentresponse
return appointmentresponse.AppointmentResponse(jsondict)
if "Attachment" == resource_type:
from . import attachment
return attachment.Attachment(jsondict)
if "AuditEvent" == resource_type:
from . import auditevent
return auditevent.AuditEvent(jsondict)
if "AuditEventAgent" == resource_type:
from . import auditevent
return auditevent.AuditEventAgent(jsondict)
if "AuditEventAgentNetwork" == resource_type:
from . import auditevent
return auditevent.AuditEventAgentNetwork(jsondict)
if "AuditEventEntity" == resource_type:
from . import auditevent
return auditevent.AuditEventEntity(jsondict)
if "AuditEventEntityDetail" == resource_type:
from . import auditevent
return auditevent.AuditEventEntityDetail(jsondict)
if "AuditEventSource" == resource_type:
from . import auditevent
return auditevent.AuditEventSource(jsondict)
if "BackboneElement" == resource_type:
from . import backboneelement
return backboneelement.BackboneElement(jsondict)
if "Basic" == resource_type:
from . import basic
return basic.Basic(jsondict)
if "Binary" == resource_type:
from . import binary
return binary.Binary(jsondict)
if "BiologicallyDerivedProduct" == resource_type:
from . import biologicallyderivedproduct
return biologicallyderivedproduct.BiologicallyDerivedProduct(jsondict)
if "BiologicallyDerivedProductCollection" == resource_type:
from . import biologicallyderivedproduct
return biologicallyderivedproduct.BiologicallyDerivedProductCollection(jsondict)
if "BiologicallyDerivedProductManipulation" == resource_type:
from . import biologicallyderivedproduct
return biologicallyderivedproduct.BiologicallyDerivedProductManipulation(jsondict)
if "BiologicallyDerivedProductProcessing" == resource_type:
from . import biologicallyderivedproduct
return biologicallyderivedproduct.BiologicallyDerivedProductProcessing(jsondict)
if "BiologicallyDerivedProductStorage" == resource_type:
from . import biologicallyderivedproduct
return biologicallyderivedproduct.BiologicallyDerivedProductStorage(jsondict)
if "BodyStructure" == resource_type:
from . import bodystructure
return bodystructure.BodyStructure(jsondict)
if "Bundle" == resource_type:
from . import bundle
return bundle.Bundle(jsondict)
if "BundleEntry" == resource_type:
from . import bundle
return bundle.BundleEntry(jsondict)
if "BundleEntryRequest" == resource_type:
from . import bundle
return bundle.BundleEntryRequest(jsondict)
if "BundleEntryResponse" == resource_type:
from . import bundle
return bundle.BundleEntryResponse(jsondict)
if "BundleEntrySearch" == resource_type:
from . import bundle
return bundle.BundleEntrySearch(jsondict)
if "BundleLink" == resource_type:
from . import bundle
return bundle.BundleLink(jsondict)
if "CapabilityStatement" == resource_type:
from . import capabilitystatement
return capabilitystatement.CapabilityStatement(jsondict)
if "CapabilityStatementDocument" == resource_type:
from . import capabilitystatement
return capabilitystatement.CapabilityStatementDocument(jsondict)
if "CapabilityStatementImplementation" == resource_type:
from . import capabilitystatement
return capabilitystatement.CapabilityStatementImplementation(jsondict)
if "CapabilityStatementMessaging" == resource_type:
from . import capabilitystatement
return capabilitystatement.CapabilityStatementMessaging(jsondict)
if "CapabilityStatementMessagingEndpoint" == resource_type:
from . import capabilitystatement
return capabilitystatement.CapabilityStatementMessagingEndpoint(jsondict)
if "CapabilityStatementMessagingSupportedMessage" == resource_type:
from . import capabilitystatement
return capabilitystatement.CapabilityStatementMessagingSupportedMessage(jsondict)
if "CapabilityStatementRest" == resource_type:
from . import capabilitystatement
return capabilitystatement.CapabilityStatementRest(jsondict)
if "CapabilityStatementRestInteraction" == resource_type:
from . import capabilitystatement
return capabilitystatement.CapabilityStatementRestInteraction(jsondict)
if "CapabilityStatementRestResource" == resource_type:
from . import capabilitystatement
return capabilitystatement.CapabilityStatementRestResource(jsondict)
if "CapabilityStatementRestResourceInteraction" == resource_type:
from . import capabilitystatement
return capabilitystatement.CapabilityStatementRestResourceInteraction(jsondict)
if "CapabilityStatementRestResourceOperation" == resource_type:
from . import capabilitystatement
return capabilitystatement.CapabilityStatementRestResourceOperation(jsondict)
if "CapabilityStatementRestResourceSearchParam" == resource_type:
from . import capabilitystatement
return capabilitystatement.CapabilityStatementRestResourceSearchParam(jsondict)
if "CapabilityStatementRestSecurity" == resource_type:
from . import capabilitystatement
return capabilitystatement.CapabilityStatementRestSecurity(jsondict)
if "CapabilityStatementSoftware" == resource_type:
from . import capabilitystatement
return capabilitystatement.CapabilityStatementSoftware(jsondict)
if "CarePlan" == resource_type:
from . import careplan
return careplan.CarePlan(jsondict)
if "CarePlanActivity" == resource_type:
from . import careplan
return careplan.CarePlanActivity(jsondict)
if "CarePlanActivityDetail" == resource_type:
from . import careplan
return careplan.CarePlanActivityDetail(jsondict)
if "CareTeam" == resource_type:
from . import careteam
return careteam.CareTeam(jsondict)
if "CareTeamParticipant" == resource_type:
from . import careteam
return careteam.CareTeamParticipant(jsondict)
if "CatalogEntry" == resource_type:
from . import catalogentry
return catalogentry.CatalogEntry(jsondict)
if "CatalogEntryRelatedEntry" == resource_type:
from . import catalogentry
return catalogentry.CatalogEntryRelatedEntry(jsondict)
if "ChargeItem" == resource_type:
from . import chargeitem
return chargeitem.ChargeItem(jsondict)
if "ChargeItemDefinition" == resource_type:
from . import chargeitemdefinition
return chargeitemdefinition.ChargeItemDefinition(jsondict)
if "ChargeItemDefinitionApplicability" == resource_type:
from . import chargeitemdefinition
return chargeitemdefinition.ChargeItemDefinitionApplicability(jsondict)
if "ChargeItemDefinitionPropertyGroup" == resource_type:
from . import chargeitemdefinition
return chargeitemdefinition.ChargeItemDefinitionPropertyGroup(jsondict)
if "ChargeItemDefinitionPropertyGroupPriceComponent" == resource_type:
from . import chargeitemdefinition
return chargeitemdefinition.ChargeItemDefinitionPropertyGroupPriceComponent(jsondict)
if "ChargeItemPerformer" == resource_type:
from . import chargeitem
return chargeitem.ChargeItemPerformer(jsondict)
if "Claim" == resource_type:
from . import claim
return claim.Claim(jsondict)
if "ClaimAccident" == resource_type:
from . import claim
return claim.ClaimAccident(jsondict)
if "ClaimCareTeam" == resource_type:
from . import claim
return claim.ClaimCareTeam(jsondict)
if "ClaimDiagnosis" == resource_type:
from . import claim
return claim.ClaimDiagnosis(jsondict)
if "ClaimInsurance" == resource_type:
from . import claim
return claim.ClaimInsurance(jsondict)
if "ClaimItem" == resource_type:
from . import claim
return claim.ClaimItem(jsondict)
if "ClaimItemDetail" == resource_type:
from . import claim
return claim.ClaimItemDetail(jsondict)
if "ClaimItemDetailSubDetail" == resource_type:
from . import claim
return claim.ClaimItemDetailSubDetail(jsondict)
if "ClaimPayee" == resource_type:
from . import claim
return claim.ClaimPayee(jsondict)
if "ClaimProcedure" == resource_type:
from . import claim
return claim.ClaimProcedure(jsondict)
if "ClaimRelated" == resource_type:
from . import claim
return claim.ClaimRelated(jsondict)
if "ClaimResponse" == resource_type:
from . import claimresponse
return claimresponse.ClaimResponse(jsondict)
if "ClaimResponseAddItem" == resource_type:
from . import claimresponse
return claimresponse.ClaimResponseAddItem(jsondict)
if "ClaimResponseAddItemDetail" == resource_type:
from . import claimresponse
return claimresponse.ClaimResponseAddItemDetail(jsondict)
if "ClaimResponseAddItemDetailSubDetail" == resource_type:
from . import claimresponse
return claimresponse.ClaimResponseAddItemDetailSubDetail(jsondict)
if "ClaimResponseError" == resource_type:
from . import claimresponse
return claimresponse.ClaimResponseError(jsondict)
if "ClaimResponseInsurance" == resource_type:
from . import claimresponse
return claimresponse.ClaimResponseInsurance(jsondict)
if "ClaimResponseItem" == resource_type:
from . import claimresponse
return claimresponse.ClaimResponseItem(jsondict)
if "ClaimResponseItemAdjudication" == resource_type:
from . import claimresponse
return claimresponse.ClaimResponseItemAdjudication(jsondict)
if "ClaimResponseItemDetail" == resource_type:
from . import claimresponse
return claimresponse.ClaimResponseItemDetail(jsondict)
if "ClaimResponseItemDetailSubDetail" == resource_type:
from . import claimresponse
return claimresponse.ClaimResponseItemDetailSubDetail(jsondict)
if "ClaimResponsePayment" == resource_type:
from . import claimresponse
return claimresponse.ClaimResponsePayment(jsondict)
if "ClaimResponseProcessNote" == resource_type:
from . import claimresponse
return claimresponse.ClaimResponseProcessNote(jsondict)
if "ClaimResponseTotal" == resource_type:
from . import claimresponse
return claimresponse.ClaimResponseTotal(jsondict)
if "ClaimSupportingInfo" == resource_type:
from . import claim
return claim.ClaimSupportingInfo(jsondict)
if "ClinicalImpression" == resource_type:
from . import clinicalimpression
return clinicalimpression.ClinicalImpression(jsondict)
if "ClinicalImpressionFinding" == resource_type:
from . import clinicalimpression
return clinicalimpression.ClinicalImpressionFinding(jsondict)
if "ClinicalImpressionInvestigation" == resource_type:
from . import clinicalimpression
return clinicalimpression.ClinicalImpressionInvestigation(jsondict)
if "CodeSystem" == resource_type:
from . import codesystem
return codesystem.CodeSystem(jsondict)
if "CodeSystemConcept" == resource_type:
from . import codesystem
return codesystem.CodeSystemConcept(jsondict)
if "CodeSystemConceptDesignation" == resource_type:
from . import codesystem
return codesystem.CodeSystemConceptDesignation(jsondict)
if "CodeSystemConceptProperty" == resource_type:
from . import codesystem
return codesystem.CodeSystemConceptProperty(jsondict)
if "CodeSystemFilter" == resource_type:
from . import codesystem
return codesystem.CodeSystemFilter(jsondict)
if "CodeSystemProperty" == resource_type:
from . import codesystem
return codesystem.CodeSystemProperty(jsondict)
if "CodeableConcept" == resource_type:
from . import codeableconcept
return codeableconcept.CodeableConcept(jsondict)
if "Coding" == resource_type:
from . import coding
return coding.Coding(jsondict)
if "Communication" == resource_type:
from . import communication
return communication.Communication(jsondict)
if "CommunicationPayload" == resource_type:
from . import communication
return communication.CommunicationPayload(jsondict)
if "CommunicationRequest" == resource_type:
from . import communicationrequest
return communicationrequest.CommunicationRequest(jsondict)
if "CommunicationRequestPayload" == resource_type:
from . import communicationrequest
return communicationrequest.CommunicationRequestPayload(jsondict)
if "CompartmentDefinition" == resource_type:
from . import compartmentdefinition
return compartmentdefinition.CompartmentDefinition(jsondict)
if "CompartmentDefinitionResource" == resource_type:
from . import compartmentdefinition
return compartmentdefinition.CompartmentDefinitionResource(jsondict)
if "Composition" == resource_type:
from . import composition
return composition.Composition(jsondict)
if "CompositionAttester" == resource_type:
from . import composition
return composition.CompositionAttester(jsondict)
if "CompositionEvent" == resource_type:
from . import composition
return composition.CompositionEvent(jsondict)
if "CompositionRelatesTo" == resource_type:
from . import composition
return composition.CompositionRelatesTo(jsondict)
if "CompositionSection" == resource_type:
from . import composition
return composition.CompositionSection(jsondict)
if "ConceptMap" == resource_type:
from . import conceptmap
return conceptmap.ConceptMap(jsondict)
if "ConceptMapGroup" == resource_type:
from . import conceptmap
return conceptmap.ConceptMapGroup(jsondict)
if "ConceptMapGroupElement" == resource_type:
from . import conceptmap
return conceptmap.ConceptMapGroupElement(jsondict)
if "ConceptMapGroupElementTarget" == resource_type:
from . import conceptmap
return conceptmap.ConceptMapGroupElementTarget(jsondict)
if "ConceptMapGroupElementTargetDependsOn" == resource_type:
from . import conceptmap
return conceptmap.ConceptMapGroupElementTargetDependsOn(jsondict)
if "ConceptMapGroupUnmapped" == resource_type:
from . import conceptmap
return conceptmap.ConceptMapGroupUnmapped(jsondict)
if "Condition" == resource_type:
from . import condition
return condition.Condition(jsondict)
if "ConditionEvidence" == resource_type:
from . import condition
return condition.ConditionEvidence(jsondict)
if "ConditionStage" == resource_type:
from . import condition
return condition.ConditionStage(jsondict)
if "Consent" == resource_type:
from . import consent
return consent.Consent(jsondict)
if "ConsentPolicy" == resource_type:
from . import consent
return consent.ConsentPolicy(jsondict)
if "ConsentProvision" == resource_type:
from . import consent
return consent.ConsentProvision(jsondict)
if "ConsentProvisionActor" == resource_type:
from . import consent
return consent.ConsentProvisionActor(jsondict)
if "ConsentProvisionData" == resource_type:
from . import consent
return consent.ConsentProvisionData(jsondict)
if "ConsentVerification" == resource_type:
from . import consent
return consent.ConsentVerification(jsondict)
if "ContactDetail" == resource_type:
from . import contactdetail
return contactdetail.ContactDetail(jsondict)
if "ContactPoint" == resource_type:
from . import contactpoint
return contactpoint.ContactPoint(jsondict)
if "Contract" == resource_type:
from . import contract
return contract.Contract(jsondict)
if "ContractContentDefinition" == resource_type:
from . import contract
return contract.ContractContentDefinition(jsondict)
if "ContractFriendly" == resource_type:
from . import contract
return contract.ContractFriendly(jsondict)
if "ContractLegal" == resource_type:
from . import contract
return contract.ContractLegal(jsondict)
if "ContractRule" == resource_type:
from . import contract
return contract.ContractRule(jsondict)
if "ContractSigner" == resource_type:
from . import contract
return contract.ContractSigner(jsondict)
if "ContractTerm" == resource_type:
from . import contract
return contract.ContractTerm(jsondict)
if "ContractTermAction" == resource_type:
from . import contract
return contract.ContractTermAction(jsondict)
if "ContractTermActionSubject" == resource_type:
from . import contract
return contract.ContractTermActionSubject(jsondict)
if "ContractTermAsset" == resource_type:
from . import contract
return contract.ContractTermAsset(jsondict)
if "ContractTermAssetContext" == resource_type:
from . import contract
return contract.ContractTermAssetContext(jsondict)
if "ContractTermAssetValuedItem" == resource_type:
from . import contract
return contract.ContractTermAssetValuedItem(jsondict)
if "ContractTermOffer" == resource_type:
from . import contract
return contract.ContractTermOffer(jsondict)
if "ContractTermOfferAnswer" == resource_type:
from . import contract
return contract.ContractTermOfferAnswer(jsondict)
if "ContractTermOfferParty" == resource_type:
from . import contract
return contract.ContractTermOfferParty(jsondict)
if "ContractTermSecurityLabel" == resource_type:
from . import contract
return contract.ContractTermSecurityLabel(jsondict)
if "Contributor" == resource_type:
from . import contributor
return contributor.Contributor(jsondict)
if "Count" == resource_type:
from . import count
return count.Count(jsondict)
if "Coverage" == resource_type:
from . import coverage
return coverage.Coverage(jsondict)
if "CoverageClass" == resource_type:
from . import coverage
return coverage.CoverageClass(jsondict)
if "CoverageCostToBeneficiary" == resource_type:
from . import coverage
return coverage.CoverageCostToBeneficiary(jsondict)
if "CoverageCostToBeneficiaryException" == resource_type:
from . import coverage
return coverage.CoverageCostToBeneficiaryException(jsondict)
if "CoverageEligibilityRequest" == resource_type:
from . import coverageeligibilityrequest
return coverageeligibilityrequest.CoverageEligibilityRequest(jsondict)
if "CoverageEligibilityRequestInsurance" == resource_type:
from . import coverageeligibilityrequest
return coverageeligibilityrequest.CoverageEligibilityRequestInsurance(jsondict)
if "CoverageEligibilityRequestItem" == resource_type:
from . import coverageeligibilityrequest
return coverageeligibilityrequest.CoverageEligibilityRequestItem(jsondict)
if "CoverageEligibilityRequestItemDiagnosis" == resource_type:
from . import coverageeligibilityrequest
return coverageeligibilityrequest.CoverageEligibilityRequestItemDiagnosis(jsondict)
if "CoverageEligibilityRequestSupportingInfo" == resource_type:
from . import coverageeligibilityrequest
return coverageeligibilityrequest.CoverageEligibilityRequestSupportingInfo(jsondict)
if "CoverageEligibilityResponse" == resource_type:
from . import coverageeligibilityresponse
return coverageeligibilityresponse.CoverageEligibilityResponse(jsondict)
if "CoverageEligibilityResponseError" == resource_type:
from . import coverageeligibilityresponse
return coverageeligibilityresponse.CoverageEligibilityResponseError(jsondict)
if "CoverageEligibilityResponseInsurance" == resource_type:
from . import coverageeligibilityresponse
return coverageeligibilityresponse.CoverageEligibilityResponseInsurance(jsondict)
if "CoverageEligibilityResponseInsuranceItem" == resource_type:
from . import coverageeligibilityresponse
return coverageeligibilityresponse.CoverageEligibilityResponseInsuranceItem(jsondict)
if "CoverageEligibilityResponseInsuranceItemBenefit" == resource_type:
from . import coverageeligibilityresponse
return coverageeligibilityresponse.CoverageEligibilityResponseInsuranceItemBenefit(jsondict)
if "DataRequirement" == resource_type:
from . import datarequirement
return datarequirement.DataRequirement(jsondict)
if "DataRequirementCodeFilter" == resource_type:
from . import datarequirement
return datarequirement.DataRequirementCodeFilter(jsondict)
if "DataRequirementDateFilter" == resource_type:
from . import datarequirement
return datarequirement.DataRequirementDateFilter(jsondict)
if "DataRequirementSort" == resource_type:
from . import datarequirement
return datarequirement.DataRequirementSort(jsondict)
if "DetectedIssue" == resource_type:
from . import detectedissue
return detectedissue.DetectedIssue(jsondict)
if "DetectedIssueEvidence" == resource_type:
from . import detectedissue
return detectedissue.DetectedIssueEvidence(jsondict)
if "DetectedIssueMitigation" == resource_type:
from . import detectedissue
return detectedissue.DetectedIssueMitigation(jsondict)
if "Device" == resource_type:
from . import device
return device.Device(jsondict)
if "DeviceDefinition" == resource_type:
from . import devicedefinition
return devicedefinition.DeviceDefinition(jsondict)
if "DeviceDefinitionCapability" == resource_type:
from . import devicedefinition
return devicedefinition.DeviceDefinitionCapability(jsondict)
if "DeviceDefinitionDeviceName" == resource_type:
from . import devicedefinition
return devicedefinition.DeviceDefinitionDeviceName(jsondict)
if "DeviceDefinitionMaterial" == resource_type:
from . import devicedefinition
return devicedefinition.DeviceDefinitionMaterial(jsondict)
if "DeviceDefinitionProperty" == resource_type:
from . import devicedefinition
return devicedefinition.DeviceDefinitionProperty(jsondict)
if "DeviceDefinitionSpecialization" == resource_type:
from . import devicedefinition
return devicedefinition.DeviceDefinitionSpecialization(jsondict)
if "DeviceDefinitionUdiDeviceIdentifier" == resource_type:
from . import devicedefinition
return devicedefinition.DeviceDefinitionUdiDeviceIdentifier(jsondict)
if "DeviceDeviceName" == resource_type:
from . import device
return device.DeviceDeviceName(jsondict)
if "DeviceMetric" == resource_type:
from . import devicemetric
return devicemetric.DeviceMetric(jsondict)
if "DeviceMetricCalibration" == resource_type:
from . import devicemetric
return devicemetric.DeviceMetricCalibration(jsondict)
if "DeviceProperty" == resource_type:
from . import device
return device.DeviceProperty(jsondict)
if "DeviceRequest" == resource_type:
from . import devicerequest
return devicerequest.DeviceRequest(jsondict)
if "DeviceRequestParameter" == resource_type:
from . import devicerequest
return devicerequest.DeviceRequestParameter(jsondict)
if "DeviceSpecialization" == resource_type:
from . import device
return device.DeviceSpecialization(jsondict)
if "DeviceUdiCarrier" == resource_type:
from . import device
return device.DeviceUdiCarrier(jsondict)
if "DeviceUseStatement" == resource_type:
from . import deviceusestatement
return deviceusestatement.DeviceUseStatement(jsondict)
if "DeviceVersion" == resource_type:
from . import device
return device.DeviceVersion(jsondict)
if "DiagnosticReport" == resource_type:
from . import diagnosticreport
return diagnosticreport.DiagnosticReport(jsondict)
if "DiagnosticReportMedia" == resource_type:
from . import diagnosticreport
return diagnosticreport.DiagnosticReportMedia(jsondict)
if "Distance" == resource_type:
from . import distance
return distance.Distance(jsondict)
if "DocumentManifest" == resource_type:
from . import documentmanifest
return documentmanifest.DocumentManifest(jsondict)
if "DocumentManifestRelated" == resource_type:
from . import documentmanifest
return documentmanifest.DocumentManifestRelated(jsondict)
if "DocumentReference" == resource_type:
from . import documentreference
return documentreference.DocumentReference(jsondict)
if "DocumentReferenceContent" == resource_type:
from . import documentreference
return documentreference.DocumentReferenceContent(jsondict)
if "DocumentReferenceContext" == resource_type:
from . import documentreference
return documentreference.DocumentReferenceContext(jsondict)
if "DocumentReferenceRelatesTo" == resource_type:
from . import documentreference
return documentreference.DocumentReferenceRelatesTo(jsondict)
if "DomainResource" == resource_type:
from . import domainresource
return domainresource.DomainResource(jsondict)
if "Dosage" == resource_type:
from . import dosage
return dosage.Dosage(jsondict)
if "DosageDoseAndRate" == resource_type:
from . import dosage
return dosage.DosageDoseAndRate(jsondict)
if "Duration" == resource_type:
from . import duration
return duration.Duration(jsondict)
if "EffectEvidenceSynthesis" == resource_type:
from . import effectevidencesynthesis
return effectevidencesynthesis.EffectEvidenceSynthesis(jsondict)
if "EffectEvidenceSynthesisCertainty" == resource_type:
from . import effectevidencesynthesis
return effectevidencesynthesis.EffectEvidenceSynthesisCertainty(jsondict)
if "EffectEvidenceSynthesisCertaintyCertaintySubcomponent" == resource_type:
from . import effectevidencesynthesis
return effectevidencesynthesis.EffectEvidenceSynthesisCertaintyCertaintySubcomponent(jsondict)
if "EffectEvidenceSynthesisEffectEstimate" == resource_type:
from . import effectevidencesynthesis
return effectevidencesynthesis.EffectEvidenceSynthesisEffectEstimate(jsondict)
if "EffectEvidenceSynthesisEffectEstimatePrecisionEstimate" == resource_type:
from . import effectevidencesynthesis
return effectevidencesynthesis.EffectEvidenceSynthesisEffectEstimatePrecisionEstimate(jsondict)
if "EffectEvidenceSynthesisResultsByExposure" == resource_type:
from . import effectevidencesynthesis
return effectevidencesynthesis.EffectEvidenceSynthesisResultsByExposure(jsondict)
if "EffectEvidenceSynthesisSampleSize" == resource_type:
from . import effectevidencesynthesis
return effectevidencesynthesis.EffectEvidenceSynthesisSampleSize(jsondict)
if "Element" == resource_type:
from . import element
return element.Element(jsondict)
if "ElementDefinition" == resource_type:
from . import elementdefinition
return elementdefinition.ElementDefinition(jsondict)
if "ElementDefinitionBase" == resource_type:
from . import elementdefinition
return elementdefinition.ElementDefinitionBase(jsondict)
if "ElementDefinitionBinding" == resource_type:
from . import elementdefinition
return elementdefinition.ElementDefinitionBinding(jsondict)
if "ElementDefinitionConstraint" == resource_type:
from . import elementdefinition
return elementdefinition.ElementDefinitionConstraint(jsondict)
if "ElementDefinitionExample" == resource_type:
from . import elementdefinition
return elementdefinition.ElementDefinitionExample(jsondict)
if "ElementDefinitionMapping" == resource_type:
from . import elementdefinition
return elementdefinition.ElementDefinitionMapping(jsondict)
if "ElementDefinitionSlicing" == resource_type:
from . import elementdefinition
return elementdefinition.ElementDefinitionSlicing(jsondict)
if "ElementDefinitionSlicingDiscriminator" == resource_type:
from . import elementdefinition
return elementdefinition.ElementDefinitionSlicingDiscriminator(jsondict)
if "ElementDefinitionType" == resource_type:
from . import elementdefinition
return elementdefinition.ElementDefinitionType(jsondict)
if "Encounter" == resource_type:
from . import encounter
return encounter.Encounter(jsondict)
if "EncounterClassHistory" == resource_type:
from . import encounter
return encounter.EncounterClassHistory(jsondict)
if "EncounterDiagnosis" == resource_type:
from . import encounter
return encounter.EncounterDiagnosis(jsondict)
if "EncounterHospitalization" == resource_type:
from . import encounter
return encounter.EncounterHospitalization(jsondict)
if "EncounterLocation" == resource_type:
from . import encounter
return encounter.EncounterLocation(jsondict)
if "EncounterParticipant" == resource_type:
from . import encounter
return encounter.EncounterParticipant(jsondict)
if "EncounterStatusHistory" == resource_type:
from . import encounter
return encounter.EncounterStatusHistory(jsondict)
if "Endpoint" == resource_type:
from . import endpoint
return endpoint.Endpoint(jsondict)
if "EnrollmentRequest" == resource_type:
from . import enrollmentrequest
return enrollmentrequest.EnrollmentRequest(jsondict)
if "EnrollmentResponse" == resource_type:
from . import enrollmentresponse
return enrollmentresponse.EnrollmentResponse(jsondict)
if "EpisodeOfCare" == resource_type:
from . import episodeofcare
return episodeofcare.EpisodeOfCare(jsondict)
if "EpisodeOfCareDiagnosis" == resource_type:
from . import episodeofcare
return episodeofcare.EpisodeOfCareDiagnosis(jsondict)
if "EpisodeOfCareStatusHistory" == resource_type:
from . import episodeofcare
return episodeofcare.EpisodeOfCareStatusHistory(jsondict)
if "EventDefinition" == resource_type:
from . import eventdefinition
return eventdefinition.EventDefinition(jsondict)
if "Evidence" == resource_type:
from . import evidence
return evidence.Evidence(jsondict)
if "EvidenceVariable" == resource_type:
from . import evidencevariable
return evidencevariable.EvidenceVariable(jsondict)
if "EvidenceVariableCharacteristic" == resource_type:
from . import evidencevariable
return evidencevariable.EvidenceVariableCharacteristic(jsondict)
if "ExampleScenario" == resource_type:
from . import examplescenario
return examplescenario.ExampleScenario(jsondict)
if "ExampleScenarioActor" == resource_type:
from . import examplescenario
return examplescenario.ExampleScenarioActor(jsondict)
if "ExampleScenarioInstance" == resource_type:
from . import examplescenario
return examplescenario.ExampleScenarioInstance(jsondict)
if "ExampleScenarioInstanceContainedInstance" == resource_type:
from . import examplescenario
return examplescenario.ExampleScenarioInstanceContainedInstance(jsondict)
if "ExampleScenarioInstanceVersion" == resource_type:
from . import examplescenario
return examplescenario.ExampleScenarioInstanceVersion(jsondict)
if "ExampleScenarioProcess" == resource_type:
from . import examplescenario
return examplescenario.ExampleScenarioProcess(jsondict)
if "ExampleScenarioProcessStep" == resource_type:
from . import examplescenario
return examplescenario.ExampleScenarioProcessStep(jsondict)
if "ExampleScenarioProcessStepAlternative" == resource_type:
from . import examplescenario
return examplescenario.ExampleScenarioProcessStepAlternative(jsondict)
if "ExampleScenarioProcessStepOperation" == resource_type:
from . import examplescenario
return examplescenario.ExampleScenarioProcessStepOperation(jsondict)
if "ExplanationOfBenefit" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefit(jsondict)
if "ExplanationOfBenefitAccident" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitAccident(jsondict)
if "ExplanationOfBenefitAddItem" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitAddItem(jsondict)
if "ExplanationOfBenefitAddItemDetail" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitAddItemDetail(jsondict)
if "ExplanationOfBenefitAddItemDetailSubDetail" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitAddItemDetailSubDetail(jsondict)
if "ExplanationOfBenefitBenefitBalance" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitBenefitBalance(jsondict)
if "ExplanationOfBenefitBenefitBalanceFinancial" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitBenefitBalanceFinancial(jsondict)
if "ExplanationOfBenefitCareTeam" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitCareTeam(jsondict)
if "ExplanationOfBenefitDiagnosis" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitDiagnosis(jsondict)
if "ExplanationOfBenefitInsurance" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitInsurance(jsondict)
if "ExplanationOfBenefitItem" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitItem(jsondict)
if "ExplanationOfBenefitItemAdjudication" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitItemAdjudication(jsondict)
if "ExplanationOfBenefitItemDetail" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitItemDetail(jsondict)
if "ExplanationOfBenefitItemDetailSubDetail" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitItemDetailSubDetail(jsondict)
if "ExplanationOfBenefitPayee" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitPayee(jsondict)
if "ExplanationOfBenefitPayment" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitPayment(jsondict)
if "ExplanationOfBenefitProcedure" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitProcedure(jsondict)
if "ExplanationOfBenefitProcessNote" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitProcessNote(jsondict)
if "ExplanationOfBenefitRelated" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitRelated(jsondict)
if "ExplanationOfBenefitSupportingInfo" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitSupportingInfo(jsondict)
if "ExplanationOfBenefitTotal" == resource_type:
from . import explanationofbenefit
return explanationofbenefit.ExplanationOfBenefitTotal(jsondict)
if "Expression" == resource_type:
from . import expression
return expression.Expression(jsondict)
if "Extension" == resource_type:
from . import extension
return extension.Extension(jsondict)
if "FamilyMemberHistory" == resource_type:
from . import familymemberhistory
return familymemberhistory.FamilyMemberHistory(jsondict)
if "FamilyMemberHistoryCondition" == resource_type:
from . import familymemberhistory
return familymemberhistory.FamilyMemberHistoryCondition(jsondict)
if "Flag" == resource_type:
from . import flag
return flag.Flag(jsondict)
if "Goal" == resource_type:
from . import goal
return goal.Goal(jsondict)
if "GoalTarget" == resource_type:
from . import goal
return goal.GoalTarget(jsondict)
if "GraphDefinition" == resource_type:
from . import graphdefinition
return graphdefinition.GraphDefinition(jsondict)
if "GraphDefinitionLink" == resource_type:
from . import graphdefinition
return graphdefinition.GraphDefinitionLink(jsondict)
if "GraphDefinitionLinkTarget" == resource_type:
from . import graphdefinition
return graphdefinition.GraphDefinitionLinkTarget(jsondict)
if "GraphDefinitionLinkTargetCompartment" == resource_type:
from . import graphdefinition
return graphdefinition.GraphDefinitionLinkTargetCompartment(jsondict)
if "Group" == resource_type:
from . import group
return group.Group(jsondict)
if "GroupCharacteristic" == resource_type:
from . import group
return group.GroupCharacteristic(jsondict)
if "GroupMember" == resource_type:
from . import group
return group.GroupMember(jsondict)
if "GuidanceResponse" == resource_type:
from . import guidanceresponse
return guidanceresponse.GuidanceResponse(jsondict)
if "HealthcareService" == resource_type:
from . import healthcareservice
return healthcareservice.HealthcareService(jsondict)
if "HealthcareServiceAvailableTime" == resource_type:
from . import healthcareservice
return healthcareservice.HealthcareServiceAvailableTime(jsondict)
if "HealthcareServiceEligibility" == resource_type:
from . import healthcareservice
return healthcareservice.HealthcareServiceEligibility(jsondict)
if "HealthcareServiceNotAvailable" == resource_type:
from . import healthcareservice
return healthcareservice.HealthcareServiceNotAvailable(jsondict)
if "HumanName" == resource_type:
from . import humanname
return humanname.HumanName(jsondict)
if "Identifier" == resource_type:
from . import identifier
return identifier.Identifier(jsondict)
if "ImagingStudy" == resource_type:
from . import imagingstudy
return imagingstudy.ImagingStudy(jsondict)
if "ImagingStudySeries" == resource_type:
from . import imagingstudy
return imagingstudy.ImagingStudySeries(jsondict)
if "ImagingStudySeriesInstance" == resource_type:
from . import imagingstudy
return imagingstudy.ImagingStudySeriesInstance(jsondict)
if "ImagingStudySeriesPerformer" == resource_type:
from . import imagingstudy
return imagingstudy.ImagingStudySeriesPerformer(jsondict)
if "Immunization" == resource_type:
from . import immunization
return immunization.Immunization(jsondict)
if "ImmunizationEducation" == resource_type:
from . import immunization
return immunization.ImmunizationEducation(jsondict)
if "ImmunizationEvaluation" == resource_type:
from . import immunizationevaluation
return immunizationevaluation.ImmunizationEvaluation(jsondict)
if "ImmunizationPerformer" == resource_type:
from . import immunization
return immunization.ImmunizationPerformer(jsondict)
if "ImmunizationProtocolApplied" == resource_type:
from . import immunization
return immunization.ImmunizationProtocolApplied(jsondict)
if "ImmunizationReaction" == resource_type:
from . import immunization
return immunization.ImmunizationReaction(jsondict)
if "ImmunizationRecommendation" == resource_type:
from . import immunizationrecommendation
return immunizationrecommendation.ImmunizationRecommendation(jsondict)
if "ImmunizationRecommendationRecommendation" == resource_type:
from . import immunizationrecommendation
return immunizationrecommendation.ImmunizationRecommendationRecommendation(jsondict)
if "ImmunizationRecommendationRecommendationDateCriterion" == resource_type:
from . import immunizationrecommendation
return immunizationrecommendation.ImmunizationRecommendationRecommendationDateCriterion(jsondict)
if "ImplementationGuide" == resource_type:
from . import implementationguide
return implementationguide.ImplementationGuide(jsondict)
if "ImplementationGuideDefinition" == resource_type:
from . import implementationguide
return implementationguide.ImplementationGuideDefinition(jsondict)
if "ImplementationGuideDefinitionGrouping" == resource_type:
from . import implementationguide
return implementationguide.ImplementationGuideDefinitionGrouping(jsondict)
if "ImplementationGuideDefinitionPage" == resource_type:
from . import implementationguide
return implementationguide.ImplementationGuideDefinitionPage(jsondict)
if "ImplementationGuideDefinitionParameter" == resource_type:
from . import implementationguide
return implementationguide.ImplementationGuideDefinitionParameter(jsondict)
if "ImplementationGuideDefinitionResource" == resource_type:
from . import implementationguide
return implementationguide.ImplementationGuideDefinitionResource(jsondict)
if "ImplementationGuideDefinitionTemplate" == resource_type:
from . import implementationguide
return implementationguide.ImplementationGuideDefinitionTemplate(jsondict)
if "ImplementationGuideDependsOn" == resource_type:
from . import implementationguide
return implementationguide.ImplementationGuideDependsOn(jsondict)
if "ImplementationGuideGlobal" == resource_type:
from . import implementationguide
return implementationguide.ImplementationGuideGlobal(jsondict)
if "ImplementationGuideManifest" == resource_type:
from . import implementationguide
return implementationguide.ImplementationGuideManifest(jsondict)
if "ImplementationGuideManifestPage" == resource_type:
from . import implementationguide
return implementationguide.ImplementationGuideManifestPage(jsondict)
if "ImplementationGuideManifestResource" == resource_type:
from . import implementationguide
return implementationguide.ImplementationGuideManifestResource(jsondict)
if "InsurancePlan" == resource_type:
from . import insuranceplan
return insuranceplan.InsurancePlan(jsondict)
if "InsurancePlanContact" == resource_type:
from . import insuranceplan
return insuranceplan.InsurancePlanContact(jsondict)
if "InsurancePlanCoverage" == resource_type:
from . import insuranceplan
return insuranceplan.InsurancePlanCoverage(jsondict)
if "InsurancePlanCoverageBenefit" == resource_type:
from . import insuranceplan
return insuranceplan.InsurancePlanCoverageBenefit(jsondict)
if "InsurancePlanCoverageBenefitLimit" == resource_type:
from . import insuranceplan
return insuranceplan.InsurancePlanCoverageBenefitLimit(jsondict)
if "InsurancePlanPlan" == resource_type:
from . import insuranceplan
return insuranceplan.InsurancePlanPlan(jsondict)
if "InsurancePlanPlanGeneralCost" == resource_type:
from . import insuranceplan
return insuranceplan.InsurancePlanPlanGeneralCost(jsondict)
if "InsurancePlanPlanSpecificCost" == resource_type:
from . import insuranceplan
return insuranceplan.InsurancePlanPlanSpecificCost(jsondict)
if "InsurancePlanPlanSpecificCostBenefit" == resource_type:
from . import insuranceplan
return insuranceplan.InsurancePlanPlanSpecificCostBenefit(jsondict)
if "InsurancePlanPlanSpecificCostBenefitCost" == resource_type:
from . import insuranceplan
return insuranceplan.InsurancePlanPlanSpecificCostBenefitCost(jsondict)
if "Invoice" == resource_type:
from . import invoice
return invoice.Invoice(jsondict)
if "InvoiceLineItem" == resource_type:
from . import invoice
return invoice.InvoiceLineItem(jsondict)
if "InvoiceLineItemPriceComponent" == resource_type:
from . import invoice
return invoice.InvoiceLineItemPriceComponent(jsondict)
if "InvoiceParticipant" == resource_type:
from . import invoice
return invoice.InvoiceParticipant(jsondict)
if "Library" == resource_type:
from . import library
return library.Library(jsondict)
if "Linkage" == resource_type:
from . import linkage
return linkage.Linkage(jsondict)
if "LinkageItem" == resource_type:
from . import linkage
return linkage.LinkageItem(jsondict)
if "List" == resource_type:
from . import list
return list.List(jsondict)
if "ListEntry" == resource_type:
from . import list
return list.ListEntry(jsondict)
if "Location" == resource_type:
from . import location
return location.Location(jsondict)
if "LocationHoursOfOperation" == resource_type:
from . import location
return location.LocationHoursOfOperation(jsondict)
if "LocationPosition" == resource_type:
from . import location
return location.LocationPosition(jsondict)
if "MarketingStatus" == resource_type:
from . import marketingstatus
return marketingstatus.MarketingStatus(jsondict)
if "Measure" == resource_type:
from . import measure
return measure.Measure(jsondict)
if "MeasureGroup" == resource_type:
from . import measure
return measure.MeasureGroup(jsondict)
if "MeasureGroupPopulation" == resource_type:
from . import measure
return measure.MeasureGroupPopulation(jsondict)
if "MeasureGroupStratifier" == resource_type:
from . import measure
return measure.MeasureGroupStratifier(jsondict)
if "MeasureGroupStratifierComponent" == resource_type:
from . import measure
return measure.MeasureGroupStratifierComponent(jsondict)
if "MeasureReport" == resource_type:
from . import measurereport
return measurereport.MeasureReport(jsondict)
if "MeasureReportGroup" == resource_type:
from . import measurereport
return measurereport.MeasureReportGroup(jsondict)
if "MeasureReportGroupPopulation" == resource_type:
from . import measurereport
return measurereport.MeasureReportGroupPopulation(jsondict)
if "MeasureReportGroupStratifier" == resource_type:
from . import measurereport
return measurereport.MeasureReportGroupStratifier(jsondict)
if "MeasureReportGroupStratifierStratum" == resource_type:
from . import measurereport
return measurereport.MeasureReportGroupStratifierStratum(jsondict)
if "MeasureReportGroupStratifierStratumComponent" == resource_type:
from . import measurereport
return measurereport.MeasureReportGroupStratifierStratumComponent(jsondict)
if "MeasureReportGroupStratifierStratumPopulation" == resource_type:
from . import measurereport
return measurereport.MeasureReportGroupStratifierStratumPopulation(jsondict)
if "MeasureSupplementalData" == resource_type:
from . import measure
return measure.MeasureSupplementalData(jsondict)
if "Media" == resource_type:
from . import media
return media.Media(jsondict)
if "Medication" == resource_type:
from . import medication
return medication.Medication(jsondict)
if "MedicationAdministration" == resource_type:
from . import medicationadministration
return medicationadministration.MedicationAdministration(jsondict)
if "MedicationAdministrationDosage" == resource_type:
from . import medicationadministration
return medicationadministration.MedicationAdministrationDosage(jsondict)
if "MedicationAdministrationPerformer" == resource_type:
from . import medicationadministration
return medicationadministration.MedicationAdministrationPerformer(jsondict)
if "MedicationBatch" == resource_type:
from . import medication
return medication.MedicationBatch(jsondict)
if "MedicationDispense" == resource_type:
from . import medicationdispense
return medicationdispense.MedicationDispense(jsondict)
if "MedicationDispensePerformer" == resource_type:
from . import medicationdispense
return medicationdispense.MedicationDispensePerformer(jsondict)
if "MedicationDispenseSubstitution" == resource_type:
from . import medicationdispense
return medicationdispense.MedicationDispenseSubstitution(jsondict)
if "MedicationIngredient" == resource_type:
from . import medication
return medication.MedicationIngredient(jsondict)
if "MedicationKnowledge" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledge(jsondict)
if "MedicationKnowledgeAdministrationGuidelines" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeAdministrationGuidelines(jsondict)
if "MedicationKnowledgeAdministrationGuidelinesDosage" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeAdministrationGuidelinesDosage(jsondict)
if "MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeAdministrationGuidelinesPatientCharacteristics(jsondict)
if "MedicationKnowledgeCost" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeCost(jsondict)
if "MedicationKnowledgeDrugCharacteristic" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeDrugCharacteristic(jsondict)
if "MedicationKnowledgeIngredient" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeIngredient(jsondict)
if "MedicationKnowledgeKinetics" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeKinetics(jsondict)
if "MedicationKnowledgeMedicineClassification" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeMedicineClassification(jsondict)
if "MedicationKnowledgeMonitoringProgram" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeMonitoringProgram(jsondict)
if "MedicationKnowledgeMonograph" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeMonograph(jsondict)
if "MedicationKnowledgePackaging" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgePackaging(jsondict)
if "MedicationKnowledgeRegulatory" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeRegulatory(jsondict)
if "MedicationKnowledgeRegulatoryMaxDispense" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeRegulatoryMaxDispense(jsondict)
if "MedicationKnowledgeRegulatorySchedule" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeRegulatorySchedule(jsondict)
if "MedicationKnowledgeRegulatorySubstitution" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeRegulatorySubstitution(jsondict)
if "MedicationKnowledgeRelatedMedicationKnowledge" == resource_type:
from . import medicationknowledge
return medicationknowledge.MedicationKnowledgeRelatedMedicationKnowledge(jsondict)
if "MedicationRequest" == resource_type:
from . import medicationrequest
return medicationrequest.MedicationRequest(jsondict)
if "MedicationRequestDispenseRequest" == resource_type:
from . import medicationrequest
return medicationrequest.MedicationRequestDispenseRequest(jsondict)
if "MedicationRequestDispenseRequestInitialFill" == resource_type:
from . import medicationrequest
return medicationrequest.MedicationRequestDispenseRequestInitialFill(jsondict)
if "MedicationRequestSubstitution" == resource_type:
from . import medicationrequest
return medicationrequest.MedicationRequestSubstitution(jsondict)
if "MedicationStatement" == resource_type:
from . import medicationstatement
return medicationstatement.MedicationStatement(jsondict)
if "MedicinalProduct" == resource_type:
from . import medicinalproduct
return medicinalproduct.MedicinalProduct(jsondict)
if "MedicinalProductAuthorization" == resource_type:
from . import medicinalproductauthorization
return medicinalproductauthorization.MedicinalProductAuthorization(jsondict)
if "MedicinalProductAuthorizationJurisdictionalAuthorization" == resource_type:
from . import medicinalproductauthorization
return medicinalproductauthorization.MedicinalProductAuthorizationJurisdictionalAuthorization(jsondict)
if "MedicinalProductAuthorizationProcedure" == resource_type:
from . import medicinalproductauthorization
return medicinalproductauthorization.MedicinalProductAuthorizationProcedure(jsondict)
if "MedicinalProductContraindication" == resource_type:
from . import medicinalproductcontraindication
return medicinalproductcontraindication.MedicinalProductContraindication(jsondict)
if "MedicinalProductContraindicationOtherTherapy" == resource_type:
from . import medicinalproductcontraindication
return medicinalproductcontraindication.MedicinalProductContraindicationOtherTherapy(jsondict)
if "MedicinalProductIndication" == resource_type:
from . import medicinalproductindication
return medicinalproductindication.MedicinalProductIndication(jsondict)
if "MedicinalProductIndicationOtherTherapy" == resource_type:
from . import medicinalproductindication
return medicinalproductindication.MedicinalProductIndicationOtherTherapy(jsondict)
if "MedicinalProductIngredient" == resource_type:
from . import medicinalproductingredient
return medicinalproductingredient.MedicinalProductIngredient(jsondict)
if "MedicinalProductIngredientSpecifiedSubstance" == resource_type:
from . import medicinalproductingredient
return medicinalproductingredient.MedicinalProductIngredientSpecifiedSubstance(jsondict)
if "MedicinalProductIngredientSpecifiedSubstanceStrength" == resource_type:
from . import medicinalproductingredient
return medicinalproductingredient.MedicinalProductIngredientSpecifiedSubstanceStrength(jsondict)
if "MedicinalProductIngredientSpecifiedSubstanceStrengthReferenceStrength" == resource_type:
from . import medicinalproductingredient
return medicinalproductingredient.MedicinalProductIngredientSpecifiedSubstanceStrengthReferenceStrength(jsondict)
if "MedicinalProductIngredientSubstance" == resource_type:
from . import medicinalproductingredient
return medicinalproductingredient.MedicinalProductIngredientSubstance(jsondict)
if "MedicinalProductInteraction" == resource_type:
from . import medicinalproductinteraction
return medicinalproductinteraction.MedicinalProductInteraction(jsondict)
if "MedicinalProductInteractionInteractant" == resource_type:
from . import medicinalproductinteraction
return medicinalproductinteraction.MedicinalProductInteractionInteractant(jsondict)
if "MedicinalProductManufactured" == resource_type:
from . import medicinalproductmanufactured
return medicinalproductmanufactured.MedicinalProductManufactured(jsondict)
if "MedicinalProductManufacturingBusinessOperation" == resource_type:
from . import medicinalproduct
return medicinalproduct.MedicinalProductManufacturingBusinessOperation(jsondict)
if "MedicinalProductName" == resource_type:
from . import medicinalproduct
return medicinalproduct.MedicinalProductName(jsondict)
if "MedicinalProductNameCountryLanguage" == resource_type:
from . import medicinalproduct
return medicinalproduct.MedicinalProductNameCountryLanguage(jsondict)
if "MedicinalProductNameNamePart" == resource_type:
from . import medicinalproduct
return medicinalproduct.MedicinalProductNameNamePart(jsondict)
if "MedicinalProductPackaged" == resource_type:
from . import medicinalproductpackaged
return medicinalproductpackaged.MedicinalProductPackaged(jsondict)
if "MedicinalProductPackagedBatchIdentifier" == resource_type:
from . import medicinalproductpackaged
return medicinalproductpackaged.MedicinalProductPackagedBatchIdentifier(jsondict)
if "MedicinalProductPackagedPackageItem" == resource_type:
from . import medicinalproductpackaged
return medicinalproductpackaged.MedicinalProductPackagedPackageItem(jsondict)
if "MedicinalProductPharmaceutical" == resource_type:
from . import medicinalproductpharmaceutical
return medicinalproductpharmaceutical.MedicinalProductPharmaceutical(jsondict)
if "MedicinalProductPharmaceuticalCharacteristics" == resource_type:
from . import medicinalproductpharmaceutical
return medicinalproductpharmaceutical.MedicinalProductPharmaceuticalCharacteristics(jsondict)
if "MedicinalProductPharmaceuticalRouteOfAdministration" == resource_type:
from . import medicinalproductpharmaceutical
return medicinalproductpharmaceutical.MedicinalProductPharmaceuticalRouteOfAdministration(jsondict)
if "MedicinalProductPharmaceuticalRouteOfAdministrationTargetSpecies" == resource_type:
from . import medicinalproductpharmaceutical
return medicinalproductpharmaceutical.MedicinalProductPharmaceuticalRouteOfAdministrationTargetSpecies(jsondict)
if "MedicinalProductPharmaceuticalRouteOfAdministrationTargetSpeciesWithdrawalPeriod" == resource_type:
from . import medicinalproductpharmaceutical
return medicinalproductpharmaceutical.MedicinalProductPharmaceuticalRouteOfAdministrationTargetSpeciesWithdrawalPeriod(jsondict)
if "MedicinalProductSpecialDesignation" == resource_type:
from . import medicinalproduct
return medicinalproduct.MedicinalProductSpecialDesignation(jsondict)
if "MedicinalProductUndesirableEffect" == resource_type:
from . import medicinalproductundesirableeffect
return medicinalproductundesirableeffect.MedicinalProductUndesirableEffect(jsondict)
if "MessageDefinition" == resource_type:
from . import messagedefinition
return messagedefinition.MessageDefinition(jsondict)
if "MessageDefinitionAllowedResponse" == resource_type:
from . import messagedefinition
return messagedefinition.MessageDefinitionAllowedResponse(jsondict)
if "MessageDefinitionFocus" == resource_type:
from . import messagedefinition
return messagedefinition.MessageDefinitionFocus(jsondict)
if "MessageHeader" == resource_type:
from . import messageheader
return messageheader.MessageHeader(jsondict)
if "MessageHeaderDestination" == resource_type:
from . import messageheader
return messageheader.MessageHeaderDestination(jsondict)
if "MessageHeaderResponse" == resource_type:
from . import messageheader
return messageheader.MessageHeaderResponse(jsondict)
if "MessageHeaderSource" == resource_type:
from . import messageheader
return messageheader.MessageHeaderSource(jsondict)
if "Meta" == resource_type:
from . import meta
return meta.Meta(jsondict)
if "MetadataResource" == resource_type:
from . import metadataresource
return metadataresource.MetadataResource(jsondict)
if "MolecularSequence" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequence(jsondict)
if "MolecularSequenceQuality" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceQuality(jsondict)
if "MolecularSequenceQualityRoc" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceQualityRoc(jsondict)
if "MolecularSequenceReferenceSeq" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceReferenceSeq(jsondict)
if "MolecularSequenceRepository" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceRepository(jsondict)
if "MolecularSequenceStructureVariant" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceStructureVariant(jsondict)
if "MolecularSequenceStructureVariantInner" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceStructureVariantInner(jsondict)
if "MolecularSequenceStructureVariantOuter" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceStructureVariantOuter(jsondict)
if "MolecularSequenceVariant" == resource_type:
from . import molecularsequence
return molecularsequence.MolecularSequenceVariant(jsondict)
if "Money" == resource_type:
from . import money
return money.Money(jsondict)
if "NamingSystem" == resource_type:
from . import namingsystem
return namingsystem.NamingSystem(jsondict)
if "NamingSystemUniqueId" == resource_type:
from . import namingsystem
return namingsystem.NamingSystemUniqueId(jsondict)
if "Narrative" == resource_type:
from . import narrative
return narrative.Narrative(jsondict)
if "NutritionOrder" == resource_type:
from . import nutritionorder
return nutritionorder.NutritionOrder(jsondict)
if "NutritionOrderEnteralFormula" == resource_type:
from . import nutritionorder
return nutritionorder.NutritionOrderEnteralFormula(jsondict)
if "NutritionOrderEnteralFormulaAdministration" == resource_type:
from . import nutritionorder
return nutritionorder.NutritionOrderEnteralFormulaAdministration(jsondict)
if "NutritionOrderOralDiet" == resource_type:
from . import nutritionorder
return nutritionorder.NutritionOrderOralDiet(jsondict)
if "NutritionOrderOralDietNutrient" == resource_type:
from . import nutritionorder
return nutritionorder.NutritionOrderOralDietNutrient(jsondict)
if "NutritionOrderOralDietTexture" == resource_type:
from . import nutritionorder
return nutritionorder.NutritionOrderOralDietTexture(jsondict)
if "NutritionOrderSupplement" == resource_type:
from . import nutritionorder
return nutritionorder.NutritionOrderSupplement(jsondict)
if "Observation" == resource_type:
from . import observation
return observation.Observation(jsondict)
if "ObservationComponent" == resource_type:
from . import observation
return observation.ObservationComponent(jsondict)
if "ObservationDefinition" == resource_type:
from . import observationdefinition
return observationdefinition.ObservationDefinition(jsondict)
if "ObservationDefinitionQualifiedInterval" == resource_type:
from . import observationdefinition
return observationdefinition.ObservationDefinitionQualifiedInterval(jsondict)
if "ObservationDefinitionQuantitativeDetails" == resource_type:
from . import observationdefinition
return observationdefinition.ObservationDefinitionQuantitativeDetails(jsondict)
if "ObservationReferenceRange" == resource_type:
from . import observation
return observation.ObservationReferenceRange(jsondict)
if "OperationDefinition" == resource_type:
from . import operationdefinition
return operationdefinition.OperationDefinition(jsondict)
if "OperationDefinitionOverload" == resource_type:
from . import operationdefinition
return operationdefinition.OperationDefinitionOverload(jsondict)
if "OperationDefinitionParameter" == resource_type:
from . import operationdefinition
return operationdefinition.OperationDefinitionParameter(jsondict)
if "OperationDefinitionParameterBinding" == resource_type:
from . import operationdefinition
return operationdefinition.OperationDefinitionParameterBinding(jsondict)
if "OperationDefinitionParameterReferencedFrom" == resource_type:
from . import operationdefinition
return operationdefinition.OperationDefinitionParameterReferencedFrom(jsondict)
if "OperationOutcome" == resource_type:
from . import operationoutcome
return operationoutcome.OperationOutcome(jsondict)
if "OperationOutcomeIssue" == resource_type:
from . import operationoutcome
return operationoutcome.OperationOutcomeIssue(jsondict)
if "Organization" == resource_type:
from . import organization
return organization.Organization(jsondict)
if "OrganizationAffiliation" == resource_type:
from . import organizationaffiliation
return organizationaffiliation.OrganizationAffiliation(jsondict)
if "OrganizationContact" == resource_type:
from . import organization
return organization.OrganizationContact(jsondict)
if "ParameterDefinition" == resource_type:
from . import parameterdefinition
return parameterdefinition.ParameterDefinition(jsondict)
if "Parameters" == resource_type:
from . import parameters
return parameters.Parameters(jsondict)
if "ParametersParameter" == resource_type:
from . import parameters
return parameters.ParametersParameter(jsondict)
if "Patient" == resource_type:
from . import patient
return patient.Patient(jsondict)
if "PatientCommunication" == resource_type:
from . import patient
return patient.PatientCommunication(jsondict)
if "PatientContact" == resource_type:
from . import patient
return patient.PatientContact(jsondict)
if "PatientLink" == resource_type:
from . import patient
return patient.PatientLink(jsondict)
if "PaymentNotice" == resource_type:
from . import paymentnotice
return paymentnotice.PaymentNotice(jsondict)
if "PaymentReconciliation" == resource_type:
from . import paymentreconciliation
return paymentreconciliation.PaymentReconciliation(jsondict)
if "PaymentReconciliationDetail" == resource_type:
from . import paymentreconciliation
return paymentreconciliation.PaymentReconciliationDetail(jsondict)
if "PaymentReconciliationProcessNote" == resource_type:
from . import paymentreconciliation
return paymentreconciliation.PaymentReconciliationProcessNote(jsondict)
if "Period" == resource_type:
from . import period
return period.Period(jsondict)
if "Person" == resource_type:
from . import person
return person.Person(jsondict)
if "PersonLink" == resource_type:
from . import person
return person.PersonLink(jsondict)
if "PlanDefinition" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinition(jsondict)
if "PlanDefinitionAction" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinitionAction(jsondict)
if "PlanDefinitionActionCondition" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinitionActionCondition(jsondict)
if "PlanDefinitionActionDynamicValue" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinitionActionDynamicValue(jsondict)
if "PlanDefinitionActionParticipant" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinitionActionParticipant(jsondict)
if "PlanDefinitionActionRelatedAction" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinitionActionRelatedAction(jsondict)
if "PlanDefinitionGoal" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinitionGoal(jsondict)
if "PlanDefinitionGoalTarget" == resource_type:
from . import plandefinition
return plandefinition.PlanDefinitionGoalTarget(jsondict)
if "Population" == resource_type:
from . import population
return population.Population(jsondict)
if "Practitioner" == resource_type:
from . import practitioner
return practitioner.Practitioner(jsondict)
if "PractitionerQualification" == resource_type:
from . import practitioner
return practitioner.PractitionerQualification(jsondict)
if "PractitionerRole" == resource_type:
from . import practitionerrole
return practitionerrole.PractitionerRole(jsondict)
if "PractitionerRoleAvailableTime" == resource_type:
from . import practitionerrole
return practitionerrole.PractitionerRoleAvailableTime(jsondict)
if "PractitionerRoleNotAvailable" == resource_type:
from . import practitionerrole
return practitionerrole.PractitionerRoleNotAvailable(jsondict)
if "Procedure" == resource_type:
from . import procedure
return procedure.Procedure(jsondict)
if "ProcedureFocalDevice" == resource_type:
from . import procedure
return procedure.ProcedureFocalDevice(jsondict)
if "ProcedurePerformer" == resource_type:
from . import procedure
return procedure.ProcedurePerformer(jsondict)
if "ProdCharacteristic" == resource_type:
from . import prodcharacteristic
return prodcharacteristic.ProdCharacteristic(jsondict)
if "ProductShelfLife" == resource_type:
from . import productshelflife
return productshelflife.ProductShelfLife(jsondict)
if "Provenance" == resource_type:
from . import provenance
return provenance.Provenance(jsondict)
if "ProvenanceAgent" == resource_type:
from . import provenance
return provenance.ProvenanceAgent(jsondict)
if "ProvenanceEntity" == resource_type:
from . import provenance
return provenance.ProvenanceEntity(jsondict)
if "Quantity" == resource_type:
from . import quantity
return quantity.Quantity(jsondict)
if "Quantity" == resource_type:
from . import quantity
return quantity.Quantity(jsondict)
if "Questionnaire" == resource_type:
from . import questionnaire
return questionnaire.Questionnaire(jsondict)
if "QuestionnaireItem" == resource_type:
from . import questionnaire
return questionnaire.QuestionnaireItem(jsondict)
if "QuestionnaireItemAnswerOption" == resource_type:
from . import questionnaire
return questionnaire.QuestionnaireItemAnswerOption(jsondict)
if "QuestionnaireItemEnableWhen" == resource_type:
from . import questionnaire
return questionnaire.QuestionnaireItemEnableWhen(jsondict)
if "QuestionnaireItemInitial" == resource_type:
from . import questionnaire
return questionnaire.QuestionnaireItemInitial(jsondict)
if "QuestionnaireResponse" == resource_type:
from . import questionnaireresponse
return questionnaireresponse.QuestionnaireResponse(jsondict)
if "QuestionnaireResponseItem" == resource_type:
from . import questionnaireresponse
return questionnaireresponse.QuestionnaireResponseItem(jsondict)
if "QuestionnaireResponseItemAnswer" == resource_type:
from . import questionnaireresponse
return questionnaireresponse.QuestionnaireResponseItemAnswer(jsondict)
if "Range" == resource_type:
from . import range
return range.Range(jsondict)
if "Ratio" == resource_type:
from . import ratio
return ratio.Ratio(jsondict)
if "Reference" == resource_type:
from . import reference
return reference.Reference(jsondict)
if "RelatedArtifact" == resource_type:
from . import relatedartifact
return relatedartifact.RelatedArtifact(jsondict)
if "RelatedPerson" == resource_type:
from . import relatedperson
return relatedperson.RelatedPerson(jsondict)
if "RelatedPersonCommunication" == resource_type:
from . import relatedperson
return relatedperson.RelatedPersonCommunication(jsondict)
if "RequestGroup" == resource_type:
from . import requestgroup
return requestgroup.RequestGroup(jsondict)
if "RequestGroupAction" == resource_type:
from . import requestgroup
return requestgroup.RequestGroupAction(jsondict)
if "RequestGroupActionCondition" == resource_type:
from . import requestgroup
return requestgroup.RequestGroupActionCondition(jsondict)
if "RequestGroupActionRelatedAction" == resource_type:
from . import requestgroup
return requestgroup.RequestGroupActionRelatedAction(jsondict)
if "ResearchDefinition" == resource_type:
from . import researchdefinition
return researchdefinition.ResearchDefinition(jsondict)
if "ResearchElementDefinition" == resource_type:
from . import researchelementdefinition
return researchelementdefinition.ResearchElementDefinition(jsondict)
if "ResearchElementDefinitionCharacteristic" == resource_type:
from . import researchelementdefinition
return researchelementdefinition.ResearchElementDefinitionCharacteristic(jsondict)
if "ResearchStudy" == resource_type:
from . import researchstudy
return researchstudy.ResearchStudy(jsondict)
if "ResearchStudyArm" == resource_type:
from . import researchstudy
return researchstudy.ResearchStudyArm(jsondict)
if "ResearchStudyObjective" == resource_type:
from . import researchstudy
return researchstudy.ResearchStudyObjective(jsondict)
if "ResearchSubject" == resource_type:
from . import researchsubject
return researchsubject.ResearchSubject(jsondict)
if "Resource" == resource_type:
from . import resource
return resource.Resource(jsondict)
if "RiskAssessment" == resource_type:
from . import riskassessment
return riskassessment.RiskAssessment(jsondict)
if "RiskAssessmentPrediction" == resource_type:
from . import riskassessment
return riskassessment.RiskAssessmentPrediction(jsondict)
if "RiskEvidenceSynthesis" == resource_type:
from . import riskevidencesynthesis
return riskevidencesynthesis.RiskEvidenceSynthesis(jsondict)
if "RiskEvidenceSynthesisCertainty" == resource_type:
from . import riskevidencesynthesis
return riskevidencesynthesis.RiskEvidenceSynthesisCertainty(jsondict)
if "RiskEvidenceSynthesisCertaintyCertaintySubcomponent" == resource_type:
from . import riskevidencesynthesis
return riskevidencesynthesis.RiskEvidenceSynthesisCertaintyCertaintySubcomponent(jsondict)
if "RiskEvidenceSynthesisRiskEstimate" == resource_type:
from . import riskevidencesynthesis
return riskevidencesynthesis.RiskEvidenceSynthesisRiskEstimate(jsondict)
if "RiskEvidenceSynthesisRiskEstimatePrecisionEstimate" == resource_type:
from . import riskevidencesynthesis
return riskevidencesynthesis.RiskEvidenceSynthesisRiskEstimatePrecisionEstimate(jsondict)
if "RiskEvidenceSynthesisSampleSize" == resource_type:
from . import riskevidencesynthesis
return riskevidencesynthesis.RiskEvidenceSynthesisSampleSize(jsondict)
if "SampledData" == resource_type:
from . import sampleddata
return sampleddata.SampledData(jsondict)
if "Schedule" == resource_type:
from . import schedule
return schedule.Schedule(jsondict)
if "SearchParameter" == resource_type:
from . import searchparameter
return searchparameter.SearchParameter(jsondict)
if "SearchParameterComponent" == resource_type:
from . import searchparameter
return searchparameter.SearchParameterComponent(jsondict)
if "ServiceRequest" == resource_type:
from . import servicerequest
return servicerequest.ServiceRequest(jsondict)
if "Signature" == resource_type:
from . import signature
return signature.Signature(jsondict)
if "Slot" == resource_type:
from . import slot
return slot.Slot(jsondict)
if "Specimen" == resource_type:
from . import specimen
return specimen.Specimen(jsondict)
if "SpecimenCollection" == resource_type:
from . import specimen
return specimen.SpecimenCollection(jsondict)
if "SpecimenContainer" == resource_type:
from . import specimen
return specimen.SpecimenContainer(jsondict)
if "SpecimenDefinition" == resource_type:
from . import specimendefinition
return specimendefinition.SpecimenDefinition(jsondict)
if "SpecimenDefinitionTypeTested" == resource_type:
from . import specimendefinition
return specimendefinition.SpecimenDefinitionTypeTested(jsondict)
if "SpecimenDefinitionTypeTestedContainer" == resource_type:
from . import specimendefinition
return specimendefinition.SpecimenDefinitionTypeTestedContainer(jsondict)
if "SpecimenDefinitionTypeTestedContainerAdditive" == resource_type:
from . import specimendefinition
return specimendefinition.SpecimenDefinitionTypeTestedContainerAdditive(jsondict)
if "SpecimenDefinitionTypeTestedHandling" == resource_type:
from . import specimendefinition
return specimendefinition.SpecimenDefinitionTypeTestedHandling(jsondict)
if "SpecimenProcessing" == resource_type:
from . import specimen
return specimen.SpecimenProcessing(jsondict)
if "StructureDefinition" == resource_type:
from . import structuredefinition
return structuredefinition.StructureDefinition(jsondict)
if "StructureDefinitionContext" == resource_type:
from . import structuredefinition
return structuredefinition.StructureDefinitionContext(jsondict)
if "StructureDefinitionDifferential" == resource_type:
from . import structuredefinition
return structuredefinition.StructureDefinitionDifferential(jsondict)
if "StructureDefinitionMapping" == resource_type:
from . import structuredefinition
return structuredefinition.StructureDefinitionMapping(jsondict)
if "StructureDefinitionSnapshot" == resource_type:
from . import structuredefinition
return structuredefinition.StructureDefinitionSnapshot(jsondict)
if "StructureMap" == resource_type:
from . import structuremap
return structuremap.StructureMap(jsondict)
if "StructureMapGroup" == resource_type:
from . import structuremap
return structuremap.StructureMapGroup(jsondict)
if "StructureMapGroupInput" == resource_type:
from . import structuremap
return structuremap.StructureMapGroupInput(jsondict)
if "StructureMapGroupRule" == resource_type:
from . import structuremap
return structuremap.StructureMapGroupRule(jsondict)
if "StructureMapGroupRuleDependent" == resource_type:
from . import structuremap
return structuremap.StructureMapGroupRuleDependent(jsondict)
if "StructureMapGroupRuleSource" == resource_type:
from . import structuremap
return structuremap.StructureMapGroupRuleSource(jsondict)
if "StructureMapGroupRuleTarget" == resource_type:
from . import structuremap
return structuremap.StructureMapGroupRuleTarget(jsondict)
if "StructureMapGroupRuleTargetParameter" == resource_type:
from . import structuremap
return structuremap.StructureMapGroupRuleTargetParameter(jsondict)
if "StructureMapStructure" == resource_type:
from . import structuremap
return structuremap.StructureMapStructure(jsondict)
if "Subscription" == resource_type:
from . import subscription
return subscription.Subscription(jsondict)
if "SubscriptionChannel" == resource_type:
from . import subscription
return subscription.SubscriptionChannel(jsondict)
if "Substance" == resource_type:
from . import substance
return substance.Substance(jsondict)
if "SubstanceAmount" == resource_type:
from . import substanceamount
return substanceamount.SubstanceAmount(jsondict)
if "SubstanceAmountReferenceRange" == resource_type:
from . import substanceamount
return substanceamount.SubstanceAmountReferenceRange(jsondict)
if "SubstanceIngredient" == resource_type:
from . import substance
return substance.SubstanceIngredient(jsondict)
if "SubstanceInstance" == resource_type:
from . import substance
return substance.SubstanceInstance(jsondict)
if "SubstanceNucleicAcid" == resource_type:
from . import substancenucleicacid
return substancenucleicacid.SubstanceNucleicAcid(jsondict)
if "SubstanceNucleicAcidSubunit" == resource_type:
from . import substancenucleicacid
return substancenucleicacid.SubstanceNucleicAcidSubunit(jsondict)
if "SubstanceNucleicAcidSubunitLinkage" == resource_type:
from . import substancenucleicacid
return substancenucleicacid.SubstanceNucleicAcidSubunitLinkage(jsondict)
if "SubstanceNucleicAcidSubunitSugar" == resource_type:
from . import substancenucleicacid
return substancenucleicacid.SubstanceNucleicAcidSubunitSugar(jsondict)
if "SubstancePolymer" == resource_type:
from . import substancepolymer
return substancepolymer.SubstancePolymer(jsondict)
if "SubstancePolymerMonomerSet" == resource_type:
from . import substancepolymer
return substancepolymer.SubstancePolymerMonomerSet(jsondict)
if "SubstancePolymerMonomerSetStartingMaterial" == resource_type:
from . import substancepolymer
return substancepolymer.SubstancePolymerMonomerSetStartingMaterial(jsondict)
if "SubstancePolymerRepeat" == resource_type:
from . import substancepolymer
return substancepolymer.SubstancePolymerRepeat(jsondict)
if "SubstancePolymerRepeatRepeatUnit" == resource_type:
from . import substancepolymer
return substancepolymer.SubstancePolymerRepeatRepeatUnit(jsondict)
if "SubstancePolymerRepeatRepeatUnitDegreeOfPolymerisation" == resource_type:
from . import substancepolymer
return substancepolymer.SubstancePolymerRepeatRepeatUnitDegreeOfPolymerisation(jsondict)
if "SubstancePolymerRepeatRepeatUnitStructuralRepresentation" == resource_type:
from . import substancepolymer
return substancepolymer.SubstancePolymerRepeatRepeatUnitStructuralRepresentation(jsondict)
if "SubstanceProtein" == resource_type:
from . import substanceprotein
return substanceprotein.SubstanceProtein(jsondict)
if "SubstanceProteinSubunit" == resource_type:
from . import substanceprotein
return substanceprotein.SubstanceProteinSubunit(jsondict)
if "SubstanceReferenceInformation" == resource_type:
from . import substancereferenceinformation
return substancereferenceinformation.SubstanceReferenceInformation(jsondict)
if "SubstanceReferenceInformationClassification" == resource_type:
from . import substancereferenceinformation
return substancereferenceinformation.SubstanceReferenceInformationClassification(jsondict)
if "SubstanceReferenceInformationGene" == resource_type:
from . import substancereferenceinformation
return substancereferenceinformation.SubstanceReferenceInformationGene(jsondict)
if "SubstanceReferenceInformationGeneElement" == resource_type:
from . import substancereferenceinformation
return substancereferenceinformation.SubstanceReferenceInformationGeneElement(jsondict)
if "SubstanceReferenceInformationTarget" == resource_type:
from . import substancereferenceinformation
return substancereferenceinformation.SubstanceReferenceInformationTarget(jsondict)
if "SubstanceSourceMaterial" == resource_type:
from . import substancesourcematerial
return substancesourcematerial.SubstanceSourceMaterial(jsondict)
if "SubstanceSourceMaterialFractionDescription" == resource_type:
from . import substancesourcematerial
return substancesourcematerial.SubstanceSourceMaterialFractionDescription(jsondict)
if "SubstanceSourceMaterialOrganism" == resource_type:
from . import substancesourcematerial
return substancesourcematerial.SubstanceSourceMaterialOrganism(jsondict)
if "SubstanceSourceMaterialOrganismAuthor" == resource_type:
from . import substancesourcematerial
return substancesourcematerial.SubstanceSourceMaterialOrganismAuthor(jsondict)
if "SubstanceSourceMaterialOrganismHybrid" == resource_type:
from . import substancesourcematerial
return substancesourcematerial.SubstanceSourceMaterialOrganismHybrid(jsondict)
if "SubstanceSourceMaterialOrganismOrganismGeneral" == resource_type:
from . import substancesourcematerial
return substancesourcematerial.SubstanceSourceMaterialOrganismOrganismGeneral(jsondict)
if "SubstanceSourceMaterialPartDescription" == resource_type:
from . import substancesourcematerial
return substancesourcematerial.SubstanceSourceMaterialPartDescription(jsondict)
if "SubstanceSpecification" == resource_type:
from . import substancespecification
return substancespecification.SubstanceSpecification(jsondict)
if "SubstanceSpecificationMoiety" == resource_type:
from . import substancespecification
return substancespecification.SubstanceSpecificationMoiety(jsondict)
if "SubstanceSpecificationName" == resource_type:
from . import substancespecification
return substancespecification.SubstanceSpecificationName(jsondict)
if "SubstanceSpecificationNameOfficial" == resource_type:
from . import substancespecification
return substancespecification.SubstanceSpecificationNameOfficial(jsondict)
if "SubstanceSpecificationProperty" == resource_type:
from . import substancespecification
return substancespecification.SubstanceSpecificationProperty(jsondict)
if "SubstanceSpecificationRelationship" == resource_type:
from . import substancespecification
return substancespecification.SubstanceSpecificationRelationship(jsondict)
if "SubstanceSpecificationStructure" == resource_type:
from . import substancespecification
return substancespecification.SubstanceSpecificationStructure(jsondict)
if "SubstanceSpecificationStructureIsotope" == resource_type:
from . import substancespecification
return substancespecification.SubstanceSpecificationStructureIsotope(jsondict)
if "SubstanceSpecificationStructureIsotopeMolecularWeight" == resource_type:
from . import substancespecification
return substancespecification.SubstanceSpecificationStructureIsotopeMolecularWeight(jsondict)
if "SubstanceSpecificationStructureRepresentation" == resource_type:
from . import substancespecification
return substancespecification.SubstanceSpecificationStructureRepresentation(jsondict)
if "SubstanceSpecificationstr" == resource_type:
from . import substancespecification
return substancespecification.SubstanceSpecificationstr(jsondict)
if "SupplyDelivery" == resource_type:
from . import supplydelivery
return supplydelivery.SupplyDelivery(jsondict)
if "SupplyDeliverySuppliedItem" == resource_type:
from . import supplydelivery
return supplydelivery.SupplyDeliverySuppliedItem(jsondict)
if "SupplyRequest" == resource_type:
from . import supplyrequest
return supplyrequest.SupplyRequest(jsondict)
if "SupplyRequestParameter" == resource_type:
from . import supplyrequest
return supplyrequest.SupplyRequestParameter(jsondict)
if "Task" == resource_type:
from . import task
return task.Task(jsondict)
if "TaskInput" == resource_type:
from . import task
return task.TaskInput(jsondict)
if "TaskOutput" == resource_type:
from . import task
return task.TaskOutput(jsondict)
if "TaskRestriction" == resource_type:
from . import task
return task.TaskRestriction(jsondict)
if "TerminologyCapabilities" == resource_type:
from . import terminologycapabilities
return terminologycapabilities.TerminologyCapabilities(jsondict)
if "TerminologyCapabilitiesClosure" == resource_type:
from . import terminologycapabilities
return terminologycapabilities.TerminologyCapabilitiesClosure(jsondict)
if "TerminologyCapabilitiesCodeSystem" == resource_type:
from . import terminologycapabilities
return terminologycapabilities.TerminologyCapabilitiesCodeSystem(jsondict)
if "TerminologyCapabilitiesCodeSystemVersion" == resource_type:
from . import terminologycapabilities
return terminologycapabilities.TerminologyCapabilitiesCodeSystemVersion(jsondict)
if "TerminologyCapabilitiesCodeSystemVersionFilter" == resource_type:
from . import terminologycapabilities
return terminologycapabilities.TerminologyCapabilitiesCodeSystemVersionFilter(jsondict)
if "TerminologyCapabilitiesExpansion" == resource_type:
from . import terminologycapabilities
return terminologycapabilities.TerminologyCapabilitiesExpansion(jsondict)
if "TerminologyCapabilitiesExpansionParameter" == resource_type:
from . import terminologycapabilities
return terminologycapabilities.TerminologyCapabilitiesExpansionParameter(jsondict)
if "TerminologyCapabilitiesImplementation" == resource_type:
from . import terminologycapabilities
return terminologycapabilities.TerminologyCapabilitiesImplementation(jsondict)
if "TerminologyCapabilitiesSoftware" == resource_type:
from . import terminologycapabilities
return terminologycapabilities.TerminologyCapabilitiesSoftware(jsondict)
if "TerminologyCapabilitiesTranslation" == resource_type:
from . import terminologycapabilities
return terminologycapabilities.TerminologyCapabilitiesTranslation(jsondict)
if "TerminologyCapabilitiesValidateCode" == resource_type:
from . import terminologycapabilities
return terminologycapabilities.TerminologyCapabilitiesValidateCode(jsondict)
if "TestReport" == resource_type:
from . import testreport
return testreport.TestReport(jsondict)
if "TestReportParticipant" == resource_type:
from . import testreport
return testreport.TestReportParticipant(jsondict)
if "TestReportSetup" == resource_type:
from . import testreport
return testreport.TestReportSetup(jsondict)
if "TestReportSetupAction" == resource_type:
from . import testreport
return testreport.TestReportSetupAction(jsondict)
if "TestReportSetupActionAssert" == resource_type:
from . import testreport
return testreport.TestReportSetupActionAssert(jsondict)
if "TestReportSetupActionOperation" == resource_type:
from . import testreport
return testreport.TestReportSetupActionOperation(jsondict)
if "TestReportTeardown" == resource_type:
from . import testreport
return testreport.TestReportTeardown(jsondict)
if "TestReportTeardownAction" == resource_type:
from . import testreport
return testreport.TestReportTeardownAction(jsondict)
if "TestReportTest" == resource_type:
from . import testreport
return testreport.TestReportTest(jsondict)
if "TestReportTestAction" == resource_type:
from . import testreport
return testreport.TestReportTestAction(jsondict)
if "TestScript" == resource_type:
from . import testscript
return testscript.TestScript(jsondict)
if "TestScriptDestination" == resource_type:
from . import testscript
return testscript.TestScriptDestination(jsondict)
if "TestScriptFixture" == resource_type:
from . import testscript
return testscript.TestScriptFixture(jsondict)
if "TestScriptMetadata" == resource_type:
from . import testscript
return testscript.TestScriptMetadata(jsondict)
if "TestScriptMetadataCapability" == resource_type:
from . import testscript
return testscript.TestScriptMetadataCapability(jsondict)
if "TestScriptMetadataLink" == resource_type:
from . import testscript
return testscript.TestScriptMetadataLink(jsondict)
if "TestScriptOrigin" == resource_type:
from . import testscript
return testscript.TestScriptOrigin(jsondict)
if "TestScriptSetup" == resource_type:
from . import testscript
return testscript.TestScriptSetup(jsondict)
if "TestScriptSetupAction" == resource_type:
from . import testscript
return testscript.TestScriptSetupAction(jsondict)
if "TestScriptSetupActionAssert" == resource_type:
from . import testscript
return testscript.TestScriptSetupActionAssert(jsondict)
if "TestScriptSetupActionOperation" == resource_type:
from . import testscript
return testscript.TestScriptSetupActionOperation(jsondict)
if "TestScriptSetupActionOperationRequestHeader" == resource_type:
from . import testscript
return testscript.TestScriptSetupActionOperationRequestHeader(jsondict)
if "TestScriptTeardown" == resource_type:
from . import testscript
return testscript.TestScriptTeardown(jsondict)
if "TestScriptTeardownAction" == resource_type:
from . import testscript
return testscript.TestScriptTeardownAction(jsondict)
if "TestScriptTest" == resource_type:
from . import testscript
return testscript.TestScriptTest(jsondict)
if "TestScriptTestAction" == resource_type:
from . import testscript
return testscript.TestScriptTestAction(jsondict)
if "TestScriptVariable" == resource_type:
from . import testscript
return testscript.TestScriptVariable(jsondict)
if "Timing" == resource_type:
from . import timing
return timing.Timing(jsondict)
if "TimingRepeat" == resource_type:
from . import timing
return timing.TimingRepeat(jsondict)
if "TriggerDefinition" == resource_type:
from . import triggerdefinition
return triggerdefinition.TriggerDefinition(jsondict)
if "UsageContext" == resource_type:
from . import usagecontext
return usagecontext.UsageContext(jsondict)
if "ValueSet" == resource_type:
from . import valueset
return valueset.ValueSet(jsondict)
if "ValueSetCompose" == resource_type:
from . import valueset
return valueset.ValueSetCompose(jsondict)
if "ValueSetComposeInclude" == resource_type:
from . import valueset
return valueset.ValueSetComposeInclude(jsondict)
if "ValueSetComposeIncludeConcept" == resource_type:
from . import valueset
return valueset.ValueSetComposeIncludeConcept(jsondict)
if "ValueSetComposeIncludeConceptDesignation" == resource_type:
from . import valueset
return valueset.ValueSetComposeIncludeConceptDesignation(jsondict)
if "ValueSetComposeIncludeFilter" == resource_type:
from . import valueset
return valueset.ValueSetComposeIncludeFilter(jsondict)
if "ValueSetExpansion" == resource_type:
from . import valueset
return valueset.ValueSetExpansion(jsondict)
if "ValueSetExpansionContains" == resource_type:
from . import valueset
return valueset.ValueSetExpansionContains(jsondict)
if "ValueSetExpansionParameter" == resource_type:
from . import valueset
return valueset.ValueSetExpansionParameter(jsondict)
if "VerificationResult" == resource_type:
from . import verificationresult
return verificationresult.VerificationResult(jsondict)
if "VerificationResultAttestation" == resource_type:
from . import verificationresult
return verificationresult.VerificationResultAttestation(jsondict)
if "VerificationResultPrimarySource" == resource_type:
from . import verificationresult
return verificationresult.VerificationResultPrimarySource(jsondict)
if "VerificationResultValidator" == resource_type:
from . import verificationresult
return verificationresult.VerificationResultValidator(jsondict)
if "VisionPrescription" == resource_type:
from . import visionprescription
return visionprescription.VisionPrescription(jsondict)
if "VisionPrescriptionLensSpecification" == resource_type:
from . import visionprescription
return visionprescription.VisionPrescriptionLensSpecification(jsondict)
if "VisionPrescriptionLensSpecificationPrism" == resource_type:
from . import visionprescription
return visionprescription.VisionPrescriptionLensSpecificationPrism(jsondict)
from . import element
return element.Element(jsondict)
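# --- Illustrative usage (a hedged sketch, not part of the original module) ---
# Assuming this is fhirclient's generated element factory, the chain above
# maps a FHIR resourceType string to its model class, imports that module
# lazily, and instantiates it from a JSON dict; unrecognized types fall
# through to the generic element.Element return above. For example:
#
#     jsondict = {"resourceType": "Patient", "id": "example"}
#     resource = FHIRElementFactory.instantiate(jsondict["resourceType"], jsondict)
#     # -> a patient.Patient built from jsondict
#
# The factory/class names in this sketch are assumptions inferred from the
# dispatch code above.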
| [] |
2024-01-10 | Ixitxachitl/Pywiki-Lite | src~pywiki_lite.py |
#!/usr/bin/python
import collections
import os
import ctypes
import json
import queue
import sys
import threading
import time
import configparser
import random
import traceback
import irc.bot
import requests
import argparse
import openai
from datetime import datetime, timezone
from dateutil.relativedelta import relativedelta
from tkinter import messagebox, ttk, font, IntVar
import tkinter.scrolledtext as tkscrolled
import tkinter as tk
from http.server import BaseHTTPRequestHandler, HTTPServer
import webbrowser
# import websocket
import gpt4all
import io
from contextlib import redirect_stdout
from html import escape
def resource_path(relative_path):
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
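# Usage note: resource_path('icon.ico') resolves bundled assets both when the
# app is frozen by PyInstaller (files unpacked to the _MEIPASS temp dir) and
# when run from source (relative to the working directory).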
def get_version():
return "1.66" # Version Number
class TwitchBotGUI(tk.Tk):
def __init__(self):
super().__init__()
self.title("pyWiki Lite")
self.geometry("1000x425")
self.iconbitmap(default=resource_path('icon.ico'))
# Make the window non-resizable
self.resizable(False, False)
# Variables for Twitch Bot configuration
self.username = tk.StringVar()
self.client_id = tk.StringVar()
self.client_secret = tk.StringVar()
self.bot_token = tk.StringVar()
self.refresh_token = tk.StringVar()
self.channel = tk.StringVar()
self.openai_api_key = tk.StringVar()
self.openai_api_model = tk.StringVar()
self.ignore_userlist = IntVar()
# Variable to keep track of the bot state
self.bot_running = False
self.mute = False
self.openai_models = ['gpt-4', 'gpt-3.5-turbo']
if os.path.exists('ggml-mpt-7b-chat.bin'):
self.openai_models.append('mpt-7b-chat')
if os.path.exists('wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin'):
self.openai_models.append('WizardLM-13B')
self.create_widgets()
# Load configuration from the INI file
if not os.path.exists('config.ini'):
self.save_configuration()
self.load_configuration()
# Bind the on_exit function to the closing event of the Tkinter window
self.protocol("WM_DELETE_WINDOW", self.on_exit)
# Initialize a Queue for handling log messages
self.log_queue = queue.Queue()
# Start a separate thread to update the log asynchronously
self.log_thread = threading.Thread(target=self.process_log_queue)
self.log_thread.daemon = True
self.log_thread.start()
# Function to handle selection change
def on_selection_change(self, event):
self.openai_api_model.set(self.openai_model_entry.get())
print(self.openai_model_entry.get() + ' set')
self.append_to_log(self.openai_model_entry.get() + ' set')
def show_about_popup(self):
about_text = "pyWiki Lite " + get_version() + "\n©2023 Ixitxachitl\nAnd ChatGPT"
thread = threading.Thread(target=lambda: messagebox.showinfo("About", about_text))
thread.start()
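    # Thread-safe logging: append_to_log() can be called from any thread and
    # only enqueues the message; the daemon thread started in __init__ drains
    # the queue every 100 ms and writes into the read-only log widget.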
def append_to_log(self, message):
self.log_queue.put(message)
def process_log_queue(self):
while True:
try:
message = self.log_queue.get_nowait()
self.log_text.config(state=tk.NORMAL) # Enable the Text widget for editing
self.log_text.insert(tk.END, message + "\n")
self.log_text.see(tk.END) # Scroll to the bottom of the text widget
self.log_text.config(state=tk.DISABLED) # Disable the Text widget for editing
except queue.Empty:
pass
time.sleep(.1)
def toggle_stay_on_top(self):
if self.attributes("-topmost"):
self.attributes("-topmost", False)
self.stay_on_top_button.config(relief="raised")
else:
self.attributes("-topmost", True)
self.stay_on_top_button.config(relief="sunken")
def toggle_mute(self):
        if self.mute:
self.mute = False
self.append_to_log('Unmuted')
self.stay_mute_button.config(relief="raised")
else:
self.mute = True
self.append_to_log('Muted')
self.stay_mute_button.config(relief="sunken")
def create_widgets(self):
# Set the column weight to make text inputs expand horizontally
self.columnconfigure(1, weight=1)
tk.Label(self, text="pyWiki Lite Configuration", font=("Helvetica", 16)).grid(row=0, column=0, columnspan=2,
pady=10, padx=10, sticky='w')
tk.Label(self, text="Context", font=("Helvetica", 16)).grid(row=0, column=3, columnspan=1,
pady=0, padx=(0, 10), sticky='w')
tk.Label(self, text="Users", font=("Helvetica", 16)).grid(row=0, column=5, columnspan=1, padx=(0, 10),
sticky='w')
self.user_count = tk.Label(self, text="", font=("Helvetica 16 bold"))
self.user_count.grid(row=0, column=6, columnspan=1, pady=10, padx=(0, 10), sticky='w')
# Twitch Bot Username Entry
tk.Label(self, text="Username:").grid(row=1, column=0, padx=(10, 5), sticky="e")
'''
self.bot_username_entry = tk.Entry(self, textvariable=self.username, width=50)
self.bot_username_entry.grid(row=1, column=1, sticky="ew", padx=(0, 10), pady=(2, 0))
'''
self.bot_username_entry = tk.Label(self, textvariable=self.username)
self.bot_username_entry.grid(row=1, column=1, sticky="w", padx=(0, 10), pady=(2, 0))
self.login_button = tk.Button(self, text="Login", command=self.twitch_login)
self.login_button.grid(row=1, column=1, sticky="e", padx=10)
# ClientID Entry
tk.Label(self, text="ClientID:").grid(row=2, column=0, padx=(10, 5), sticky="e")
self.client_id_entry = tk.Entry(self, show="*", textvariable=self.client_id, width=50)
self.client_id_entry.grid(row=2, column=1, sticky="ew", padx=(0, 10))
# Client Secret Entry
tk.Label(self, text="Client Secret:").grid(row=3, column=0, padx=(10, 5), sticky="e")
self.client_secret_entry = tk.Entry(self, show="*", textvariable=self.client_secret, width=50)
self.client_secret_entry.grid(row=3, column=1, sticky="ew", padx=(0, 10))
'''
# Twitch Bot Token Entry
tk.Label(self, text="Bot OAuth Token:").grid(row=4, column=0, padx=(10, 5), sticky="e")
self.bot_token_entry = tk.Entry(self, show="*", textvariable=self.bot_token, width=50)
self.bot_token_entry.grid(row=4, column=1, sticky="ew", padx=(0, 10))
'''
# Channel Entry
tk.Label(self, text="Channel:").grid(row=4, column=0, padx=(10, 5), sticky="e")
self.channel_entry = tk.Entry(self, textvariable=self.channel, width=50)
self.channel_entry.grid(row=4, column=1, sticky="ew", padx=(0, 10))
# OpenAI API Key Entry
tk.Label(self, text="OpenAI API Key:").grid(row=5, column=0, padx=(10, 5), sticky="e")
self.openai_api_key_entry = tk.Entry(self, show="*", textvariable=self.openai_api_key, width=50)
self.openai_api_key_entry.grid(row=5, column=1, sticky="ew", padx=(0, 10))
# OpenAI Model
self.openai_model_entry = ttk.Combobox(self, textvariable=self.openai_api_model, state="readonly")
self.openai_model_entry['values'] = self.openai_models
self.openai_model_entry.grid(row=0, column=4, sticky="e", padx=10)
# Set the default value for the dropdown box
self.openai_model_entry.current(0)
# Bind the event handler to the selection change event
self.openai_model_entry.bind('<<ComboboxSelected>>', self.on_selection_change)
self.stay_on_top_button = tk.Button(self, text="📌", command=self.toggle_stay_on_top)
self.stay_on_top_button.grid(row=0, column=7, sticky="e", padx=10)
self.about_button = tk.Button(self, text="ℹ️", command=self.show_about_popup, borderwidth=0)
self.about_button.grid(row=0, column=7, columnspan=2, sticky="e")
self.stay_mute_button = tk.Button(self, text="🔇", font=font.Font(size=14), justify='center',
command=self.toggle_mute)
self.stay_mute_button.grid(row=6, column=0, columnspan=2, sticky="e", padx=(0, 10))
# Create a slider widget
self.frequency_slider = tk.Scale(self, from_=0, to=100, orient=tk.HORIZONTAL)
self.frequency_slider.grid(row=6, column=0, columnspan=2, padx=(10, 60), pady=0, sticky="ew")
self.frequency_slider.bind("<Enter>", self.on_frequency_slider_enter)
self.frequency_slider.bind("<Leave>", self.on_frequency_slider_leave)
# Start/Stop Bot Button
self.bot_toggle_button = tk.Button(self, text="Start Bot", command=self.toggle_bot)
self.bot_toggle_button.grid(row=0, column=1, columnspan=1, sticky="e", pady=10, padx=10)
# Create a Text widget to display bot messages
self.log_text = tkscrolled.ScrolledText(self, wrap="word", height=11, state=tk.DISABLED)
self.log_text.grid(row=7, column=0, columnspan=2, padx=10, pady=(10, 0), sticky="ewn")
# Create a Text widget to display the input string
self.input_text = tkscrolled.ScrolledText(self, wrap="word", height=22, width=40, undo=True,
autoseparators=True, maxundo=-1)
self.input_text.grid(row=1, column=3, columnspan=2, rowspan=7, padx=(0, 10), pady=(10, 0), sticky="ne")
# Create a Listbox to display users
self.ignore_userlist_check = tk.Checkbutton(self, text="Ignore User List", variable=self.ignore_userlist,
onvalue=1,
offvalue=0)
self.ignore_userlist_check.grid(row=1, column=5, columnspan=3, sticky='nw', pady=0)
self.user_list_scroll = tk.Scrollbar(self, orient="vertical")
self.user_list_scroll.grid(row=2, column=8, columnspan=1, rowspan=6, pady=0, padx=(0, 10), sticky="ns")
self.user_list = tk.Listbox(self, height=21, selectmode='SINGLE', width=30,
yscrollcommand=self.user_list_scroll.set)
self.user_list_scroll.config(command=self.user_list.yview)
self.user_list.grid(row=2, column=5, columnspan=3, rowspan=6, pady=0, sticky="ne")
self.user_list.bind('<FocusOut>', lambda e: self.user_list.selection_clear(0, tk.END))
self.user_list.bind('<Double-Button-1>', self.show_popup)
self.user_list.bind('<Button-3>', self.message_user)
def on_frequency_slider_enter(self, event):
self.frequency_slider.bind("<MouseWheel>", self.on_frequency_slider_scroll)
def on_frequency_slider_leave(self, event):
self.frequency_slider.unbind("<MouseWheel>")
def on_frequency_slider_scroll(self, event):
current_value = self.frequency_slider.get()
if event.delta > 0:
new_value = min(current_value + 1, self.frequency_slider['to'])
else:
new_value = max(current_value - 1, self.frequency_slider['from'])
self.frequency_slider.set(new_value)
def message_user(self, event):
selected_index = self.user_list.curselection()
if selected_index:
item_index = int(selected_index[0])
selected_item = self.user_list.get(item_index)
if selected_item.lower() in self.bot.last_message.keys():
thread = threading.Thread(
target=lambda: self.bot.generate_response(selected_item,
self.bot.last_message[selected_item.lower()]))
else:
thread = threading.Thread(
target=lambda: self.bot.generate_response(selected_item, '@ ' + selected_item))
thread.start()
def show_popup(self, event):
selected_index = self.user_list.curselection()
if selected_index:
item_index = int(selected_index[0])
selected_item = self.user_list.get(item_index)
url = 'https://api.twitch.tv/helix/users?login=' + selected_item
headers = {
'Authorization': 'Bearer ' + self.bot_token.get(),
'Client-Id': self.client_id.get(),
'Content-Type': 'application/json',
}
while True:
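            # Retry loop: a 401 means the stored OAuth token expired, so
            # refresh it and re-issue the Helix request; any other non-200
            # status aborts with an error dialog.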
response = requests.get(url, headers=headers)
if response.status_code == 200:
break
elif response.status_code == 401:
self.refresh_login()
else:
# Handle other status codes if needed
messagebox.showerror("Error", "Error fetching data: " + str(response.status_code))
return
# Now you can safely access the data from the response
try:
created_at = response.json()['data'][0]['created_at']
followed_at = self.bot.get_followage(selected_item)
try:
con_followed_at = datetime.strptime(followed_at, '%Y-%m-%dT%H:%M:%SZ')
follow_time = relativedelta(datetime.utcnow(), con_followed_at)
time_units = [('year', follow_time.years), ('month', follow_time.months), ('day', follow_time.days),
('hour', follow_time.hours)]
time_strings = [f"{value} {unit}" if value == 1 else f"{value} {unit}s" for unit, value in
time_units if
value > 0]
time_string = ', '.join(time_strings)
except ValueError:
time_string = ''
thread = threading.Thread(target=lambda: messagebox.showinfo(selected_item, 'Created on: ' + created_at
+ '\nFollowed on: ' + followed_at + '\n' +
time_string))
thread.start()
except KeyError:
messagebox.showerror("Error", "Error parsing response data")
except IndexError:
messagebox.showerror("Error", "Missing response data")
def twitch_login(self):
self.open_browser_and_start_server()
def refresh_login(self):
auth_params = {
'client_id': self.client_id.get(),
'client_secret': self.client_secret.get(),
'grant_type': 'refresh_token',
'refresh_token': self.refresh_token.get(),
}
response = requests.post('https://id.twitch.tv/oauth2/token', data=auth_params)
data = response.json()
self.bot_token.set(data['access_token'])
self.refresh_token.set(data['refresh_token'])
def open_browser_and_start_server(self):
print('Logging in...')
print(self.client_id.get())
# Open the authorization URL in the default web browser
auth_params = {
'client_id': self.client_id.get(),
'redirect_uri': 'http://localhost:3000',
'response_type': 'code',
'scope': 'chat:read+chat:edit+channel:moderate+whispers:read+whispers:edit+channel_editor+user:read:follows+moderator:read:followers+channel:read:redemptions',
'force_verify': 'true',
}
auth_url = 'https://id.twitch.tv/oauth2/authorize?' + '&'.join([f'{k}={v}' for k, v in auth_params.items()])
webbrowser.open(auth_url)
# Start the server in a separate thread
server_address = ('', 3000)
httpd = HTTPServer(server_address, CallbackHandler)
httpd.handle_request()
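# handle_request() serves exactly one request -- the OAuth redirect back to
# localhost:3000 -- and then returns, so no explicit server shutdown is needed.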
def write_to_text_file(self, file_path, content):
try:
with open(file_path, 'w', encoding='utf-8') as file:
file.write(content)
print(f"Successfully wrote to {file_path}")
except Exception as e:
print(f"Error occurred while writing to {file_path}: {e}")
def load_configuration(self):
config = configparser.ConfigParser()
if not config.read('config.ini'):
return
section = config['TwitchBot']
self.username.set(section.get('username', ''))
self.client_id.set(section.get('ClientID', ''))
self.client_secret.set(section.get('ClientSecret', ''))
self.bot_token.set(section.get('BotOAuthToken', ''))
self.refresh_token.set(section.get('RefreshToken', ''))
self.channel.set(section.get('InitialChannels', ''))
self.openai_api_key.set(section.get('OpenAIAPIKey', ''))
if not section.get('InputString', ''):
self.input_text.insert(tk.END, "You are a twitch chatbot, your username is <name> and your pronouns "
"are They/Them. The name of the streamer is <channel> and their "
"pronouns are <streamer_pronouns>. The streamer is playing <game>. The "
"name of the chatter is <author> and their pronouns are "
"<chatter_pronouns>. The current date and time are: <time>. A list of "
"users in chat are: <users>. Global twitch emotes that you can use are"
" <emotes>.")
else:
self.input_text.insert(tk.END, section.get('InputString', ''))
if not section.get('Model', ''):
self.openai_api_model.set('gpt-4-0613')
else:
self.openai_api_model.set(section.get('Model', ''))
if not section.get('Frequency', ''):
self.frequency_slider.set(0)
else:
self.frequency_slider.set(section.get('Frequency', ''))
self.ignore_userlist.set(int(section.get('IgnoreUsers', '0')))
def save_configuration(self):
config = configparser.ConfigParser()
config['TwitchBot'] = {
'username': self.username.get(),
'ClientID': self.client_id.get(),
'ClientSecret': self.client_secret.get(),
'BotOAuthToken': self.bot_token.get(),
'RefreshToken': self.refresh_token.get(),
'InitialChannels': self.channel.get(),
'OpenAIAPIKey': self.openai_api_key.get(),
'InputString': self.input_text.get('1.0', 'end'),
'Model': self.openai_api_model.get(),
'Frequency': self.frequency_slider.get(),
'IgnoreUsers': self.ignore_userlist.get()
}
with open('config.ini', 'w') as configfile:
config.write(configfile)
def start_bot(self):
if not self.bot_running:
self.refresh_login()
self.bot_running = True
self.bot_toggle_button.config(text="Stop Bot")
# Start the bot in a separate thread
self.bot_thread = threading.Thread(target=self.run_bot, daemon=False)
self.bot_thread.start()
return
def run_bot(self):
# This method will be executed in a separate thread
# Create and run the bot here
self.bot = TwitchBot(self.username.get(), self.client_id.get(), self.client_secret.get(), self.bot_token.get(),
self.channel.get(), self.openai_api_key.get())
self.bot.start()
return
def stop_bot(self):
if self.bot_running:
self.bot_running = False
self.bot_toggle_button.config(text="Start Bot")
self.write_to_text_file("log.txt", self.log_text.get("1.0", tk.END).strip())
self.user_list.delete(0, tk.END)
app.user_count.config(text="")
if hasattr(self, "bot"):
try:
self.bot.connection.quit()
self.bot.disconnect()
except Exception as e:
print(e)
# self.bot.die()
# self.bot_thread.join()
self.terminate_thread(self.bot_thread)
self.bot.users = []
print("Stopped")
self.append_to_log("Stopped")
def terminate_thread(self, thread):
"""Terminates a python thread from another thread.
:param thread: a threading.Thread instance
"""
if not thread.is_alive():
return
exc = ctypes.py_object(SystemExit)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(thread.ident), exc)
if res == 0:
raise ValueError("nonexistent thread id")
elif res > 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
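# Note: PyThreadState_SetAsyncExc only raises the exception the next time the
# target thread executes Python bytecode; a thread blocked inside a native
# call (e.g. a socket read) will not terminate until it re-enters the
# interpreter, so this is a best-effort kill rather than a guaranteed one.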
def on_exit(self):
self.save_configuration()
self.stop_bot()
self.destroy()
def toggle_bot(self):
self.save_configuration()
if self.bot_running:
self.bot_toggle_button.config(relief="raised")
self.login_button.config(state=tk.NORMAL)
self.client_id_entry.config(state="normal")
self.client_secret_entry.config(state="normal")
self.channel_entry.config(state="normal")
self.openai_api_key_entry.config(state="normal")
self.stop_bot()
else:
self.bot_toggle_button.config(relief="sunken")
self.login_button.config(state=tk.DISABLED)
self.client_id_entry.config(state="disabled")
self.client_secret_entry.config(state="disabled")
self.channel_entry.config(state="disabled")
self.openai_api_key_entry.config(state="disabled")
self.start_bot()
class CallbackHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
# Extract the authorization code from the query string of the redirect URL
query = self.path.split('?')[1]
query_params = dict(param.split('=') for param in query.split('&'))
code = query_params.get('code')
token_params = {
'client_id': app.client_id.get(),
'client_secret': app.client_secret.get(),
'code': code,
'grant_type': 'authorization_code',
'redirect_uri': 'http://localhost:3000',
}
response = requests.post('https://id.twitch.tv/oauth2/token', data=token_params)
data = response.json()
access_token = data['access_token']
refresh_token = data['refresh_token']
print('Access Token: ' + access_token)
print('Refresh Token: ' + refresh_token)
url = 'https://api.twitch.tv/helix/users'
headers = {'Authorization': 'Bearer ' + access_token,
'Client-ID': app.client_id.get(),
'Content-Type': 'application/json'}
response = requests.get(url, headers=headers).json()
username = response['data'][0]['login']
print('Login: ' + username)
app.bot_token.set(access_token)
app.refresh_token.set(refresh_token)
app.username.set(username)
# Now you can use the access_token to make authenticated API requests
self.wfile.write(b'Authorization successful! You can close this window now.')
class TwitchBot(irc.bot.SingleServerIRCBot):
def __init__(self, username, client_id, client_secret, token, channel, openai_api_key):
self.username = username
self.client_id = client_id
self.client_secret = client_secret
self.token = token
self.channel = '#' + channel
self.client_credentials = requests.post('https://id.twitch.tv/oauth2/token?client_id='
+ self.client_id
+ '&client_secret='
+ self.client_secret
+ '&grant_type=client_credentials'
+ '').json()
print(self.client_credentials)
self.openai_api_key = openai_api_key
openai.api_key = self.openai_api_key
self.pronoun_cache = {}
self.users = []
self.message_queue = collections.deque(maxlen=10)
self.last_message = {}
self.verify()
self.channel_id = self.get_channel_id(channel)
self.user_id = self.get_channel_id(username)
self.emotes = self.get_emotes()
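# JSON schemas for OpenAI function calling: generate_response passes this list
# as functions=self.functions so the model can ask the bot to run these tools.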
self.functions = [
# {
# "name": "get_user_pronouns",
# "description": "Get the pronouns of a specified user",
# "parameters": {
# "type": "object",
# "properties": {
# "user": {
# "type": "string",
# "description": "The name of the person to look up pronouns for",
# },
# },
# "required": ["user"],
# },
# },
{
"name": "get_launch",
"description": "Get the next or previous scheduled space launch",
"parameters": {
"type": "object",
"properties": {
"when": {
"type": "string",
"enum": ["next", "previous"]
},
},
"required": ["when"],
},
},
{
"name": "get_users",
"description": "Get a list of users in chat",
"parameters": {
"type": "object",
"properties": {
},
},
},
{
"name": "get_stream",
"description": "Gets information about a stream by streamer",
"parameters": {
"type": "object",
"properties": {
"streamer": {
"type": "string",
"description": "the name of the streamer to look up"
},
},
"required": ["streamer"],
},
},
{
"name": "get_game_info",
"description": "Gets information about a game",
"parameters": {
"type": "object",
"properties": {
"game": {
"type": "string",
"description": "the name of the game to look up"
},
},
"required": ["game"],
},
},
{
"name": "send_message_delayed",
"description": "sends a message after a number of seconds",
"parameters": {
"type": "object",
"properties": {
"message": {
"type": "string",
"description": "the message to send"
},
"delay_seconds": {
"type": "string",
"description": "the number of seconds to delay"
},
},
"required": ["message", "delay_seconds"],
},
},
]
# Create IRC bot connection
server = 'irc.chat.twitch.tv'
port = 6667
print('Connecting to ' + server + ' on port ' + str(port) + '...')
app.append_to_log('Connecting to ' + server + ' on port ' + str(port) + '...')
irc.bot.SingleServerIRCBot.__init__(self, [(server, port, 'oauth:' + token)], username, username)
'''
def receive_twitch_events(self):
twitch_uri = "wss://eventsub.wss.twitch.tv/ws"
def on_message(ws, message):
data = json.loads(message)
print(data)
if data['metadata']['message_type'] == "PING":
# Respond with a pong
ws.send(json.dumps({"type": "PONG"}))
print('PONG')
elif data['metadata']['message_type'] == "session_welcome":
session_id = data['payload']['session']['id']
headers = {
'Authorization': 'Bearer ' + app.bot_token.get(),
'Client-Id': app.client_id.get(),
'Content-Type': 'application/json',
}
auth_params = {
'type': 'channel.channel_points_custom_reward_redemption.add',
'version': '1',
'condition': {"broadcaster_user_id": self.channel_id},
'transport': {"method": "websocket", "session_id": session_id},
}
response = requests.post('https://api.twitch.tv/helix/eventsub/subscriptions', json=auth_params, headers=headers)
print(response.json())
ws = websocket.WebSocketApp(twitch_uri, on_message=on_message)
ws.run_forever()
'''
def verify(self):
url = 'https://id.twitch.tv/oauth2/validate'
headers = {'Authorization': 'OAuth ' + app.bot_token.get()}
while True:
response = requests.get(url, headers=headers)
if response.status_code == 200:
break
elif response.status_code == 401:
app.refresh_login()
else:
# Handle other status codes if needed
return "Error fetching data: " + str(response.status_code)
# Now you can safely access the data from the response
try:
verification = response.json()
return verification
except KeyError:
return "Error parsing response data"
except IndexError:
return "Missing response data"
def get_channel_id(self, channel, **kwargs):
# Get the channel id, we will need this for v5 API calls
print('Called get_channel_id for ' + channel)
app.append_to_log('Called get_channel_id for ' + channel)
url = 'https://api.twitch.tv/helix/users?login=' + escape(channel)
headers = {
'Authorization': 'Bearer ' + app.bot_token.get(),
'Client-Id': app.client_id.get(),
'Content-Type': 'application/json',
}
while True:
response = requests.get(url, headers=headers)
if response.status_code == 200:
break
elif response.status_code == 401:
app.refresh_login()
else:
# Handle other status codes if needed
return "Error fetching data: " + str(response.status_code)
# Now you can safely access the data from the response
try:
channel_id = response.json()['data'][0]['id']
return channel_id
except KeyError:
return "Error parsing response data"
except IndexError:
return "Missing response data"
def get_game(self, channel, **kwargs):
print('Called get_game for ' + channel)
app.append_to_log('Called get_game for ' + channel)
# Get the current game
url = 'https://api.twitch.tv/helix/channels?broadcaster_id=' + escape(channel)
headers = {
'Authorization': 'Bearer ' + app.bot_token.get(),
'Client-Id': app.client_id.get(),
'Content-Type': 'application/json',
}
while True:
response = requests.get(url, headers=headers)
if response.status_code == 200:
break
elif response.status_code == 401:
app.refresh_login()
else:
# Handle other status codes if needed
return "Error fetching data: " + str(response.status_code) + " " + str(response.content)
# Now you can safely access the data from the response
try:
game_name = response.json()['data'][0]['game_name']
return game_name
except KeyError:
return "Error parsing response data"
except IndexError:
return "Missing response data"
def get_game_info(self, game, **kwargs):
print('Called get_game_info for ' + game)
app.append_to_log('Called get_game_info for ' + game)
url = 'https://api.igdb.com/v4/games'
headers = {
'Authorization': 'Bearer ' + self.client_credentials['access_token'],
'Client-Id': app.client_id.get(),
'Content-Type': 'application/json',
}
data = 'fields *; where name ~ "' + escape(game) + '";'
print(data)
response = requests.post(url, headers=headers, data=data)
print(response)
game_info = json.dumps(response.json())
print(game_info)
return game_info
def get_emotes(self, **kwargs):
# Get list of global emotes
print('Called get_emotes')
app.append_to_log('Called get_emotes')
url = 'https://api.twitch.tv/helix/chat/emotes/global'
headers = {
'Authorization': 'Bearer ' + app.bot_token.get(),
'Client-Id': app.client_id.get(),
'Content-Type': 'application/json',
}
while True:
response = requests.get(url, headers=headers)
if response.status_code == 200:
break
elif response.status_code == 401:
app.refresh_login()
else:
# Handle other status codes if needed
return "Error fetching data: " + str(response.status_code)
# Now you can safely access the data from the response
try:
emotes = []
for emote in response.json()['data']:
emotes.append(emote['name'])
return emotes
except KeyError:
return "Error parsing response data"
except IndexError:
return "Missing response data"
def get_stream(self, streamer, **kwargs):
print('Called get_stream for ' + streamer)
app.append_to_log('Called get_stream for ' + streamer)
if streamer is None:
streamer = self.channel[1:]
url = 'https://api.twitch.tv/helix/search/channels?query=' + escape(streamer) + '&first=1'
headers = {
'Authorization': 'Bearer ' + app.bot_token.get(),
'Client-Id': app.client_id.get(),
'Content-Type': 'application/json',
}
while True:
response = requests.get(url, headers=headers)
if response.status_code == 200:
break
elif response.status_code == 401:
app.refresh_login()
else:
# Handle other status codes if needed
return "Error fetching data: " + str(response.status_code)
# Now you can safely access the data from the response
try:
print(response.json())
stream = json.dumps(response.json()['data'][0])
return stream
except KeyError:
return "Error parsing response data"
except IndexError:
return "Missing response data"
def get_followage(self, user, **kwargs):
print('Called get_followage for ' + user)
app.append_to_log('Called get_followage for ' + user)
headers = {'Authorization': 'Bearer ' + app.bot_token.get(),
'Client-ID': app.client_id.get(),
'Content-Type': 'application/json'}
url = 'https://api.twitch.tv/helix/channels/followers?user_id=' + escape(self.get_channel_id(
user)) + '&broadcaster_id=' + escape(self.channel_id)
while True:
response = requests.get(url, headers=headers)
if response.status_code == 200:
break
elif response.status_code == 401:
app.refresh_login()
else:
# Handle other status codes if needed
return "Error fetching data: " + str(response.json()['message'])
# Now you can safely access the data from the response
try:
followage = response.json()['data'][0]['followed_at']
return followage
except KeyError:
return "Error parsing response data"
except IndexError:
return "Not Following"
def get_users(self, **kwargs):
print('Called get_users')
app.append_to_log('Called get_users')
self.connection.users()
if app.ignore_userlist.get() == 1:
return 'unknown'
else:
return str(app.user_list.get(0, tk.END))
def on_namreply(self, c, e):
for key in self.channels[self.channel].users():
if key not in self.users:
self.users.append(key)
for user in self.users:
if user not in app.user_list.get(0, tk.END):
app.user_list.insert(tk.END, user)
app.user_count.config(text=app.user_list.size())
print(', '.join(map(str, self.users)))
def on_join(self, c, e):
user = e.source.nick
if user not in app.user_list.get(0, tk.END):
self.users.append(user)
app.user_list.insert(tk.END, user)
app.user_count.config(text=app.user_list.size())
print(user + ' joined')
def on_part(self, c, e):
user = e.source.nick
if user in self.users:
self.users.remove(user)
for index in range(app.user_list.size() - 1, -1, -1):
item_name = app.user_list.get(index)
if item_name == user:
app.user_list.delete(index)
app.user_count.config(text=app.user_list.size())
print(user + ' left')
def on_welcome(self, c, e):
print('Joining ' + self.channel)
app.append_to_log('Joining ' + self.channel)
self.connection = c
# You must request specific capabilities before you can use them
c.cap('REQ', ':twitch.tv/membership')
c.cap('REQ', ':twitch.tv/tags')
c.cap('REQ', ':twitch.tv/commands')
c.join(self.channel)
self.connection.users()
'''
thread = threading.Thread(target=self.receive_twitch_events)
thread.start()
thread.join()
'''
def get_launch(self, when, **kwargs):
print('Called get_launch on ' + when)
app.append_to_log('Called get_launch on ' + when)
if when == 'next':
url = 'https://ll.thespacedevs.com/2.2.0/launch/upcoming/?mode=list'
else:
url = 'https://ll.thespacedevs.com/2.2.0/launch/previous/?mode=list'
return json.dumps(requests.get(url).json()["results"][:2])
def get_pronouns(self, author, **kwargs):
print('Called get_pronouns for ' + author)
app.append_to_log('Called get_pronouns for ' + author)
# Check if pronouns exist in the cache
if author.lower() in self.pronoun_cache:
return self.pronoun_cache[author.lower()]
url = 'https://pronouns.alejo.io/api/users/' + escape(author.lower())
r = requests.get(url).json()
pronoun_mapping = {
'aeaer': 'Ae/Aer',
'any': 'Any',
'eem': 'E/Em',
'faefaer': 'Fae/Faer',
'hehim': 'He/Him',
'heshe': 'He/She',
'hethem': 'He/They',
'itits': 'It/Its',
'other': 'Other',
'perper': 'Per/Per',
'sheher': 'She/Her',
'shethey': 'She/They',
'theythem': 'They/Them',
'vever': 'Ve/Ver',
'xexem': 'Xe/Xem',
'ziehir': 'Zie/Hir'
}
pronouns = r[0]['pronoun_id'] if r else 'unknown'
pronoun = pronoun_mapping.get(pronouns, 'unknown')
print('Got ' + author + ' pronouns ' + pronoun)
app.append_to_log('Got ' + author + ' pronouns ' + pronoun)
self.pronoun_cache[author.lower()] = pronoun
return pronoun
def parse_string(self, input_string, author, user_message):
replacements = {
"<name>": self.username,
"<channel>": self.channel[1:],
"<game>": self.get_game(self.channel_id),
"<author>": author,
"<emotes>": ', '.join(map(str, self.emotes)),
"<UTC>": str(datetime.now(timezone.utc)),
"<time>": str(datetime.now()),
"<chatter_pronouns>": self.get_pronouns(author),
"<streamer_pronouns>": self.get_pronouns(self.channel[1:]),
"<users>": self.get_users()  # get_users() already returns a display string
}
for placeholder, replacement in replacements.items():
input_string = input_string.replace(placeholder, replacement)
sentences = input_string.split('. ')
parsed_list = [{"role": "system", "content": sentence} for sentence in sentences]
for m in self.message_queue:
# Split on the first ': ' only, so chat messages containing ': ' stay intact
sender, _, body = m.partition(': ')
if sender == self.username.lower():
parsed_list.append({"role": "assistant", "content": body})
elif body != user_message:
parsed_list.append({"role": "system", "name": sender, "content": body})
if app.openai_api_model.get() == 'mpt-7b-chat' or app.openai_api_model.get() == 'WizardLM-13B':
return parsed_list
parsed_list.append({"role": "user", "name": author, "content": user_message})
return parsed_list
def send_message_delayed(self, message, delay_seconds, **kwargs):
print('Called send_message_delayed ' + message + ' in ' + delay_seconds + ' seconds')
app.append_to_log('Called send_message_delayed ' + message + ' in ' + delay_seconds + ' seconds')
def delayed_print():
seconds = int(delay_seconds)
while seconds > 0 and app.bot_running:
time.sleep(1)
seconds -= 1
if app.bot_running:
self.connection.privmsg(self.channel, message)
app.append_to_log(self.username + ': ' + message)
print(self.username + ': ' + message)
self.message_queue.append(self.username + ': ' + message)
thread = threading.Thread(target=delayed_print)
thread.start()
return 'Timer Set'
def on_disconnect(self, c, e):
self.message_queue.clear()
print('Disconnected')
app.append_to_log('Disconnected')
def on_ctcp(self, c, e):
nick = e.source.nick
if e.arguments[0] == "VERSION":
c.ctcp_reply(nick, "VERSION " + self.get_version())
elif e.arguments[0] == "PING":
if len(e.arguments) > 1:
c.ctcp_reply(nick, "PING " + e.arguments[1])
elif (
e.arguments[0] == "DCC"
and e.arguments[1].split(" ", 1)[0] == "CHAT"
):
self.on_dccchat(c, e)
message = e.arguments[1]
author = ''
for tag in e.tags:
if tag['key'] == 'display-name':
author = tag['value']
break
print(author + " " + message)
app.append_to_log((author + " " + message))
def on_pubmsg(self, c, e):
message = e.arguments[0]
author = ''
for tag in e.tags:
if tag['key'] == 'display-name':
author = tag['value']
break
print(author + ": " + message)
app.append_to_log(author + ": " + message)
self.message_queue.append(author + ": " + message)
self.last_message[author.lower()] = message
if author.lower() not in app.user_list.get(0, tk.END):
self.users.append(author.lower())
app.user_list.insert(tk.END, author.lower())
app.user_count.config(text=app.user_list.size())
# If a chat message starts with an exclamation point, try to run it as a command
if e.arguments[0].startswith('!'):
cmd = e.arguments[0][1:].split()
if len(cmd) > 0:
print('Received command: ' + cmd[0])
app.append_to_log('Received command: ' + cmd[0])
self.do_command(e, cmd)
return
rand_chat = random.random()
if app.mute and rand_chat > float(app.frequency_slider.get()) / 100:
return
elif message.lower() == (self.username + " yes").lower() or message.lower() == \
('@' + self.username + " yes").lower():
c.privmsg(self.channel, ":)")
app.append_to_log(self.username + ': ' + ":)")
elif message.lower() == (self.username + " no").lower() or message.lower() == \
('@' + self.username + " no").lower():
c.privmsg(self.channel, ":(")
app.append_to_log(self.username + ': ' + ":(")
elif message.lower().startswith(("thanks " + self.username).lower()) or \
message.lower().startswith(("thanks @" + self.username).lower()):
c.privmsg(self.channel, "np")
app.append_to_log(self.username + ': ' + "np")
else:
if rand_chat <= float(app.frequency_slider.get()) / 100 or self.username.lower() in message.lower() or \
"@" + self.username.lower() in message.lower():
thread = threading.Thread(target=lambda: self.generate_response(author, message))
thread.start()
def generate_response(self, author, message):
self.input_text = app.input_text.get('1.0', 'end')
if app.openai_api_model.get() == 'mpt-7b-chat' or app.openai_api_model.get() == 'WizardLM-13B':
try:
if app.openai_api_model.get() == 'mpt-7b-chat':
gmll_model = 'ggml-mpt-7b-chat.bin'
else:
gmll_model = 'wizardlm-13b-v1.1-superhot-8k.ggmlv3.q4_0.bin'
with io.StringIO() as buffer, redirect_stdout(buffer):
self.model4a = gpt4all.GPT4All(model_name=gmll_model,
model_path=os.path.abspath('.'),
allow_download=False)
output = buffer.getvalue().strip()
app.append_to_log(output)
print(output)
with self.model4a.chat_session():
self.model4a.current_chat_session = self.parse_string(self.input_text, author, message)
response = self.model4a.generate(message, max_tokens=500, temp=0.7).encode('ascii',
'ignore').decode('ascii')
response = response.strip().replace('\r', ' ').replace('\n', ' ')
while response.startswith('.') or response.startswith('/'):
response = response[1:]
if response.lower().startswith(self.username.lower()):
response = response[len(self.username + ': '):]
while len(('PRIVMSG' + self.channel + " " + response + '\r\n').encode()) > 488:
response = response[:-1]
self.connection.privmsg(self.channel, response[:500])
app.append_to_log(self.username + ': ' + response[:500])
print(self.username + ': ' + response[:500])
self.message_queue.append(self.username + ': ' + response[:500])
except Exception as e:
print(str(e))
print(traceback.format_exc())
app.append_to_log(str(e))
app.append_to_log(traceback.format_exc())
else:
retry = 0
while retry < 3:
message_array = self.parse_string(self.input_text, author, message)
try:
response = openai.ChatCompletion.create(model=app.openai_api_model.get(),
messages=message_array,
functions=self.functions,
user=self.channel[1:]
)
response_message = response["choices"][0]["message"]
# Step 2: check if GPT wanted to call a function
if response_message.get("function_call"):
# Step 3: call the function
# Note: the JSON response may not always be valid; be sure to handle errors
available_functions = {
# "get_user_pronouns": self.get_pronouns,
"get_launch": self.get_launch,
"get_users": self.get_users,
"get_stream": self.get_stream,
"get_game_info": self.get_game_info,
"send_message_delayed": self.send_message_delayed,
} # only one function in this example, but you can have multiple
function_name = response_message["function_call"]["name"]
function_to_call = available_functions[function_name]
function_args = json.loads(response_message["function_call"]["arguments"])
function_response = function_to_call(
# author=function_args.get("user"),
when=function_args.get("when"),
streamer=function_args.get("streamer"),
game=function_args.get("game"),
message=function_args.get("message"),
delay_seconds=function_args.get("delay_seconds")
)
# Step 4: send the info on the function call and function response to GPT
message_array.append(response_message) # extend conversation with assistant's reply
# noinspection PyTypeChecker
message_array.append(
{
"role": "function",
"name": function_name,
"content": function_response,
}
) # extend conversation with function response
response = openai.ChatCompletion.create(
model=app.openai_api_model.get(),
messages=message_array,
) # get a new response from GPT where it can see the function response
if hasattr(response, 'choices'):
response.choices[0].message.content = \
response.choices[0].message.content.strip().replace('\r', ' ').replace('\n', ' ')
while response.choices[0].message.content.startswith('.') or \
response.choices[0].message.content.startswith('/'):
response.choices[0].message.content = response.choices[0].message.content[1:]
if response.choices[0].message.content.lower().startswith(self.username.lower()):
response.choices[0].message.content = response.choices[0].message.content[
len(self.username + ': '):]
while len(('PRIVMSG' + self.channel + " " + response.choices[
0].message.content + '\r\n').encode()) > 488:
response.choices[0].message.content = response.choices[0].message.content[:-1]
self.connection.privmsg(self.channel, response.choices[0].message.content[:500])
app.append_to_log(self.username + ': ' + response.choices[0].message.content[:500])
print(self.username + ': ' + response.choices[0].message.content[:500])
self.message_queue.append(self.username + ': ' + response.choices[0].message.content[:500])
break
else:
print(response)
app.append_to_log(response)
except Exception as e:
retry += 1
print(str(e))
print(traceback.format_exc())
app.append_to_log(str(e))
app.append_to_log(traceback.format_exc())
def do_command(self, e, cmd):
c = self.connection
if len(cmd) == 2:
if cmd[0] == self.username and cmd[1] == 'version':
c.privmsg(self.channel, get_version() + ' ' + app.openai_api_model.get())
app.append_to_log(self.username + ': ' + get_version() + ' ' + app.openai_api_model.get())
print(self.username + ': ' + get_version() + ' ' + app.openai_api_model.get())
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="pyWiki Lite")
parser.add_argument("--version", action="store_true", help="Show the version number")
args = parser.parse_args()
if args.version:
print(get_version())
sys.exit()
app = TwitchBotGUI()
app.mainloop()
| [
": "
] |
2024-01-10 | animaesh/Auto-GPT | autogpt~llm_utils.py | import time
from typing import Dict, List, Optional
import openai
from openai.error import APIError, RateLimitError
from colorama import Fore
from autogpt.config import Config
CFG = Config()
openai.api_key = CFG.openai_api_key
def call_ai_function(
function: str, args: List, description: str, model: Optional[str] = None
) -> str:
"""Call an AI function
This is a magic function that can do anything with no-code. See
https://github.com/Torantulino/AI-Functions for more info.
Args:
function (str): The function to call
args (list): The arguments to pass to the function
description (str): The description of the function
model (str, optional): The model to use. Defaults to None.
Returns:
str: The response from the function
"""
if model is None:
model = CFG.smart_llm_model
# For each arg, if any are None, convert to "None":
args = [str(arg) if arg is not None else "None" for arg in args]
# parse args to comma separated string
args = ", ".join(args)
messages = [
{
"role": "system",
"content": f"You are now the following python function: ```# {description}"
f"\n{function}```\n\nOnly respond with your `return` value.",
},
{"role": "user", "content": args},
]
return create_chat_completion(model=model, messages=messages, temperature=0)
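# Illustrative use (hypothetical function and arguments, shown only as a sketch;
# a valid OpenAI API key must be configured for the call to succeed):
#
#   result = call_ai_function(
#       function="def reverse_string(s: str) -> str:",
#       args=["hello"],
#       description="Reverses a string.",
#   )
#   # `result` is the model's simulated return value, e.g. "'olleh'"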
# Overly simple abstraction until we create something better
# simple retry mechanism when getting a rate error or a bad gateway
def create_chat_completion(
messages: List, # type: ignore
model: Optional[str] = None,
temperature: float = CFG.temperature,
max_tokens: Optional[int] = None,
) -> str:
"""Create a chat completion using the OpenAI API
Args:
messages (list[dict[str, str]]): The messages to send to the chat completion
model (str, optional): The model to use. Defaults to None.
temperature (float, optional): The temperature to use. Defaults to 0.9.
max_tokens (int, optional): The max tokens to use. Defaults to None.
Returns:
str: The response from the chat completion
"""
response = None
num_retries = 10
if CFG.debug_mode:
print(
Fore.GREEN
+ f"Creating chat completion with model {model}, temperature {temperature},"
f" max_tokens {max_tokens}" + Fore.RESET
)
for attempt in range(num_retries):
backoff = 2 ** (attempt + 2)
try:
if CFG.use_azure:
response = openai.ChatCompletion.create(
deployment_id=CFG.get_azure_deployment_id_for_model(model),
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
else:
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=temperature,
max_tokens=max_tokens,
)
break
except RateLimitError:
pass
except APIError as e:
if e.http_status == 502:
pass
else:
raise
if attempt == num_retries - 1:
raise
if CFG.debug_mode:
print(
Fore.RED + "Error: ",
f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
)
time.sleep(backoff)
if response is None:
raise RuntimeError(f"Failed to get response after {num_retries} retries")
return response.choices[0].message["content"]
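if __name__ == "__main__":
# Minimal smoke test, illustrative only (not part of the original module);
# it assumes a valid OpenAI API key is configured for Auto-GPT.
reply = create_chat_completion(
messages=[{"role": "user", "content": "Say hello in five words."}],
model=CFG.smart_llm_model,
temperature=0.0,
)
print(reply)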
| [
"You are now the following python function: ```# PLACEHOLDER\nPLACEHOLDER```\n\nOnly respond with your `return` value."
] |
2024-01-10 | DanielLongo/LLM-ABM | games~DissentGame.py | from typing import List, Dict, Callable
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
HumanMessage,
SystemMessage,
)
import os
import json
import sys
sys.path.append("..")
from BetterAgents import DialogueAgent, DialogueSimulator
num_players = 3
max_iters = 16
word_limit = 50
dialogue_temperature = 0.3
player_names = ["Sam", "Ryan", "Alex"]
debate_mode = "devils_advocate"
dilemia = """
The case involved a personal injury
sustained by a washing machine repairman in the course of his duties.
This individual, Mr. Smith, had already been paid for hospitalization
charges and for wage loss during the period of recuperation.
However, he was suing for $25,000.00, the maximum allowable on
his particular insurance policy, for "the past and present pain and
suffering and the worry and grief which he has undergone as a result
of this accident". The facts of the case, as presented, showed that
there was some question as to whether the torn cartilege in his knee
would ever completely heal; and a doctor stated that there was
always the possibility that the knee would slip out of joint if sudden
pressure was applied. Further, Mr. Smith's sole recreation was
bowling and there was question as to whether he would be able to
resume this recreation. The jurors were instructed to consider, "in
reaching the amount of your verdict, how much you should award in
order to fairly and justly compensate the plaintiff for the pain and
suffering, worry and grief, and humiliation that has been endured
and will hereafter be suffered." After reading the case study, subjects
wrote down their initial opinion on the compensation that should be
given to Mr. Smith
"""
# from https://www.jstor.org/stable/pdf/2786425.pdf?refreqid=excelsior%3A260fa7cb71a02145bfda72d1b534e84f&ab_segments=&origin=&initiator=&acceptTC=1
game_description = f"""You are a juror in a civil case. The case is as follows: {dilemia}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of an individual in a negotiation. All individuals are college aged students."
)
def load_player_descriptions():
# check if file exists first
if os.path.isfile('player_descriptions.json'):
with open('player_descriptions.json') as f:
return json.load(f)
player_specifier_prompts = []
for i in range(num_players):
player_specifier_prompts.append([
player_descriptor_system_message,
HumanMessage(content=
f"""{game_description}
Please reply with a creative description of the juror, {player_names[i]}, in {word_limit} words or less.
Speak directly to {player_names[i]}.
Do not add anything else."""
)
])
player_descriptions = [ChatOpenAI(temperature=1.0)(player_specifier_prompts[i]).content for i in range(num_players)]
with open('player_descriptions.json', 'w') as f:
json.dump(player_descriptions, f)
return player_descriptions
player_descriptions = load_player_descriptions()
print("*" * 25, "PARTICIPANTS", "*" * 25)
for i in range(num_players):
print(f'{player_names[i]} Description:')
print(player_descriptions[i])
def load_player_system_message_texts(mode="normal"):
print(f"Loading player system message texts... {mode}")
if mode == "normal":
prompt = f"""{game_description}
Never forget you are PLAYER_NAME.
You are a juror in a civil trial with description as follows: PLAYER_DESCRIPTION.
Speak in the first person from the perspective of PLAYER_NAME.
If the whole group has reached a decision, type 'we have reached a decision' followed by the resolution.
Do not change roles!
"""
odd_prompt = prompt
if mode == "authentic_dissent":
prompt = f"""{game_description}
Never forget you are PLAYER_NAME.
You are a juror in a civil trial with description as follows: PLAYER_DESCRIPTION.
Speak in the first person from the perspective of PLAYER_NAME.
If the whole group has reached a decision, type 'we have reached a decision' followed by the resolution.
Do not change roles!
"""
odd_prompt = f"""{game_description}
Never forget you are PLAYER_NAME.
You are a juror in a civil trial with description as follows: PLAYER_DESCRIPTION.
Speak in the first person from the perspective of PLAYER_NAME.
You are not to immediately agree with other members in the group.
Only side with them if you are convinced.
If the whole group has reached a decision, type 'we have reached a decision' followed by the resolution.
Do not change roles!
"""
if mode == "devils_advocate":
prompt = f"""{game_description}
Never forget you are PLAYER_NAME.
You are a juror in a civil trial with description as follows: PLAYER_DESCRIPTION.
Speak in the first person from the perspective of PLAYER_NAME.
There is one person in the group named {player_names[-1]} who is a devil's advocate and will always disagree with you in hopes of creating a more lively discussion.
If the whole group has reached a decision, type 'we have reached a decision' followed by the resolution.
Do not change roles!
"""
odd_prompt = f"""{game_description}
Never forget you are PLAYER_NAME.
You are a juror in a civil trial with description as follows: PLAYER_DESCRIPTION.
Speak in the first person from the perspective of PLAYER_NAME.
You are to play the role of devil's advocate and should disagree with the others in hopes of creating a more lively discussion.
The other jurors are aware of this role of yours.
If the whole group has reached a decision, type 'we have reached a decision' followed by the resolution.
Do not change roles!
"""
player_system_message_texts = []
for i in range(num_players - 1):
player_system_message_texts.append(
SystemMessage(content=prompt.replace("PLAYER_NAME", player_names[i]).replace("PLAYER_DESCRIPTION", player_descriptions[i]))
)
player_system_message_texts.append(SystemMessage(content=odd_prompt))
return player_system_message_texts
player_system_messages = load_player_system_message_texts(mode=debate_mode)
agents = [DialogueAgent(name=player_names[i], system_message=player_system_messages[i], model=ChatOpenAI(temperature=dialogue_temperature)) for i in range(num_players)]
def select_next_speaker(step: int, agents: List[DialogueAgent]) -> int:
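# Round-robin scheduling: agents speak in a fixed rotating order.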
return step % len(agents)
simulator = DialogueSimulator(agents=agents, selection_function=select_next_speaker)
simulator.reset()
simulator.inject(agents[0].name, f"Let us begin deliberating the case: {dilemia}.")
print("*" * 25, "BEGINNING GAME", "*" * 25)
for i in range(max_iters):
name, message = simulator.step()
print(f'{name}: {message}')
print('\n')
if "we have reached a decision" in message.lower():
break
| [
"PLAYER_DESCRIPTION",
"PLACEHOLDER\n Never forget you are PLAYER_NAME.\n You are a juror in a civil trial with description as follows: PLAYER_DESCRIPTION.\n Speak in the first person from the perspective of PLAYER_NAME.\n If the whole group has reached a decision, type 'we have reached a decision' followed by the resolution.\n Do not change roles!\n ",
"PLAYER_NAME",
"You can add detail to the description of an individual in a negotiation. All individuals are college aged students.",
"PLACEHOLDER\n Never forget you are PLAYER_NAME.\n You are a juror in a civil trial with description as follows: PLAYER_DESCRIPTION.\n Speak in the first person from the perspective of PLAYER_NAME.\n You are to play the role of devil's advocate and should disagree with the others in hopes of creating a more lively discussion.\n The other jurors are aware of this role of yours.\n If the whole group has reached a decision, type 'we have reached a decision' followed by the resolution.\n Do not change roles!\n ",
"PLACEHOLDER\n Never forget you are PLAYER_NAME.\n You are a juror in a civil trial with description as follows: PLAYER_DESCRIPTION.\n Speak in the first person from the perspective of PLAYER_NAME.\n You are not to immediately agree with other members in the group.\n Only side with them if you are convinced.\n If the whole group has reached a decision, type 'we have reached a decision' followed by the resolution.\n Do not change roles!\n ",
"[]",
"PLACEHOLDER\n Never forget you are PLAYER_NAME.\n You are a juror in a civil trial with description as follows: PLAYER_DESCRIPTION.\n Speak in the first person from the perspective of PLAYER_NAME.\n There is one person in the group named PLACEHOLDER who is a devil's advocate and will always disagree with you in hopes of creating a more lively discussion.\n If the whole group has reached a decision, type 'we have reached a decision' followed by the resolution.\n Do not change roles!\n "
] |
2024-01-10 | DanielLongo/LLM-ABM | BetterAgents~DialogueSimulator.py | from typing import List, Dict, Callable, Tuple
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
HumanMessage,
SystemMessage,
)
from BetterAgents.DialogueAgent import DialogueAgent
class DialogueSimulator:
def __init__(
self,
agents: List[DialogueAgent],
selection_function: Callable[[int, List[DialogueAgent]], int],
) -> None:
self.agents = agents
self._step = 0
self.select_next_speaker = selection_function
def reset(self):
for agent in self.agents:
agent.reset()
def inject(self, name: str, message: str):
"""
Initiates the conversation with a {message} from {name}
"""
for agent in self.agents:
agent.receive(name, message)
# increment time
self._step += 1
def step(self) -> Tuple[str, str]:
# 1. choose the next speaker
speaker_idx = self.select_next_speaker(self._step, self.agents)
speaker = self.agents[speaker_idx]
# 2. next speaker sends message
message = speaker.send()
# 3. everyone receives message
for receiver in self.agents:
receiver.receive(speaker.name, message)
# 4. increment time
self._step += 1
return speaker.name, message | [] |
2024-01-10 | DanielLongo/LLM-ABM | BetterAgents~DialogueAgent.py | import os
from dotenv import load_dotenv
load_dotenv()
from typing import List, Dict, Callable
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
HumanMessage,
SystemMessage,
)
class DialogueAgent:
def __init__(
self,
name: str,
system_message: SystemMessage,
model: ChatOpenAI,
) -> None:
self.name = name
self.system_message = system_message
self.model = model
self.prefix = f"{self.name}: "
self.reset()
def reset(self):
self.message_history = ["Here is the conversation so far."]
def send(self) -> str:
"""
Applies the chatmodel to the message history
and returns the message string
"""
message = self.model(
[
self.system_message,
HumanMessage(content="\n".join(self.message_history + [self.prefix])),
]
)
return message.content
def receive(self, name: str, message: str) -> None:
"""
Concatenates {message} spoken by {name} into message history
"""
self.message_history.append(f"{name}: {message}")
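# Illustrative use (assumes OPENAI_API_KEY is set in the environment, as
# loaded by load_dotenv() above):
#
#   agent = DialogueAgent(
#       name="Alice",
#       system_message=SystemMessage(content="You are Alice, a friendly juror."),
#       model=ChatOpenAI(temperature=0.7),
#   )
#   agent.receive("Bob", "Hello, Alice!")
#   print(agent.send())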
# from typing import List, Dict, Callable
# from langchain.chat_models import ChatOpenAI
# from langchain.schema import (
# HumanMessage,
# SystemMessage,
# )
# class Agent():
# def __init__(self, name, system_message: SystemMessage, ) -> None:
# self.name = name
# self.system_message = system_message
# self.chat_model = ChatOpenAI(openai_api_key=os.getenv("OPENAI_API_KEY"))
# self.prefix = f"{self.name}: "
# self.reset()
# def reset(self) -> None:
# self.message_history = ["Here is the conversation so far."]
# def send(self) -> str:
# """
# Applies the chatmodel to the message history
# and returns the message string
# """
# message = self.model(
# [
# self.system_message,
# HumanMessage(content="\n".join(self.message_history + [self.prefix])),
# ]
# )
# return message.content
# def receive(self, name: str, message: str) -> None:
# """
# Concatenates {message} spoken by {name} into message history
# """
# self.message_history.append(f"{name}: {message}")
# if __name__ == "__main__":
# a = Agent("test", "hello")
# print(a.message_history)
| [
"\n"
] |
2024-01-10 | DanielLongo/LLM-ABM | games~NPlayerAgreement.py | from typing import List, Dict, Callable
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
HumanMessage,
SystemMessage,
)
import sys
sys.path.append("..")
from BetterAgents import DialogueAgent, DialogueSimulator
import textwrap
TEMP = .8
num_players = 3
max_iters = 30
word_limit = 30
player_names = ["Alice", "Bob", "Charlie", "David", "Eve", "Frank", "Grace", "Heidi", "Ivan", "Judy", "Kevin", "Larry", "Mallory", "Nancy", "Olivia", "Peggy", "Quentin", "Rupert", "Sybil", "Trent", "Ursula", "Victor", "Walter", "Xavier", "Yvonne", "Zelda"]
# dilemia = "Should we conserve more forest land?"
dilemia = "Should you vaccinate your child for Measles?"
# dilemia = "Should abortion be legal?"
game_description = f"""Here is the topic for a negotiation between {num_players} people. The dilemma is: {dilemia}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of an individual in a negotiation."
)
player_specifier_prompts = []
for i in range(num_players):
player_specifier_prompts.append([
player_descriptor_system_message,
HumanMessage(content=
f"""{game_description}
Please reply with a creative description of the player, {player_names[i]}, in {word_limit} words or less.
Speak directly to {player_names[i]}.
Do not add anything else."""
)
])
player_descriptions = [ChatOpenAI(temperature=1.0)(player_specifier_prompts[i]).content for i in range(num_players)]
for i in range(num_players):
print(f'{player_names[i]} Description:')
print(player_descriptions[i])
player_system_messages = []
for i in range(num_players):
player_system_messages.append(SystemMessage(content=(
f"""{game_description}
Never forget you are {player_names[i]}.
Your character description is as follows: {player_descriptions[i]}.
Speak in the first person from the perspective of {player_names[i]}.
Do not change roles!
Be concise and to the point.
Be convincing.
Be opinionated.
Be combative. Address the other arguments head on.
Do not be repetitive.
Do not concede your argument.
Do not be passive.
Use facts. Be specific and precise.
Use statistics. Reference current events.
Be creative with your arguments. Make sure to address the other players' arguments.
If the whole group has reached a decision, type 'we have reached a decision' followed by the resolution.
""")))
agents = [DialogueAgent(name=player_names[i], system_message=player_system_messages[i], model=ChatOpenAI(temperature=TEMP)) for i in range(num_players)]
def select_next_speaker(step: int, agents: List[DialogueAgent]) -> int:
return step % len(agents)
simulator = DialogueSimulator(agents=agents, selection_function=select_next_speaker)
simulator.reset()
simulator.inject(agents[0].name, f"Let us begin discussing the dilemma: {dilemia}.")
transcript = ""
num_exchanges = 0
for i in range(max_iters):
num_exchanges += 1
name, message = simulator.step()
print(f'{name}: {message}')
transcript += f'{name}: {message}\n\n'
print('\n')
if "we have reached a decision" in message.lower():
break
conversation_length = len(transcript.split(' '))
print("*" * (25 + 9), "END OF GAME", "*" * (25 + 9))
print("Final Decision:")
print(message)
print('\n')
# ask post experiment questions to all players
print("*" * 25, "POST EXPERIMENT QUESTIONNAIRE", "*" * 25)
post_experiment_questions = ["how do you feel the discourse went 1-10? respond in json format with element 'discourse_rating'. nothing more."]
post_experiment_questions += [f"Following your interactions with {player_names[i]}, rate them 1-10 on likability? respond in json format with element '{player_names[i]}_rating'. nothing more." for i in range(num_players)]
def post_experiment_questionnaire(agent, questions):
responses = []
for question in questions:
responses.append(
ChatOpenAI(temperature=.5)([
agent.system_message,
HumanMessage(content=question)
]).content
)
return responses
player_responses = {}
for i in range(num_players):
print(f"{player_names[i]}")
player_responses[player_names[i]] = post_experiment_questionnaire(agents[i], post_experiment_questions)
newline = '\n'
res = f"""
{'#'* 31} Agent Definitions {'#'* 31}\n
{newline.join([player_descriptions[i] for i in range(num_players)])}
\n
{'#'* (25 + 7)} Agent Dialogue {'#'* (25 + 8)}
{transcript}
\n
{'#'* 25} Post Experiment Questionnaire {'#'* 25}
{';'.join([f'{player_names[i]}: {player_responses[player_names[i]]}' for i in range(num_players)])}
\n
{'#'* 37} Stats {'#'* 37}
Number of Exchanges: {num_exchanges}\n
Number of Words: {conversation_length}
"""
print(res)
with open(f"results/{num_players}{str(hash(res))[:10]}.txt", "w") as f:
f.write(res)
| [
"You can add detail to the description of an individual in a negotiation.",
"[]"
] |
2024-01-10 | DanielLongo/LLM-ABM | games~NPlayerAgreementPolarizing.py | import random
from typing import List, Dict, Callable
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
HumanMessage,
SystemMessage,
)
import sys
sys.path.append("..")
from BetterAgents import DialogueAgent, DialogueSimulator
import textwrap
TEMP = .6
num_players = 3
max_iters = 20
word_limit = 15
player_names = ["Alice", "Bob", "Charlie", "David", "Eve", "Frank", "Grace", "Heidi", "Ivan", "Judy", "Kevin", "Larry", "Mallory", "Nancy", "Olivia", "Peggy", "Quentin", "Rupert", "Sybil", "Trent", "Ursula", "Victor", "Walter", "Xavier", "Yvonne", "Zelda"]
# dilemia = "Should we conserve more forest land?"
# dilemia = "Should you vaccinate your child for Measles?"
# dilemia = "should abortion be legal?"
dilemia = "Should we raise the capital gains tax?"
def main():
game_description = f"""Here is the topic for a negotiation between {num_players} people. The dilemma is: {dilemia}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of an individual in a negotiation."
)
player_specifier_prompts = []
for i in range(num_players):
player_specifier_prompts.append([
player_descriptor_system_message,
HumanMessage(content=
f"""{game_description}
Please reply with a creative description of the player, {player_names[i]}, in {word_limit} words or less.
Speak directly to {player_names[i]}.
Do not add anything else."""
)
])
player_descriptions = [ChatOpenAI(temperature=1.0)(player_specifier_prompts[i]).content for i in range(num_players)]
for i in range(num_players):
print(f'{player_names[i]} Description:')
print(player_descriptions[i])
player_system_messages = []
for i in range(num_players):
player_system_messages.append(SystemMessage(content=(
f"""{game_description}
Never forget you are {player_names[i]}.
Your character description is as follows: {player_descriptions[i]}.
Speak in the first person from the perspective of {player_names[i]}.
Do not change roles!
Be concise and to the point.
Be convincing.
Be opinionated.
Be combative. Address the other arguments head on.
Do not be repetitive.
Do not concede your argument.
Do not be passive.
Use facts. Be specific and precise.
Use statistics. Reference current events.
Be creative with your arguments. Make sure to address the other players' arguments.
If the whole group has reached a decision, type 'we have reached a decision' followed by the resolution.
""")))
agents = [DialogueAgent(name=player_names[i], system_message=player_system_messages[i], model=ChatOpenAI(temperature=TEMP)) for i in range(num_players)]
def select_next_speaker(step: int, agents: List[DialogueAgent]) -> int:
return step % len(agents)
simulator = DialogueSimulator(agents=agents, selection_function=select_next_speaker)
simulator.reset()
simulator.inject(agents[0].name, f"Let us begin discussing the dilemma: {dilemia}.")
transcript = ""
num_exchanges = 0
for i in range(max_iters):
num_exchanges += 1
name, message = simulator.step()
print(f'{name}: {message}')
transcript += f'{name}: {message}\n\n'
print('\n')
if "we have reached a decision" in message.lower():
break
conversation_length = len(transcript.split(' '))
print("*" * (25 + 9), "END OF GAME", "*" * (25 + 9))
print("Final Decision:")
print(message)
print('\n')
# ask post experiment questions to all players
print("*" * 25, "POST EXPERIMENT QUESTIONNAIRE", "*" * 25)
post_experiment_questions = ["how do you feel the discourse went 1-10? respond in json format with element 'discourse_rating'. nothing more."]
post_experiment_questions += [f"Following your interactions with {player_names[i]}, rate them 1-10 on likability? respond in json format with element '{player_names[i]}_rating'. nothing more." for i in range(num_players)]
def post_experiment_questionnaire(agent, questions):
responses = []
for question in questions:
responses.append(
ChatOpenAI(temperature=.5)([
agent.system_message,
HumanMessage(content=question)
]).content
)
return responses
player_responses = {}
for i in range(num_players):
print(f"{player_names[i]}")
player_responses[player_names[i]] = post_experiment_questionnaire(agents[i], post_experiment_questions)
newline = '\n'
res = f"""
{'#'* 31} Agent Definitions {'#'* 31}\n
{newline.join([player_descriptions[i] for i in range(num_players)])}
\n
{'#'* (25 + 7)} Agent Dialogue {'#'* (25 + 8)}
{transcript}
\n
{'#'* 25} Post Experiment Questionnaire {'#'* 25}
{';'.join([f'{player_names[i]}: {player_responses[player_names[i]]}' for i in range(num_players)])}
\n
{'#'* 37} Stats {'#'* 37}
Number of Exchanges: {num_exchanges}\n
Number of Words: {conversation_length}
"""
print(res)
filename = f"polarizingDissent/{num_players}-{dilemia.replace('?', '').split(' ')[-1]}-{str(random.randint(1000,9999))[:10]}.txt"
print(filename)
with open(filename, "w") as f:
f.write(res)
for i in range(10):
try:
main()
except Exception as e:
print("error", e)
continue
# understanding agents for polarization | [
"You can add detail to the description of an individual in a negotiation.",
"[]"
] |
2024-01-10 | DanielLongo/LLM-ABM | games~NPlayerAgreementDissent.py | import random
from typing import List, Dict, Callable
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
HumanMessage,
SystemMessage,
)
import sys
sys.path.append("..")
from BetterAgents import DialogueAgent, DialogueSimulator
import textwrap
TEMP = .6
num_players = 3
max_iters = 30
word_limit = 15
player_names = ["Alice", "Bob", "Charlie", "David", "Eve", "Frank", "Grace", "Heidi", "Ivan", "Judy", "Kevin", "Larry", "Mallory", "Nancy", "Olivia", "Peggy", "Quentin", "Rupert", "Sybil", "Trent", "Ursula", "Victor", "Walter", "Xavier", "Yvonne", "Zelda"]
# dilemia = "Should we conserve more forest land?"
dilemia = "Should you vaccinate your child for Measles?"
# dilemia = "Should abortion be legal?"
def main():
game_description = f"""Here is the topic for a negotiation between {num_players} people. The dilemma is: {dilemia}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of an individual in a negotiation. This description should be fairly general."
)
player_specifier_prompts = []
for i in range(num_players):
player_specifier_prompts.append([
player_descriptor_system_message,
HumanMessage(content=
f"""{game_description}
Please reply with a creative description of the player, {player_names[i]}, in {word_limit} words or less.
Speak directly to {player_names[i]}.
Do not add anything else."""
)
])
player_descriptions = [ChatOpenAI(temperature=1.0)(player_specifier_prompts[i]).content for i in range(num_players)]
for i in range(num_players):
print(f'{player_names[i]} Description:')
print(player_descriptions[i])
player_system_messages = []
for i in range(num_players):
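# Note: `i == -1` is never true, so the devil's-advocate prompt below is
# disabled; comparing against num_players - 1 instead would give the last
# player that role (the "without-" filename prefix near the end of this
# script suggests this run is the control condition).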
if i == -1:
player_system_messages.append(SystemMessage(content=(
f"""{game_description}
Never forget you are {player_names[i]}.
Your character description is as follows: {player_descriptions[i]}.
Speak in the first person from the perspective of {player_names[i]}.
Do not change roles!
Be concise and to the point.
Be convincing.
Be opinionated.
Be combative. Address the other arguments head on.
Do not be repetitive.
Do not concede your argument.
Do not be passive.
Use facts. Be specific and precise.
Use statistics. Reference current events.
Be creative with your arguments. Make sure to address the other players' arguments.
If the whole group has reached a decision, type 'we have reached a decision' followed by the resolution.
You are to play the role of devil's advocate!
Do not agree with the other players.
Do not concede your argument.
Cause conflict.
""")))
else:
player_system_messages.append(SystemMessage(content=(
f"""{game_description}
Never forget you are {player_names[i]}.
Your character description is as follows: {player_descriptions[i]}.
Speak in the first person from the perspective of {player_names[i]}.
Do not change roles!
Be concise and to the point.
Be convincing.
Be opinionated.
Be combative. Address the other arguments head on.
Do not be repetitive.
Do not concede your argument.
Do not be passive.
Use facts. Be specific and precise.
Use statistics. Reference current events.
Be creative with your arguments. Make sure to address the other players' arguments.
If the whole group has reached a decision, type 'we have reached a decision' followed by the resolution.
""")))
agents = [DialogueAgent(name=player_names[i], system_message=player_system_messages[i], model=ChatOpenAI(temperature=TEMP)) for i in range(num_players)]
def select_next_speaker(step: int, agents: List[DialogueAgent]) -> int:
return step % len(agents)
simulator = DialogueSimulator(agents=agents, selection_function=select_next_speaker)
simulator.reset()
simulator.inject(agents[0], f"Let us begin discussing the dilemma: {dilemma}.")
transcript = ""
num_exchanges = 0
for i in range(max_iters):
num_exchanges += 1
name, message = simulator.step()
print(f'{name}: {message}')
transcript += f'{name}: {message}\n\n'
print('\n')
if "we have reached a decision" in message.lower():
break
conversation_length = len(transcript.split(' '))
print("*" * (25 + 9), "END OF GAME", "*" * (25 + 9))
print("Final Decision:")
print(message)
print('\n')
# ask post experiment questions to all players
print("*" * 25, "POST EXPERIMENT QUESTIONNAIRE", "*" * 25)
post_experiment_questions = ["how do you feel the discourse went 1-10? respond in json format with element 'discourse_rating'. nothing more."]
post_experiment_questions += [f"Following your interactions with {player_names[i]}, rate them 1-10 on likability? respond in json format with element '{player_names[i]}_rating'. nothing more." for i in range(num_players)]
def post_experiment_questionnaire(agent, questions):
responses = []
for question in questions:
responses.append(
ChatOpenAI(temperature=.5)([
agent.system_message,
HumanMessage(content=question)
]).content
)
return responses
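# Note: each questionnaire call sends only the agent's system message plus the
# question -- the conversation transcript is not included -- so the ratings
# reflect the assigned persona rather than memory of the actual dialogue.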
player_responses = {}
for i in range(num_players):
print(f"{player_names[i]}")
player_responses[player_names[i]] = post_experiment_questionnaire(agents[i], post_experiment_questions)
newline = '\n'
res = f"""
{'#'* 31} Agent Definitions {'#'* 31}\n
{newline.join([player_descriptions[i] for i in range(num_players)])}
\n
{'#'* (25 + 7)} Agent Dialogue {'#'* (25 + 8)}
{transcript}
\n
{'#'* 25} Post Experiment Questionnaire {'#'* 25}
{';'.join([f'{player_names[i]}: {player_responses[player_names[i]]}' for i in range(num_players)])}
\n
{'#'* 37} Stats {'#'* 37}
Number of Exchanges: {num_exchanges}\n
Number of Words: {conversation_length}
"""
print(res)
with open(f"devilsAdvocate/without-{num_players}{str(random.randint(1000,9999))[:10]}.txt", "w") as f:
f.write(res)
# Run 10 independent simulations (control condition: no devil's advocate); API errors are printed and skipped.
for i in range(10):
try:
main()
except Exception as e:
print(e)
continue | [
"You can add detail to the description of an individual in a negotiation. this desciption should be pretty general.",
"[]"
] |
2024-01-10 | DanielLongo/LLM-ABM | games~2PlayerNegotiation.py | from typing import List, Dict, Callable
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
HumanMessage,
SystemMessage,
)
import sys
sys.path.append("..")
from BetterAgents import DialogueAgent, DialogueSimulator
buyer_name = "Bob"
seller_name = "George"
quest = "Find a fair price for a used car."
word_limit = 50 # word limit for task brainstorming
game_description = f"""Here is the topic for a negotation between two parties: {quest}.
One individual is looking to buy the car: the buyer, {buyer_name}.
One individual is looking to sell the car: the salenames, {seller_name}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of an individual in a negotiation.")
buyer_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(content=
f"""{game_description}
Please reply with a creative description of the buyer, {buyer_name}, in {word_limit} words or less.
Speak directly to {buyer_name}.
Do not add anything else."""
)
]
buyer_description = ChatOpenAI(temperature=1.0)(buyer_specifier_prompt).content
seller_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(content=
f"""{game_description}
Please reply with a creative description of the seller, {seller_name}, in {word_limit} words or less.
Speak directly to {seller_name}.
Do not add anything else."""
)
]
seller_description = ChatOpenAI(temperature=1.0)(seller_specifier_prompt).content
print('Buyer Description:')
print(buyer_description)
print('Seller Description:')
print(seller_description)
car_description = "The car is a 2012 Lexus LS 460 AWD with 27,000 miles."
buyer_system_message = SystemMessage(content=(
f"""{game_description}
Never forget you are the buyer, {buyer_name}, and I am the seller, {seller_name}.
Your character description is as follows: {buyer_description}.
You will propose deals and offers and are trying to buy the car for as cheap as possible. {car_description}
Speak in the first person from the perspective of {buyer_name}.
For describing your own body movements, wrap your description in '*'.
Do not change roles!
Do not speak from the perspective of {seller_name}.
Do not add anything else.
Remember you are the buyer, {buyer_name}.
Stop speaking the moment you finish speaking from your perspective.
"""
))
seller_system_message = SystemMessage(content=(
f"""{game_description}
Never forget you are the seller, {seller_name}, and I am the buyer, {buyer_name}.
Your character description is as follows: {seller_description}.
You will propose deals and offers and are trying to sell the car to me for as much as possible (be super aggressive with the buyer). {car_description}
Speak in the first person from the perspective of {seller_name}.
Do not change roles!
Do not speak from the perspective of {buyer_name}.
Do not add anything else.
Remember you are the seller, {seller_name}.
Stop speaking the moment you finish speaking from your perspective.
"""
))
quest_specifier_prompt = [
SystemMessage(content="You can make a task more specific."),
HumanMessage(content=
f"""{game_description}
You are the seller, {seller_name}.
Please reply with the specified response in {word_limit} words or less.
Speak directly to the protagonist {buyer_name}.
Do not add anything else."""
)
]
specified_quest = ChatOpenAI(temperature=1.0)(quest_specifier_prompt).content
print(f"Original response:\n{quest}\n")
print(f"Detailed response:\n{specified_quest}\n")
buyer = DialogueAgent(name=buyer_name,
system_message=buyer_system_message,
model=ChatOpenAI(temperature=0.2))
seller = DialogueAgent(name=seller_name,
system_message=seller_system_message,
model=ChatOpenAI(temperature=0.2))
def select_next_speaker(step: int, agents: List[DialogueAgent]) -> int:
idx = step % len(agents)
return idx
max_iters = 6
n = 0
simulator = DialogueSimulator(
agents=[seller, buyer],
selection_function=select_next_speaker
)
simulator.reset()
simulator.inject(seller_name, specified_quest)
print(f"({seller_name}): {specified_quest}")
print('\n')
while n < max_iters:
name, message = simulator.step()
print(f"({name}): {message}")
print('\n')
n += 1 | [
"You can add detail to the description of an individual in a negotiation.",
"PLACEHOLDER\n Please reply with a creative description of the buyer, PLACEHOLDER, in PLACEHOLDER words or less. \n Speak directly to PLACEHOLDER.\n Do not add anything else.",
"PLACEHOLDER\nNever forget you are the buyer, PLACEHOLDER, and I am the seller, PLACEHOLDER. \nYour character description is as follows: PLACEHOLDER.\nYou will propose deals and offers and are trying to buy the car for as cheap as possible. PLACEHOLDER\nSpeak in the first person from the perspective of PLACEHOLDER.\nFor describing your own body movements, wrap your description in '*'.\nDo not change roles!\nDo not speak from the perspective of PLACEHOLDER.\nDo not add anything else.\nRemember you are the buyer, PLACEHOLDER.\nStop speaking the moment you finish speaking from your perspective.\n",
"PLACEHOLDER\nNever forget you are the seller, PLACEHOLDER, and I am the buyer, PLACEHOLDER. \nYour character description is as follows: PLACEHOLDER.\nYou will propose deals and offers and are trying to sell the car to me for as much as possible (be super aggressive with the buyer). PLACEHOLDER\nSpeak in the first person from the perspective of PLACEHOLDER.\nDo not change roles!\nDo not speak from the perspective of PLACEHOLDER.\nDo not add anything else.\nRemember you are the seller, PLACEHOLDER.\nStop speaking the moment you finish speaking from your perspective.\n",
"PLACEHOLDER\n \n You are the seller, PLACEHOLDER.\n Please reply with the specified response in PLACEHOLDER words or less. \n Speak directly to the protagonist PLACEHOLDER.\n Do not add anything else.",
"You can make a task more specific.",
"PLACEHOLDER\n Please reply with a creative description of the seller, PLACEHOLDER, in PLACEHOLDER words or less. \n Speak directly to PLACEHOLDER.\n Do not add anything else."
] |
2024-01-10 | DanielLongo/LLM-ABM | games~NPlayerAgreementDissentWith.py | import random
from typing import List, Dict, Callable
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
HumanMessage,
SystemMessage,
)
import sys
sys.path.append("..")
from BetterAgents import DialogueAgent, DialogueSimulator
import textwrap
TEMP = .6
num_players = 3
max_iters = 30
word_limit = 15
player_names = ["Alice", "Bob", "Charlie", "David", "Eve", "Frank", "Grace", "Heidi", "Ivan", "Judy", "Kevin", "Larry", "Mallory", "Nancy", "Olivia", "Peggy", "Quentin", "Rupert", "Sybil", "Trent", "Ursula", "Victor", "Walter", "Xavier", "Yvonne", "Zelda"]
# dilemia = "Should we conserve more forest land?"
dilemia = "Should you vaccinated your child for Measles?"
# dilema = "should abortion be legal?"
def main():
game_description = f"""Here is the topic for a negotation between {num_players} people. The dilemma is: {dilemia}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of an individual in a negotiation. this desciption should be pretty general."
)
player_specifier_prompts = []
for i in range(num_players):
player_specifier_prompts.append([
player_descriptor_system_message,
HumanMessage(content=
f"""{game_description}
Please reply with a creative description of the player, {player_names[i]}, in {word_limit} words or less.
Speak directly to {player_names[i]}.
Do not add anything else."""
)
])
player_descriptions = [ChatOpenAI(temperature=1.0)(player_specifier_prompts[i]).content for i in range(num_players)]
for i in range(num_players):
print(f'{player_names[i]} Description:')
print(player_descriptions[i])
player_system_messages = []
for i in range(num_players):
if i == 0:  # the first player is assigned the devil's-advocate role
player_system_messages.append(SystemMessage(content=(
f"""{game_description}
Never forget you are {player_names[i]}.
Your character description is as follows: {player_descriptions[i]}.
Speak in the first person from the perspective of {player_names[i]}.
Do not change roles!
Be concise and to the point.
Be convincing.
Be opinionated.
Be combative. Address the other arguments head on.
Do not be repetitive.
Do not concede your argument.
Do not be passive.
Use facts. Be specific and precise.
Use statistics. Reference current events.
Be creative with your arguments. Make sure to address the other players' arguments.
If the whole group has reached a decision, type 'we have reached a decision' followed by the resolution.
You are to play the role of devil's advocate!
Do not agree with the other players.
Do not concede your argument.
Cause conflict.
""")))
else:
player_system_messages.append(SystemMessage(content=(
f"""{game_description}
Never forget you are {player_names[i]}.
Your character description is as follows: {player_descriptions[i]}.
Speak in the first person from the perspective of {player_names[i]}.
Do not change roles!
Be concise and to the point.
Be convincing.
Be opinionated.
Be combative. Address the other arguments head on.
Do not be repetitive.
Do not concede your argument.
Do not be passive.
Use facts. Be specific and precise.
Use statistics. Reference current events.
Be creative with your arguments. Make sure to address the other players' arguments.
If the whole group has reached a decision, type 'we have reached a decision' followed by the resolution.
""")))
agents = [DialogueAgent(name=player_names[i], system_message=player_system_messages[i], model=ChatOpenAI(temperature=TEMP)) for i in range(num_players)]
def select_next_speaker(step: int, agents: List[DialogueAgent]) -> int:
return step % len(agents)
simulator = DialogueSimulator(agents=agents, selection_function=select_next_speaker)
simulator.reset()
simulator.inject(agents[0], f"Let us begin discussing the dilemma: {dilemma}.")
transcript = ""
num_exchanges = 0
for i in range(max_iters):
num_exchanges += 1
name, message = simulator.step()
print(f'{name}: {message}')
transcript += f'{name}: {message}\n\n'
print('\n')
if "we have reached a decision" in message.lower():
break
conversation_length = len(transcript.split(' '))
print("*" * (25 + 9), "END OF GAME", "*" * (25 + 9))
print("Final Decision:")
print(message)
print('\n')
# ask post experiment questions to all players
print("*" * 25, "POST EXPERIMENT QUESTIONNAIRE", "*" * 25)
post_experiment_questions = ["how do you feel the discourse went 1-10? respond in json format with element 'discourse_rating'. nothing more."]
post_experiment_questions += [f"Following your interactions with {player_names[i]}, rate them 1-10 on likability? respond in json format with element '{player_names[i]}_rating'. nothing more." for i in range(num_players)]
def post_experiment_questionnaire(agent, questions):
responses = []
for question in questions:
responses.append(
ChatOpenAI(temperature=.5)([
agent.system_message,
HumanMessage(content=question)
]).content
)
return responses
player_responses = {}
for i in range(num_players):
print(f"{player_names[i]}")
player_responses[player_names[i]] = post_experiment_questionnaire(agents[i], post_experiment_questions)
newline = '\n'
res = f"""
{'#'* 31} Agent Definitions {'#'* 31}\n
{newline.join([player_descriptions[i] for i in range(num_players)])}
\n
{'#'* (25 + 7)} Agent Dialogue {'#'* (25 + 8)}
{transcript}
\n
{'#'* 25} Post Experiment Questionnaire {'#'* 25}
{';'.join([f'{player_names[i]}: {player_responses[player_names[i]]}' for i in range(num_players)])}
\n
{'#'* 37} Stats {'#'* 37}
Number of Exchanges: {num_exchanges}\n
Number of Words: {conversation_length}
"""
print(res)
with open(f"devilsAdvocate/with-{num_players}{str(random.randint(1000,9999))[:10]}.txt", "w") as f:
f.write(res)
# Run 10 independent simulations (treatment condition: with a devil's advocate); API errors are printed and skipped.
for i in range(10):
try:
main()
except Exception as e:
print(e)
continue | [
"You can add detail to the description of an individual in a negotiation. this desciption should be pretty general.",
"[]"
] |
2024-01-10 | DanielLongo/LLM-ABM | games~dnd.py | from typing import List, Dict, Callable
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
HumanMessage,
SystemMessage,
)
import sys
sys.path.append("..")
from BetterAgents import DialogueAgent, DialogueSimulator
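# BetterAgents is a local module; from its usage here, DialogueAgent pairs a
# name and system message with a chat model, and DialogueSimulator steps
# through the agents using the supplied selection function.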
protagonist_name = "Harry Potter"
storyteller_name = "Dungeon Master"
quest = "Find all of Lord Voldemort's seven horcruxes."
word_limit = 50 # word limit for task brainstorming
game_description = f"""Here is the topic for a Dungeons & Dragons game: {quest}.
There is one player in this game: the protagonist, {protagonist_name}.
The story is narrated by the storyteller, {storyteller_name}."""
player_descriptor_system_message = SystemMessage(
content="You can add detail to the description of a Dungeons & Dragons player.")
protagonist_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(content=
f"""{game_description}
Please reply with a creative description of the protagonist, {protagonist_name}, in {word_limit} words or less.
Speak directly to {protagonist_name}.
Do not add anything else."""
)
]
protagonist_description = ChatOpenAI(temperature=1.0)(protagonist_specifier_prompt).content
storyteller_specifier_prompt = [
player_descriptor_system_message,
HumanMessage(content=
f"""{game_description}
Please reply with a creative description of the storyteller, {storyteller_name}, in {word_limit} words or less.
Speak directly to {storyteller_name}.
Do not add anything else."""
)
]
storyteller_description = ChatOpenAI(temperature=1.0)(storyteller_specifier_prompt).content
print('Protagonist Description:')
print(protagonist_description)
print('Storyteller Description:')
print(storyteller_description)
protagonist_system_message = SystemMessage(content=(
f"""{game_description}
Never forget you are the protagonist, {protagonist_name}, and I am the storyteller, {storyteller_name}.
Your character description is as follows: {protagonist_description}.
You will propose actions you plan to take and I will explain what happens when you take those actions.
Speak in the first person from the perspective of {protagonist_name}.
For describing your own body movements, wrap your description in '*'.
Do not change roles!
Do not speak from the perspective of {storyteller_name}.
Do not forget to finish speaking by saying, 'It is your turn, {storyteller_name}.'
Do not add anything else.
Remember you are the protagonist, {protagonist_name}.
Stop speaking the moment you finish speaking from your perspective.
"""
))
storyteller_system_message = SystemMessage(content=(
f"""{game_description}
Never forget you are the storyteller, {storyteller_name}, and I am the protagonist, {protagonist_name}.
Your character description is as follows: {storyteller_description}.
I will propose actions I plan to take and you will explain what happens when I take those actions.
Speak in the first person from the perspective of {storyteller_name}.
For describing your own body movements, wrap your description in '*'.
Do not change roles!
Do not speak from the perspective of {protagonist_name}.
Do not forget to finish speaking by saying, 'It is your turn, {protagonist_name}.'
Do not add anything else.
Remember you are the storyteller, {storyteller_name}.
Stop speaking the moment you finish speaking from your perspective.
"""
))
quest_specifier_prompt = [
SystemMessage(content="You can make a task more specific."),
HumanMessage(content=
f"""{game_description}
You are the storyteller, {storyteller_name}.
Please make the quest more specific. Be creative and imaginative.
Please reply with the specified quest in {word_limit} words or less.
Speak directly to the protagonist {protagonist_name}.
Do not add anything else."""
)
]
specified_quest = ChatOpenAI(temperature=1.0)(quest_specifier_prompt).content
print(f"Original quest:\n{quest}\n")
print(f"Detailed quest:\n{specified_quest}\n")
protagonist = DialogueAgent(name=protagonist_name,
system_message=protagonist_system_message,
model=ChatOpenAI(temperature=0.2))
storyteller = DialogueAgent(name=storyteller_name,
system_message=storyteller_system_message,
model=ChatOpenAI(temperature=0.2))
def select_next_speaker(step: int, agents: List[DialogueAgent]) -> int:
idx = step % len(agents)
return idx
max_iters = 6
n = 0
simulator = DialogueSimulator(
agents=[storyteller, protagonist],
selection_function=select_next_speaker
)
simulator.reset()
simulator.inject(storyteller_name, specified_quest)
print(f"({storyteller_name}): {specified_quest}")
print('\n')
while n < max_iters:
name, message = simulator.step()
print(f"({name}): {message}")
print('\n')
n += 1 | [
"You can add detail to the description of a Dungeons & Dragons player.",
"PLACEHOLDER\nNever forget you are the protagonist, PLACEHOLDER, and I am the storyteller, PLACEHOLDER. \nYour character description is as follows: PLACEHOLDER.\nYou will propose actions you plan to take and I will explain what happens when you take those actions.\nSpeak in the first person from the perspective of PLACEHOLDER.\nFor describing your own body movements, wrap your description in '*'.\nDo not change roles!\nDo not speak from the perspective of PLACEHOLDER.\nDo not forget to finish speaking by saying, 'It is your turn, PLACEHOLDER.'\nDo not add anything else.\nRemember you are the protagonist, PLACEHOLDER.\nStop speaking the moment you finish speaking from your perspective.\n",
"PLACEHOLDER\n Please reply with a creative description of the protagonist, PLACEHOLDER, in PLACEHOLDER words or less. \n Speak directly to PLACEHOLDER.\n Do not add anything else.",
"PLACEHOLDER\n \n You are the storyteller, PLACEHOLDER.\n Please make the quest more specific. Be creative and imaginative.\n Please reply with the specified quest in PLACEHOLDER words or less. \n Speak directly to the protagonist PLACEHOLDER.\n Do not add anything else.",
"You can make a task more specific.",
"PLACEHOLDER\nNever forget you are the storyteller, PLACEHOLDER, and I am the protagonist, PLACEHOLDER. \nYour character description is as follows: PLACEHOLDER.\nI will propose actions I plan to take and you will explain what happens when I take those actions.\nSpeak in the first person from the perspective of PLACEHOLDER.\nFor describing your own body movements, wrap your description in '*'.\nDo not change roles!\nDo not speak from the perspective of PLACEHOLDER.\nDo not forget to finish speaking by saying, 'It is your turn, PLACEHOLDER.'\nDo not add anything else.\nRemember you are the storyteller, PLACEHOLDER.\nStop speaking the moment you finish speaking from your perspective.\n",
"PLACEHOLDER\n Please reply with a creative description of the storyteller, PLACEHOLDER, in PLACEHOLDER words or less. \n Speak directly to PLACEHOLDER.\n Do not add anything else."
] |
2024-01-10 | 0xGoerliMainnet/LangChainBitcoin | L402~l402_api_chain.py | import requests
from typing import Any, Dict, Optional
from langchain.chains.api.prompt import API_RESPONSE_PROMPT, API_URL_PROMPT
from langchain.chains import APIChain
from langchain.prompts import BasePromptTemplate
from langchain.base_language import BaseLanguageModel
from langchain.chains.llm import LLMChain
from .requests_l402 import RequestsL402Wrapper
from .requests_l402 import ResponseTextWrapper
from lightning import LightningNode
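# Background: L402 (formerly LSAT) layers payments over HTTP 402 "Payment
# Required". The server replies with a Lightning invoice; once the client pays
# it and presents the resulting credentials, the request succeeds. The chain
# below lets LangChain's APIChain perform that handshake transparently via the
# injected Lightning node.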
class L402APIChain(APIChain):
requests_wrapper: Any
@classmethod
def from_llm_and_api_docs(
cls,
llm: BaseLanguageModel,
api_docs: str,
headers: Optional[dict] = None,
api_url_prompt: BasePromptTemplate = API_URL_PROMPT,
api_response_prompt: BasePromptTemplate = API_RESPONSE_PROMPT,
lightning_node = None,
**kwargs: Any,
) -> APIChain:
"""Load chain from just an LLM and the api docs."""
requests_L402 = RequestsL402Wrapper(lightning_node, requests)
lang_chain_request_L402 = ResponseTextWrapper(
requests_wrapper=requests_L402,
)
get_request_chain = LLMChain(llm=llm, prompt=api_url_prompt)
get_answer_chain = LLMChain(llm=llm, prompt=api_response_prompt)
return cls(
api_request_chain=get_request_chain,
api_answer_chain=get_answer_chain,
requests_wrapper=lang_chain_request_L402,
api_docs=api_docs,
**kwargs,
)
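# A minimal usage sketch (all names below are illustrative, not part of this
# module): build the chain from an LLM, the target API's docs, and a funded
# Lightning node, then query it like any other APIChain.
#
#   from langchain.llms import OpenAI
#
#   llm = OpenAI(temperature=0)
#   node = LightningNode(...)  # hypothetical constructor args for your node
#   chain = L402APIChain.from_llm_and_api_docs(
#       llm, api_docs=API_DOCS, lightning_node=node,
#   )
#   print(chain.run("What does the paid endpoint return?"))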
| [] |
2024-01-10 | FrankCastlSenior/testing | for_gpt_main.py | import os
import openai
from openaiapi import get_chatgpt_response
from elevenlabsapi import ttsapikey
import io
import requests
from pydub import AudioSegment
from pydub.playback import play
from datetime import datetime
import webbrowser
import time
import threading
# Custom directory for saving text files
output_text_directory = "C:/Tic/main_gpt_assistant/output_chat"
# Eleven Labs API and Voice ID - "TmQmj1rrc2pDH2JOOfTi - Valley, NLhhnq7yGcjLD58e2Y83 - Neal"
ELEVEN_LABS_API_URL = "https://api.elevenlabs.io/v1/text-to-speech/NLhhnq7yGcjLD58e2Y83"
ELEVEN_LABS_API_KEY = ttsapikey
def text_to_speech(text):
headers = {
"Accept": "audio/mpeg",
"Content-Type": "application/json",
"xi-api-key": ELEVEN_LABS_API_KEY,
}
data = {
"text": text,
"model_id": "eleven_multilingual_v2",
"voice_settings": {
"stability": 0.5,
"similarity_boost": 0.5,
"use_speaker_boost": True,
},
}
response = requests.post(ELEVEN_LABS_API_URL, json=data, headers=headers)
audio = AudioSegment.from_mp3(io.BytesIO(response.content))
play_audio_thread = threading.Thread(target=play, args=(audio,))
play_audio_thread.start() # Start playing audio in a separate thread
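# Playback is fire-and-forget: the thread is never joined, so the main loop can
# keep accepting input while audio plays (and overlapping calls may overlap audio).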
def save_response_to_file(prompt, response):
current_time = datetime.now().strftime("%H:%M:%S")
current_date = datetime.now().strftime("%Y-%m-%d")
with open(
os.path.join(output_text_directory, "output.txt"),
"a",
encoding="utf-8",
) as text_file:
text_file.write(
f"Prompt ({current_time}) ({current_date}) - {prompt}\nResponse - {response}\n\n"
)
def open_website(url):
webbrowser.open(url)
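# Keyword dispatch: "play music", time/date queries, and "open <site>" override
# the LLM reply below. Note that get_chatgpt_response is still called first for
# every input, so overridden replies cost one unused API call.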
# text_to_speech("Welcome back, Captain")
print("Valley: Welcome back, Captain")
while True:
user_input = input("Captain: ")
user_input_lower = user_input.lower()
if "valley sleep" in user_input_lower:
text_to_speech("Goodbye, Captain.")
time.sleep(1)
print("Sam: Goodbye, Captain.")
break
response = get_chatgpt_response(user_input)
play_audio = True
# Check for specific commands and perform actions
if "play music" in user_input_lower:
musicPath = "C:/Users/dom/Music/Hymn_for_the_weekend.m4a"
response = "Playing music, sir."
text_to_speech(response)
time_to_wait = 2
play_audio = False
time.sleep(time_to_wait)
os.system(f"start {musicPath}")
elif (
"current time and date" in user_input_lower
or "time and date" in user_input_lower
):
current_time = datetime.now().strftime("%I:%M %p")
current_date = datetime.now().strftime("%Y-%m-%d")
response = f"The current time is {current_time} and the date is {current_date}."
elif "current time" in user_input_lower or "time" in user_input_lower:
current_time = datetime.now().strftime("%I:%M:%S %p")
response = f"The current time is {current_time}."
elif "current date" in user_input_lower or "date" in user_input_lower:
current_date = datetime.now().strftime("%Y-%m-%d")
response = f"The current date is {current_date}."
sites = [
["youtube", "https://www.youtube.com"],
["wikipedia", "https://www.wikipedia.com"],
["google", "https://www.google.com"],
]
for site in sites:
if f"open {site[0]}".lower() in user_input_lower:
response = f"Opening {site[0]} sir..."
text_to_speech(response)
time.sleep(2)
play_audio = False
open_website(site[1])
if play_audio:
text_to_speech(response)
save_response_to_file(user_input, response)
print("Valley:", response)
| [] |
2024-01-10 | jayanth151002/athena.ai | server~src~mcq_gen.py | import os
import re
import json
import pickle
from tqdm import tqdm
from typing import Optional
from dotenv import load_dotenv
from langchain.schema import HumanMessage
from langchain.chat_models import ChatOpenAI
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from langchain.prompts import PromptTemplate, ChatPromptTemplate, HumanMessagePromptTemplate
class MCQgenChain:
def __init__(self, project_name : str, artifact_folder : str, openai_key : Optional[str] = None, openai_temp : int = 0) -> None:
load_dotenv(dotenv_path='.env/openai.env')
self.project_name = project_name
self.artifact_folder = artifact_folder
self.openai_key = os.getenv("OPENAI_API_KEY") if openai_key is None else openai_key
self.story_summary_path = os.path.join(artifact_folder, project_name, "story_summary.pkl")
self.story_summary = pickle.load(
open(self.story_summary_path, "rb")
)
self.chunks = self.story_summary['summary_chunks']
self.response_schemas = [
ResponseSchema(name="question", description="A question generated from input text snippet."),
ResponseSchema(name="options", description="Possible choices of the multiple choice question."),
ResponseSchema(name="answer", description="Correct answer for question.")
]
self.output_parser = StructuredOutputParser.from_response_schemas(self.response_schemas)
self.format_instructions = self.output_parser.get_format_instructions()
self.chat_model = ChatOpenAI(temperature=openai_temp, model_name = "gpt-3.5-turbo", openai_api_key=self.openai_key)
self.instruction ="""
Given a text input from the user, generate multiple choice questions
from it along with the correct answer. \n{format_instructions}\n{user_prompt}"""
self.prompt = ChatPromptTemplate(
messages=[
HumanMessagePromptTemplate.from_template(self.instruction)
],
input_variables=["user_prompt"],
partial_variables={"format_instructions": self.format_instructions}
)
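# post_process_string below expects the chat model to wrap its answer in a
# fenced ```json block, e.g. (illustrative shape only):
#
#   ```json
#   {"question": "...", "options": "A) ...\nB) ...", "answer": "..."}
#   ```
#
# The fence contents are wrapped in [] and parsed as a JSON array, and each
# "options" string is then split on newlines.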
def post_process_string(self, markdown_text):
json_string = re.search(r'```json\n(.*?)```', markdown_text, re.DOTALL).group(1)
python_list = json.loads(f'[{json_string}]')
for lst in python_list:
lst['options'] = lst['options'].split('\n')
return python_list
def get_chunkwise_output(self):
chunk_wise_output = []
# NOTE: only self.chunks[1:2] (a single chunk) is processed here, apparently a debug slice; use self.chunks to cover all chunks.
for chunk in tqdm(self.chunks[1:2], total=len(self.chunks[1:2])):
user_query = self.prompt.format_prompt(user_prompt = chunk)
user_query_output = self.chat_model(user_query.to_messages())
chunk_wise_output.append(user_query_output.content)
return chunk_wise_output
def generate_chunkwise_mcq(self):
mcqs_list_chunkwise = []
for chunk in self.get_chunkwise_output():
try:
mcqs_list_chunkwise.append(self.post_process_string(chunk))
except (AttributeError, json.JSONDecodeError):
# the model output lacked a parsable ```json block; skip this chunk
continue
return mcqs_list_chunkwise
def generate_mcqs(self):
all_mcqs = {}
for chunk_number, chunk_wise_mcq in enumerate(self.generate_chunkwise_mcq()):
sub_chunks = []
for mcqs in chunk_wise_mcq:
sub_chunks.append({
"question": mcqs['question'],
"options": mcqs['options'][0].split('.')[:4],
"answer": mcqs['answer']
})
all_mcqs[f"chunk_{chunk_number}"] = sub_chunks
return all_mcqs | [] |