date_collected (stringclasses 1) | repo_name (stringlengths 6-116) | file_name (stringlengths 2-220) | file_contents (stringlengths 13-357k) | prompts (sequence)
---|---|---|---|---
2024-01-10 | jimmyliao/GPT-Azure-Search-Engine | app~pages~1_GPT_Smart_Search.py | import streamlit as st
import urllib
import os
import time
import requests
import random
from collections import OrderedDict
from openai.error import OpenAIError
from langchain.docstore.document import Document
from components.sidebar import sidebar
from utils import (
embed_docs,
get_answer,
get_sources,
search_docs,
num_tokens_from_string,
model_tokens_limit
)
AZURE_SEARCH_API_VERSION = '2021-04-30-Preview'
AZURE_OPENAI_API_VERSION = "2023-03-15-preview"
# setting encoding for GPT3.5 / GPT4 models
encoding_name ='cl100k_base'
def clear_submit():
st.session_state["submit"] = False
#@st.cache_data()
def get_search_results(query, indexes):
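# Query each Azure Cognitive Search index with semantic ranking enabled and
# collect the raw JSON responses (one per index).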
headers = {'Content-Type': 'application/json','api-key': os.environ["AZURE_SEARCH_KEY"]}
agg_search_results = []
for index in indexes:
url = os.environ["AZURE_SEARCH_ENDPOINT"] + '/indexes/'+ index + '/docs'
url += '?api-version={}'.format(AZURE_SEARCH_API_VERSION)
url += '&search={}'.format(query)
url += '&select=*'
url += '&$top=5' # You can change this to anything you need/want
url += '&queryLanguage=en-us'
url += '&queryType=semantic'
url += '&semanticConfiguration=my-semantic-config'
url += '&$count=true'
url += '&speller=lexicon'
url += '&answers=extractive|count-3'
url += '&captions=extractive|highlight-false'
resp = requests.get(url, headers=headers)
print(url)
print(resp.status_code)
search_results = resp.json()
agg_search_results.append(search_results)
return agg_search_results
st.set_page_config(page_title="GPT Smart Search", page_icon="📖", layout="wide")
st.header("GPT Smart Search Engine")
with st.sidebar:
st.markdown("""# Instructions""")
st.markdown("""
Ask a question that you think can be answered with the information in about 10k Arxiv Computer Science publications from 2020-2021 or in 52k Medical Covid-19 Publications from 2020.
For example:
- What are markov chains?
- List the authors that talk about Gradient Boosting Machines
- How does random forest work?
- What kind of problems can I solve with reinforcement learning? Give me some real life examples
- What kind of problems Turing Machines solve?
- What are the main risk factors for Covid-19?
- What medicine reduces inflammation in the lungs?
- Why doesn't Covid affect kids as much as adults?
\nYou will notice that the answers to these questions are different from those of the open ChatGPT, since these papers are the only possible context. This search engine does not look at the open internet to answer these questions. If the context doesn't contain the information, the engine will respond: I don't know.
""")
st.markdown("""
- ***Quick Answer***: GPT model only uses, as context, the captions of the results coming from Azure Search
- ***Best Answer***: GPT model uses, as context, all of the content of the documents coming from Azure Search
""")
coli1, coli2 = st.columns([2,1])
with coli1:
query = st.text_input("Ask a question to your enterprise data lake", value= "What is CLP?", on_change=clear_submit)
with coli2:
temp = st.slider('Temperature :thermometer:', min_value=0.0, max_value=1.0, step=0.1, value=0.5)
# options = ['English', 'Spanish', 'Portuguese', 'French', 'Russian']
# selected_language = st.selectbox('Answer Language:', options, index=0)
col1, col2, col3 = st.columns([1,1,3])
with col1:
qbutton = st.button('Quick Answer')
with col2:
bbutton = st.button('Best Answer')
if (not os.environ.get("AZURE_SEARCH_ENDPOINT")) or (os.environ.get("AZURE_SEARCH_ENDPOINT") == ""):
st.error("Please set your AZURE_SEARCH_ENDPOINT on your Web App Settings")
elif (not os.environ.get("AZURE_SEARCH_KEY")) or (os.environ.get("AZURE_SEARCH_KEY") == ""):
st.error("Please set your AZURE_SEARCH_ENDPOINT on your Web App Settings")
elif (not os.environ.get("AZURE_OPENAI_ENDPOINT")) or (os.environ.get("AZURE_OPENAI_ENDPOINT") == ""):
st.error("Please set your AZURE_OPENAI_ENDPOINT on your Web App Settings")
elif (not os.environ.get("AZURE_OPENAI_API_KEY")) or (os.environ.get("AZURE_OPENAI_API_KEY") == ""):
st.error("Please set your AZURE_OPENAI_API_KEY on your Web App Settings")
else:
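# Expose the Azure OpenAI settings through the generic OPENAI_* environment variables read by the OpenAI client.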
os.environ["OPENAI_API_BASE"] = os.environ.get("AZURE_OPENAI_ENDPOINT")
os.environ["OPENAI_API_KEY"] = os.environ.get("AZURE_OPENAI_API_KEY")
os.environ["OPENAI_API_VERSION"] = os.environ["AZURE_OPENAI_API_VERSION"] = AZURE_OPENAI_API_VERSION
if qbutton or bbutton or st.session_state.get("submit"):
if not query:
st.error("Please enter a question!")
else:
# Azure Search
index1_name = "cogsrch-index-files"
index2_name = "cogsrch-index-csv"
indexes = [index1_name, index2_name]
agg_search_results = get_search_results(query, indexes)
file_content = OrderedDict()
content = dict()
try:
for search_results in agg_search_results:
for result in search_results['value']:
if result['@search.rerankerScore'] > 1: # Show results that are at least 25% of the max possible score=4
content[result['id']]={
"title": result['title'],
"chunks": result['pages'],
"language": result['language'],
"caption": result['@search.captions'][0]['text'],
"score": result['@search.rerankerScore'],
"location": result['metadata_storage_path']
}
except:
st.markdown("Not data returned from Azure Search, check connection..")
#After results have been filtered we will Sort and add them as an Ordered list
for id in sorted(content, key= lambda x: content[x]["score"], reverse=True):
file_content[id] = content[id]
st.session_state["submit"] = True
# Output Columns
placeholder = st.empty()
try:
docs = []
for key,value in file_content.items():
if qbutton:
docs.append(Document(page_content=value['caption'], metadata={"source": value["location"]}))
add_text = "Coming up with a quick answer... ⏳"
if bbutton:
for page in value["chunks"]:
docs.append(Document(page_content=page, metadata={"source": value["location"]}))
add_text = "Reading the source documents to provide the best answer... ⏳"
if "add_text" in locals():
with st.spinner(add_text):
if(len(docs)>0):
gpt_tokens_limit = model_tokens_limit('gpt-35-turbo')
num_token = 0
for i in range(len(docs)):
num_token += num_tokens_from_string(docs[i].page_content,encoding_name)
# if the token count exceeds the model's limit, embed the docs and retrieve only the most relevant chunks
if num_token > gpt_tokens_limit:
language = random.choice(list(file_content.items()))[1]["language"]
index = embed_docs(docs, language)
sources = search_docs(index,query)
if qbutton:
answer = get_answer(sources, query, deployment="gpt-35-turbo", chain_type = "stuff", temperature=temp, max_tokens=256)
if bbutton:
answer = get_answer(sources, query, deployment="gpt-35-turbo", chain_type = "map_reduce", temperature=temp, max_tokens=500)
else:
answer = get_answer(docs, query, deployment="gpt-35-turbo", chain_type = "stuff", temperature=temp, max_tokens=256)
else:
answer = {"output_text":"No results found" }
else:
answer = {"output_text":"No results found" }
with placeholder.container():
st.markdown("#### Answer")
st.markdown(answer["output_text"].split("SOURCES:")[0])
st.markdown("Sources:")
try:
for s in answer["output_text"].split("SOURCES:")[1].replace(" ","").split(","):
st.markdown(s)
except:
st.markdown("N/A")
st.markdown("---")
st.markdown("#### Search Results")
if(len(docs)>1):
for key, value in file_content.items():
st.markdown(str(value["title"]) + ' (Score: ' + str(round(value["score"]*100/4,2)) + '%)')
st.markdown(value["caption"])
st.markdown("---")
except OpenAIError as e:
st.error(e)
| [] |
2024-01-10 | wuhuachaocoding/DeepSpeed | deepspeed~ops~sparse_attention~matmul.py | '''Copyright The Microsoft DeepSpeed Team'''
# DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
# https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/matmul.py
import importlib
import torch
import triton
import triton.language as tl
import triton._C.libtriton as libtriton
from deepspeed.accelerator import get_accelerator
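# One Triton kernel below serves all three block-sparse matmul modes:
#   SDD: sparse output = dense x dense
#   DSD: dense output  = sparse x dense
#   DDS: dense output  = dense x sparse
# The active mode is selected via the boolean meta-parameters 'SDD', 'DSD' and 'DDS'.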
@triton.jit
def _kernel(A,
B,
C,
stride_za,
stride_ha,
stride_ma,
stride_ka,
stride_zb,
stride_hb,
stride_kb,
stride_nb,
stride_zc,
stride_hc,
stride_mc,
stride_nc,
DS0,
DS1,
SDD_K,
SDD_off_width,
lut,
locks,
nlocks,
**meta):
TM = meta['TM']
TN = meta['TN']
TK = meta['TK']
TZ = meta['TZ']
BLOCK = meta['BLOCK']
#------------#
#- Prologue -#
#------------#
pid0 = tl.program_id(0)
pid1 = tl.program_id(1)
pidz = tl.program_id(2)
if meta['SDD']:
pid1 = pid1 + SDD_off_width
blockidm = tl.arange(0, TM) // BLOCK
blockidn = tl.arange(0, TN) // BLOCK
offlutm = blockidm * (TN // BLOCK) * 4
offlutn = blockidn * 4
header = lut + pid1 * (TM // BLOCK) * (TN // BLOCK) * 4
z = tl.load(header + 0)
i = tl.load(header + 1 + offlutm)
j = tl.load(header + 2 + offlutn)
AS1 = SDD_K // TZ
lockid = tl.where(TZ > 1, 1, 0)
offka = pid0 * AS1
offkb = pid0 * AS1
offmc = 0
offnc = 0
offpa = 0
offpb = 0
maxid = TZ
offhc = 0
offha = z
offhb = z
ram = i * BLOCK + (tl.arange(0, TM) % BLOCK)
rbn = j * BLOCK + (tl.arange(0, TN) % BLOCK)
else:
header = lut + pid0 * 6
offset = tl.load(header + 0)
AS1 = tl.load(header + 1)
column = tl.load(header + 2)
depth = tl.load(header + 3)
lockid = tl.load(header + 4)
maxid = tl.load(header + 5)
pinc = lut + offset
offhc = depth
if meta['DSD']:
# output offset
offnc = pid1 * TN
offmc = column * TM
offpc = 0
# dense input offset
offnb = pid1 * TN
offkb = tl.load(pinc)
offkb = tl.multiple_of(offkb, 8) # compiler hint
offpb = 0
# sparse input offset
offma = 0
offka = 0
offpa = tl.load(pinc + 1)
offpa = tl.multiple_of(offpa, 8) # compiler hint
offpa = offpa * BLOCK * BLOCK
offha = 0
offhb = depth
else:
# output offset
offmc = pid1 * TM
offnc = column * TN
offpc = 0
# dense input offset
offma = pid1 * TM
offka = tl.load(pinc)
offka = tl.multiple_of(offka, 8) # compiler hint
offpa = 0
# sparse input offset
offnb = 0
offkb = 0
offpb = tl.load(pinc + 1)
offpb = tl.multiple_of(offpb, 8) # compiler hint
offpb = offpb * BLOCK * BLOCK
offha = depth
offhb = 0
ram = offma + tl.arange(0, TM)
rbn = offnb + tl.arange(0, TN)
# initialize a, b pointers
rka = offka + tl.arange(0, TK)
rkb = offkb + tl.arange(0, TK)
pa = A + pidz * stride_za + offha * stride_ha + offpa + ram[:, None] * stride_ma + rka[None, :] * stride_ka
pb = B + pidz * stride_zb + offhb * stride_hb + offpb + rbn[None, :] * stride_nb + rkb[:, None] * stride_kb
if meta['DDS']:
checkam = ram[:, None] < DS0
else:
checkam = AS1 > 0
if meta['DSD']:
checkbn = rbn[None, :] < DS0
else:
checkbn = AS1 > 0
a = tl.load(pa, mask=checkam, other=0.)
b = tl.load(pb, mask=checkbn, other=0.)
## ---------------- ##
## Inner Loop ##
## ---------------- ##
acc = tl.zeros((TM, TN), dtype=tl.float32)
for k in range(AS1, 0, -TK):
acc += tl.dot(a, b)
if meta['SDD']:
inc_a = TK * stride_ka
inc_b = TK * stride_kb
else:
pinc += 2
if meta['DSD']:
inc_b = tl.load(pinc)
inc_a = tl.load(pinc + 1)
inc_b = tl.multiple_of(inc_b, 8)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = inc_b * stride_kb
if meta['DDS']:
inc_a = tl.load(pinc)
inc_b = tl.load(pinc + 1)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = tl.multiple_of(inc_b, 8)
inc_a = inc_a * stride_ka
pa += inc_a
pb += inc_b
# pre-fetch
checkak = k > TK
checkbk = k > TK
checka = checkam & checkak
checkb = checkbn & checkbk
a = tl.load(pa, mask=checka)
b = tl.load(pb, mask=checkb)
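## ---------------- ##
## Epilogue ##
## ---------------- ##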
c = acc.to(C.dtype.element_ty)
if meta['SDD']:
checkc = True
rr_blockidm = tl.arange(0, TM) // BLOCK
rr_blockidn = tl.arange(0, TN) // BLOCK
rr_offlutm = rr_blockidm * (TN // BLOCK) * 4
rr_offlutn = rr_blockidn * 4
off_bkid = 3 + rr_offlutm[:, None] + rr_offlutn[None, :]
bkid = tl.load(header + off_bkid)
offpc = bkid * BLOCK * BLOCK
rcm = tl.arange(0, TM) % BLOCK
rcn = tl.arange(0, TN) % BLOCK
else:
rcm = offmc + tl.arange(0, TM)
rcn = offnc + tl.arange(0, TN)
if meta['DSD']:
checkc = rcn[None, :] < DS0
if meta['DDS']:
checkc = rcm[:, None] < DS0
pc = C + offpc + offhc * stride_hc + pidz * stride_zc + rcm[:, None] * stride_mc + rcn[None, :] * stride_nc
# write-back directly
if lockid == 0:
tl.store(pc, c, mask=checkc)
# accumulate partial results using spin-locks
else:
plock = locks + tl.program_id(2) * nlocks * tl.num_programs(1) + tl.program_id(
1) * nlocks + lockid - 1
pcount = plock + tl.num_programs(2) * tl.num_programs(1) * nlocks
while tl.atomic_cas(plock, 0, 1) == 1:
pass
count = tl.load(pcount)
if count == 0:
tl.store(pc, c, mask=checkc)
else:
d = tl.load(pc, mask=checkc)
tl.store(pc, d + c, mask=checkc)
tl.atomic_xchg(pcount, (count + 1) % maxid)
tl.atomic_xchg(plock, 0)
##############
# MAIN API #
##############
class _sparse_matmul(torch.autograd.Function):
sdd_cache = dict()
dsd_cache = dict()
dds_cache = dict()
locks = dict()
# Given an array sizes representing reduction size for each
# column of a block-mode matrix multiplication,
# performs load-balancing to achieve more smaller reductions
# between `seg_size` elements
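# Illustrative example (hypothetical numbers): for sizes=[10, 3, 0] the cap is seg_max=10
# and seg_min=4, so the first column stays one segment of 10, the 3-wide column becomes a
# single short segment, and the empty column gets a zero-size segment; column, lockid,
# maxid and offsets are then filled per segment.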
@staticmethod
def load_balance(sizes, block):
#global triton
#if triton is None:
# triton = importlib.import_module('triton')
# segment size
# heuristics taken from OpenAI blocksparse code
# https://github.com/openai/blocksparse/blob/master/blocksparse/matmul.py#L95
max_size = sizes.max()
min_size = sizes[sizes != 0].min()
#if max_size > min_size * 2.0:
# seg_max = max(triton.cdiv(max_size, 4), min_size*2)
#else:
# seg_max = max_size
seg_max = max_size
seg_min = max(triton.cdiv(seg_max, 4), 4)
# split reduction into segments
div = sizes // seg_max
rem = sizes % seg_max
packs = div + (sizes < seg_min).long() + (rem >= seg_min).long()
width = packs.sum()
segments = torch.empty(width, dtype=sizes.dtype)
column = torch.empty_like(segments)
lockid = torch.zeros_like(segments)
maxid = torch.zeros_like(segments)
nlocks = 0
current = 0
col_idx = 0
for i in range(len(sizes)):
d, r = div[i], rem[i]
isempty = sizes[i] < seg_min
last = current + d + (r >= seg_min) + isempty
# column id
column[current:last] = col_idx
# lock id
if d > 1 or (d == 1 and r >= seg_min):
nlocks += 1
lockid[current:last] = nlocks
maxid[current:last] = last - current
# segment size
segments[current:current + d] = seg_max
if r < seg_min and not isempty:
segments[current + d - 1] += r
if r >= seg_min or isempty:
segments[current + d] = r
current = last
col_idx += 1
offsets = torch.zeros_like(segments)
offsets[1:] = torch.cumsum(segments[:-1], dim=0)
return segments, column, lockid, maxid, offsets
@staticmethod
def get_locks(size, dev):
if dev not in _sparse_matmul.locks or \
size > _sparse_matmul.locks[dev].size(0):
_sparse_matmul.locks[dev] = torch.zeros(size, dtype=torch.int32, device=dev)
return _sparse_matmul.locks[dev]
##########################
# SPARSE = DENSE x DENSE #
##########################
@staticmethod
def make_sdd_lut(layout, block, dtype, device):
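# Group the nonzero blocks of the layout into "superblocks" (libtriton.superblock) so that
# a pack x pack group of output blocks can be handled by a single, larger kernel tile.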
#_sparse_matmul._load_utils()
#start_width = 64 // block
#segmented = _sparse_matmul.sdd_segment(layout.type(torch.int32), start_width)
start_width = (128 if block > 16 else 32) // block
layout = layout.type(torch.int32)
segmented = libtriton.superblock(layout.data_ptr(),
layout.shape[0],
layout.shape[1],
layout.shape[2],
start_width)
luts, widths, packs = [], [], []
for size, nnz in segmented:
""" width = nnz.shape[0] // (size * size)
h = nnz[:, 0]
i = nnz[:, 1]
j = nnz[:, 2]
b = nnz[:, 3]
lut = torch.stack((h, i, j, b), dim=1).view(-1).contiguous()
luts.append(lut.type(torch.int32).to(device))
widths.append(width)
packs.append(size) """
nnz = nnz.reshape(-1, 4)
width = nnz.shape[0] // (size * size)
luts.append(torch.from_numpy(nnz).type(torch.int32).to(device))
widths.append(width)
packs.append(size)
# create locks
return luts, None, widths, packs
@staticmethod
def _sdd_matmul(a,
b,
trans_a,
trans_b,
trans_c,
spdims,
block,
luts,
num_locks,
widths,
packs,
bench,
time):
if trans_c:
a, b = b, a
trans_a, trans_b = not trans_b, not trans_a
AS0 = a.size(0)
# Shape check
a_dim = -2 if trans_a else -1
b_dim = -1 if trans_b else -2
a_inner, b_inner = a.shape[a_dim], b.shape[b_dim]
if a_inner != b_inner:
raise ValueError(
f"Size of tensor A along the {a_dim} dim ({a_inner}) must match size "
f"of tensor B along the {b_dim} dim ({b_inner})")
if a_inner % 16 != 0:
raise ValueError('Reduction size for SDD must be a multiple of 16')
batch_size = a.size(0)
a_outer = a.size(3 if trans_a else 2)
dtype = a.dtype
is_16_multiple = a_inner % 16 == 0
is_32_multiple = a_inner % 32 == 0
is_64_multiple = a_inner % 64 == 0
if not is_16_multiple:
raise ValueError('Reduction size for SDD must be a multiple of 16')
device = a.device
# create kernel
total_width = sum([width * pack * pack for width, pack in zip(widths, packs)])
c = torch.empty((batch_size,
total_width,
block,
block),
dtype=dtype,
device=a.device)
for lut, width, pack in zip(luts, widths, packs):
F32TK = [8, 16]
F16TK = [16]
F16TK += [32] if is_32_multiple else []
F16TK += [64] if is_64_multiple else []
TK = {torch.float32: F32TK, torch.float16: F16TK}[dtype]
num_lock = 1
meta = {
'TM': block * pack,
'TN': block * pack,
'BLOCK': block,
'TK': TK[0],
'TZ': 1,
'SDD': True,
'DSD': False,
'DDS': False
}
# create output
locks = _sparse_matmul.get_locks(2 * width * AS0 * num_lock, a.device)
# maximum grid size is 65535
# so operation might be decomposed into multiple
# kernel calls
max_width = 49152
total = 0 if bench else None
for off_width in range(0, width, max_width):
grid = lambda meta: [
meta['TZ'],
min(max_width,
width - off_width),
batch_size
]
_kernel[grid](a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(0),
c.stride(2),
c.stride(3),
a_outer,
a_outer,
a_inner,
off_width,
lut,
locks,
num_lock,
num_warps=4,
**meta)
# save for backward pass
return c
##########################
# DENSE = DENSE x SPARSE #
##########################
# Given a binary layout of 0s and 1s,
# Construct look-up table for efficient execution on GPUs
@staticmethod
def make_dxx_lut(layout, block, step, trans, device, transform=lambda idx: idx):
# load-balancing
_empty = torch.tensor([], dtype=torch.int64, device=layout.device)
segments = _empty.clone()
column = _empty.clone()
depth = _empty.clone()
lockid = _empty.clone()
maxid = _empty.clone()
offsets = _empty.clone()
current_offset = 0
current_maxid = 0
for z in range(layout.size(0)):
if trans:
sizes = torch.sum(layout[z, :, :], 1)
else:
sizes = torch.sum(layout[z, :, :], 0)
z_segments, z_column, z_lockid, z_maxid, z_offsets = _sparse_matmul.load_balance(sizes, block)
z_depth = z * torch.ones_like(z_segments)
z_lockid[z_lockid > 0] += current_maxid
current_maxid = z_lockid.max()
# concatenate depth
segments = torch.cat((segments, z_segments))
column = torch.cat((column, z_column))
depth = torch.cat((depth, z_depth))
maxid = torch.cat((maxid, z_maxid))
offsets = torch.cat((offsets, current_offset + z_offsets))
lockid = torch.cat((lockid, z_lockid))
current_offset += layout[z, :, :].sum()
segments *= step
# pointer increments
if trans:
nnz = layout.nonzero()
else:
nnz = layout.transpose(1, 2).nonzero()
num_blocks = nnz.size(0)
offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets))
idx = transform(nnz[:, 2] * block)
xincs = idx.clone()
xincs[1:] -= idx[:-1]
# divide block into multiple steps
div = block // step
xincs = xincs.view(-1, 1).repeat(1, div)
xincs[:, 1:] = step
xincs[:, 0] -= (div - 1) * step
# first increment for each reduction is actually the offset
xincs[offsets[segments > 0], 0] = idx[offsets[segments > 0]]
xincs = xincs.view(-1)
# block-mode input increments
if trans:
widx = torch.arange(num_blocks)
else:
widx = _empty.clone()
current_offset = 0
for z in range(layout.size(0)):
layoutw = layout[z, :, :].clone()
msum = layoutw.sum()
layoutw[layoutw > 0] = 1 + torch.arange(msum)
widx = torch.cat((widx, current_offset + layoutw.T[layoutw.T > 0] - 1))
current_offset += msum
widx = widx
wincs = widx * block * block
wincs[1:] -= widx[:-1] * block * block
wincs = wincs.view(-1, 1).repeat(1, div)
if trans:
wincs[:, 1:] = step
wincs[:, 0] -= (div - 1) * step
else:
wincs[:, 1:] = step * block
wincs[:, 0] -= (div - 1) * step * block
wincs[offsets[segments > 0], 0] = widx[offsets[segments > 0]]
wincs = wincs.view(-1)
# adjust offset and segment size
offsets *= 2 * div
segments *= div
# create header
width = column.size(0)
offsets += 6 * width
header = torch.stack((offsets,
segments,
column,
depth,
lockid,
maxid),
dim=1).view(-1).contiguous()
incs = torch.stack((xincs, wincs), dim=1).view(-1).contiguous()
incs = torch.cat((incs, torch.zeros(2, device=incs.device, dtype=incs.dtype)))
# create lut
lut = torch.cat((header, incs))
lut = lut.type(torch.int32).to(device)
# create locks
num_locks = max(1, lockid.max())
return lut, num_locks, width, None
@staticmethod
def _dds_matmul(a,
b,
trans_a,
trans_b,
trans_c,
spdims,
block,
lut,
num_locks,
width,
packs,
bench,
time):
global triton
if triton is None:
triton = importlib.import_module('triton')
# shapes / dtypes
AS0 = a.size(0)
AS1 = a.size(1)
AS2 = a.size(3 if trans_a else 2)
AS3 = a.size(2 if trans_a else 3)
BS0 = spdims[0]
BS1 = block * spdims[2 if trans_b else 1]
BS2 = block * spdims[1 if trans_b else 2]
dtype = a.dtype
# kernel
meta = {
'TN': block,
'TM': 128,
'TK': 16,
'BLOCK': block,
'TZ': 1,
'SDD': False,
'DSD': False,
'DDS': True
}
# output
CS0 = AS0
CS1 = AS1
CS2 = BS2 if trans_c else AS2
CS3 = AS2 if trans_c else BS2
locks = _sparse_matmul.get_locks(2 * AS0 * AS2 // 32 * num_locks, a.device)
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
grid = lambda meta: [width, triton.cdiv(AS2, meta['TM']), AS0]
_kernel[grid](a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(1),
c.stride(3 if trans_c else 2),
c.stride(2 if trans_c else 3),
AS2,
BS2,
0,
0,
lut,
locks,
num_locks,
num_warps=4,
**meta)
return c
@staticmethod
def _dsd_matmul(a,
b,
trans_a,
trans_b,
trans_c,
spdims,
block,
lut,
num_locks,
width,
packs,
bench,
time):
global triton
if triton is None:
triton = importlib.import_module('triton')
# shapes / dtypes
AS0 = spdims[0]
AS1 = block * spdims[2 if trans_a else 1]
AS2 = block * spdims[1 if trans_a else 2]
BS0 = b.size(0)
BS1 = b.size(1)
BS2 = b.size(3 if trans_b else 2)
BS3 = b.size(2 if trans_b else 3)
dtype = a.dtype
# kernel
meta = {
'TM': block,
'TN': 128,
'TK': 16,
'BLOCK': block,
'TZ': 1,
'SDD': False,
'DSD': True,
'DDS': False
}
# output
CS0 = BS0
CS1 = BS1
CS2 = BS3 if trans_c else AS1
CS3 = AS1 if trans_c else BS3
locks = _sparse_matmul.get_locks(2 * BS0 * BS3 // 32 * num_locks, a.device)
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
grid = lambda meta: [width, triton.cdiv(BS3, meta['TN']), BS0]
_kernel[grid](a,
b,
c,
a.stride(0),
a.stride(1),
a.stride(3 if trans_a else 2),
a.stride(2 if trans_a else 3),
b.stride(0),
b.stride(1),
b.stride(3 if trans_b else 2),
b.stride(2 if trans_b else 3),
c.stride(0),
c.stride(1),
c.stride(2),
c.stride(3),
BS3,
AS1,
0,
0,
lut,
locks,
num_locks,
num_warps=4,
**meta)
return c
fn = {
'sdd': _sdd_matmul.__get__(object),
'dsd': _dsd_matmul.__get__(object),
'dds': _dds_matmul.__get__(object)
}
@staticmethod
def forward(ctx,
a,
b,
trans_a,
trans_b,
trans_c,
mode,
spdims,
block,
c_lut,
c_num_locks,
c_width,
c_packs,
c_bench,
c_time,
da_lut,
da_num_locks,
da_width,
da_packs,
da_bench,
da_time,
db_lut,
db_num_locks,
db_width,
db_packs,
db_bench,
db_time):
c = _sparse_matmul.fn[mode](a,
b,
trans_a,
trans_b,
trans_c,
spdims,
block,
c_lut,
c_num_locks,
c_width,
c_packs,
c_bench,
c_time)
# save for backward
ctx.save_for_backward(a, b)
ctx.da_num_locks = da_num_locks
ctx.da_lut = da_lut
ctx.da_width = da_width
ctx.da_packs = da_packs
ctx.da_bench = da_bench
ctx.da_time = da_time
ctx.db_lut = db_lut
ctx.db_num_locks = db_num_locks
ctx.db_width = db_width
ctx.db_bench = db_bench
ctx.db_packs = db_packs
ctx.db_time = db_time
ctx.mode = mode
ctx.spdims = spdims
ctx.block = block
ctx.trans_a = trans_a
ctx.trans_b = trans_b
return c
@staticmethod
def backward(ctx, dc):
# saved for backward
a, b = ctx.saved_tensors
mode = ctx.mode
da, db = None, None  # default grads when an input does not require them
# gradients w.r.t. a
if ctx.needs_input_grad[0]:
mode_da = mode[1] + mode[0] + mode[2]
da = _sparse_matmul.fn[mode_da](dc,
b,
False,
not ctx.trans_b,
ctx.trans_a,
ctx.spdims,
ctx.block,
ctx.da_lut,
ctx.da_num_locks,
ctx.da_width,
ctx.da_packs,
ctx.da_bench,
ctx.da_time)
# gradients w.r.t. b
if ctx.needs_input_grad[1]:
mode_db = mode[2] + mode[1] + mode[0]
db = _sparse_matmul.fn[mode_db](a,
dc,
not ctx.trans_a,
False,
ctx.trans_b,
ctx.spdims,
ctx.block,
ctx.db_lut,
ctx.db_num_locks,
ctx.db_width,
ctx.db_packs,
ctx.db_bench,
ctx.db_time)
return da, db, None, None, None,\
None, None, None, None,\
None, None, None, None, None, None,\
None, None, None, None, None, None,\
None, None, None, None, None, None
class MatMul:
"""Block-Sparse MatMul class; this class handles three types of matrix-multiplication:
- sparse = dense X dense
- dense = sparse X dense
- dense = dense X sparse
For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
"""
def make_lut(self, dtype, device):
"""Generates the sparsity layout/s used in block-sparse matmul
"""
key = (dtype, device)
if key in self.lut_cache:
return self.lut_cache[key]
# C look-up table
layout, block = self.layout, self.block
step = 16
if self.mode == 'sdd':
c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
elif self.mode == 'dsd':
c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_dxx_lut(layout, block, step, not self.trans_a, device)
elif self.mode == 'dds':
c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_dxx_lut(layout, block, step, self.trans_b, device)
# DA look-up table
if self.mode == 'sdd':
da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_dxx_lut(layout, block, step, True, device)
elif self.mode == 'dsd':
da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
elif self.mode == 'dds':
da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_dxx_lut(layout, block, step, not self.trans_b, device)
# DB look-up table
if self.mode == 'sdd':
db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_dxx_lut(layout, block, step, False, device)
elif self.mode == 'dsd':
db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_dxx_lut(layout, block, step, self.trans_a, device)
elif self.mode == 'dds':
db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
self.lut_cache[key] = (c_lut, c_num_locks, c_width, c_packs,\
da_lut, da_num_locks, da_width, da_packs,\
db_lut, db_num_locks, db_width, db_packs)
return self.lut_cache[key]
def __init__(self, layout, block, mode, trans_a=False, trans_b=False, bench=False):
"""Initialize the Block-Sparse MatMul class.
Arguments:
layout: required: sparsity layout tensor
block: required: an integer determining the block size.
mode: required: a string determining type of matmul; ('sdd') sparse = dense X dense, ('dsd') dense = sparse X dense, ('dds') dense = dense X sparse
trans_a: optional: a boolean determining if multiplication needs to be applied on transpose of input a; default is false
trans_b: optional: a boolean determining if multiplication needs to be applied on transpose of input b; default is false
bench: optional: set if you want to do benchmarking
"""
if mode not in ['sdd', 'dsd', 'dds']:
raise NotImplementedError('Supported modes are: sdd, dsd, dds')
# look-up table cache
self.lut_cache = dict()
# attributes
self.trans_a = trans_a
self.trans_b = trans_b
self.mode = mode
self.block = block
self.layout = layout
layout_dim = layout.ndim
assert layout_dim in (2, 3), "Layout should be a 2 or 3 dimensional tensor of 0s and 1s"
if not mode == 'sdd':
# Dims to be reduced on the 'inside' of the matmul, either -1 or -2
trans_dense, trans_sparse, sparse_inner = (trans_b, trans_a, -1) if mode == 'dsd' else (trans_a, trans_b, -2)
self.dense_inner_dim = -(
(sparse_inner % 2) + 1) if not trans_dense else sparse_inner
sparse_inner = sparse_inner if not trans_sparse else -(
(sparse_inner % 2) + 1)
# Inner dim of the dense input should be equal to the inner dim of the sparse input
self.dense_inner_size = layout.shape[sparse_inner] * block
# Expected shape for sparse inputs
self.sparse_shape = (layout.sum().item(), block, block)
# Support using the same layout across attention heads etc.
if layout_dim == 2:
layout = layout.unsqueeze(0)
layout = layout.long(
) # Above code assumes the layout tensor is an integral type
self.spdims = layout.shape
# timings
self.bench = bench
self.time_c = None
self.time_da = None
self.time_db = None
# pad shapes of a tensor to make it
# compatible with kernel calls
@staticmethod
def _pad_shape(x, is_sparse):
max_dim = 3 if is_sparse else 4
for i in range(max_dim - x.dim()):
x = x.unsqueeze(0)
return x
def __call__(self, a, b):
"""Applies Block-Sparse MatMul.
For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
Arguments:
a: required: a dense/block-sparse tensor; first input of mat-mul
b: required: a dense/block-sparse tensor; second input of mat-mul
Return:
c: a dense/block-sparse tensor result of a X b
"""
c_lut, c_num_locks, c_width, c_packs,\
da_lut, da_num_locks, da_width, da_packs,\
db_lut, db_num_locks, db_width, db_packs = self.make_lut(a.dtype, a.device)
# timings
time_c = [None]
time_da = [None]
time_db = [None]
original_dims = max(a.ndim, b.ndim)
a, b = self._validate_inputs(a, b)
# pad shapes with ones
a = MatMul._pad_shape(a, self.mode == 'dsd')
b = MatMul._pad_shape(b, self.mode == 'dds')
# execute
c = _sparse_matmul.apply(a,
b,
self.trans_a,
self.trans_b,
False,
self.mode,
self.spdims,
self.block,
c_lut,
c_num_locks,
c_width,
c_packs,
self.bench,
time_c,
da_lut,
da_num_locks,
da_width,
da_packs,
self.bench,
time_da,
db_lut,
db_num_locks,
db_width,
db_packs,
self.bench,
time_db)
# This removes any leading singleton dimensions we may have added to the tensor that weren't in the input
dims_to_trim = c.ndim - original_dims
for _ in range(dims_to_trim):
c = c.squeeze(0)
self.time_c = time_c[0]
self.time_da = time_da[0]
self.time_db = time_db[0]
return c
def _validate_inputs(self, a, b):
if a.device != b.device:
raise ValueError(
f"Inputs must be on the same device; got {a.device} for tensor A "
f"and {b.device} for tensor B")
if not get_accelerator().on_accelerator(a):
raise ValueError("Only GPU devices are supported for now")
# When autocast is enabled, torch.matmul autocasts to float16, so we do the same here
if torch.is_autocast_enabled():
a, b = a.half(), b.half()
elif a.dtype != b.dtype:
raise ValueError(
f"Inputs must be the same dtype; got {a.dtype} for A and {b.dtype} for B"
)
mode, trans_a, trans_b = self.mode, self.trans_a, self.trans_b
if mode != 'sdd':
# One input is sparse
dense, dense_name, sparse, sparse_name = (a, 'A', b, 'B') if mode == 'dds' else (b, 'B', a, 'A')
dense_inner = dense.shape[self.dense_inner_dim]
if dense_inner != self.dense_inner_size:
raise ValueError(
f"Expected tensor {dense_name} to have size {self.dense_inner_size} at dim "
f"{self.dense_inner_dim % dense.ndim}, got {dense_inner}.")
if sparse.shape[-len(self.sparse_shape):] != self.sparse_shape:
raise ValueError(
f"Expected tensor with trailing dimensions of shape {self.sparse_shape} for argument "
f"{sparse_name}, got {sparse.shape}")
def add_extra_dims(x):
# Add extra leading singleton dimensions if needed
dims_needed = 4 - x.ndim
if dims_needed > 0:
singletons = [1] * dims_needed
x = x.view(*singletons, *x.shape)
elif dims_needed < 0:
raise ValueError(
"Tensors with more than 4 dimensions are not currently supported")
return x
# Pad shapes with leading singleton dimensions
a = add_extra_dims(a)
b = add_extra_dims(b)
return a, b
| [] |
2024-01-10 | Texaser/MTN | main.py | import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
import torch
import argparse
import pandas as pd
import sys
from nerf.provider import NeRFDataset
from nerf.utils import *
# torch.autograd.set_detect_anomaly(True)
if __name__ == '__main__':
# See https://stackoverflow.com/questions/27433316/how-to-get-argparse-to-read-arguments-from-a-file-with-an-option-rather-than-pre
class LoadFromFile (argparse.Action):
def __call__ (self, parser, namespace, values, option_string = None):
with values as f:
# parse arguments in the file and store them in the target namespace
parser.parse_args(f.read().split(), namespace)
parser = argparse.ArgumentParser()
parser.add_argument('--file', type=open, action=LoadFromFile, help="specify a file filled with more arguments")
parser.add_argument('--text', default=None, help="text prompt")
parser.add_argument('--negative', default='', type=str, help="negative text prompt")
parser.add_argument('-O', action='store_true', help="equals --fp16 --cuda_ray")
parser.add_argument('-O2', action='store_true', help="equals --backbone vanilla")
parser.add_argument('--test', action='store_true', help="test mode")
parser.add_argument('--six_views', action='store_true', help="six_views mode: save the images of the six views")
parser.add_argument('--eval_interval', type=int, default=10, help="evaluate on the valid set every interval epochs")
parser.add_argument('--test_interval', type=int, default=100, help="test on the test set every interval epochs")
parser.add_argument('--workspace', type=str, default='workspace')
parser.add_argument('--seed', default=3407) #3407 42
parser.add_argument('--image', default=None, help="image prompt")
parser.add_argument('--image_config', default=None, help="image config csv")
parser.add_argument('--known_view_interval', type=int, default=4, help="train default view with RGB loss every & iters, only valid if --image is not None.")
parser.add_argument('--IF', action='store_true', help="experimental: use DeepFloyd IF as the guidance model for nerf stage")
parser.add_argument('--guidance', type=str, nargs='*', default=['SD'], help='guidance model')
parser.add_argument('--guidance_scale', type=float, default=100, help="diffusion model classifier-free guidance scale")
parser.add_argument('--save_mesh', action='store_true', help="export an obj mesh with texture")
parser.add_argument('--mcubes_resolution', type=int, default=256, help="mcubes resolution for extracting mesh")
parser.add_argument('--decimate_target', type=int, default=5e4, help="target face number for mesh decimation")
parser.add_argument('--dmtet', action='store_true', help="use dmtet finetuning")
parser.add_argument('--tet_grid_size', type=int, default=128, help="tet grid size")
parser.add_argument('--init_with', type=str, default='', help="ckpt to init dmtet")
parser.add_argument('--lock_geo', action='store_true', help="disable dmtet to learn geometry")
## Perp-Neg options
parser.add_argument('--perpneg', action='store_true', help="use perp_neg")
parser.add_argument('--negative_w', type=float, default=-2, help="The scale of the weights of negative prompts. A larger value will help to avoid the Janus problem, but may cause flat faces. Vary between 0 to -4, depending on the prompt")
parser.add_argument('--front_decay_factor', type=float, default=2, help="decay factor for the front prompt")
parser.add_argument('--side_decay_factor', type=float, default=10, help="decay factor for the side prompt")
### training options
parser.add_argument('--iters', type=int, default=6000, help="training iters")
parser.add_argument('--warm_iters', type=int, default=600, help="warmup training iters")
parser.add_argument('--lr', type=float, default=1e-3, help="max learning rate")
parser.add_argument('--ckpt', type=str, default='latest', help="possible options are ['latest', 'scratch', 'best', 'latest_model']")
parser.add_argument('--cuda_ray', action='store_true', help="use CUDA raymarching instead of pytorch")
parser.add_argument('--taichi_ray', action='store_true', help="use taichi raymarching")
parser.add_argument('--max_steps', type=int, default=1024, help="max num steps sampled per ray (only valid when using --cuda_ray)")
parser.add_argument('--num_steps', type=int, default=64, help="num steps sampled per ray (only valid when not using --cuda_ray)")
parser.add_argument('--upsample_steps', type=int, default=32, help="num steps up-sampled per ray (only valid when not using --cuda_ray)")
parser.add_argument('--update_extra_interval', type=int, default=16, help="iter interval to update extra status (only valid when using --cuda_ray)")
parser.add_argument('--max_ray_batch', type=int, default=4096, help="batch size of rays at inference to avoid OOM (only valid when not using --cuda_ray)")
parser.add_argument('--latent_iter_ratio', type=float, default=0.2, help="training iters that only use albedo shading")
parser.add_argument('--albedo_iter_ratio', type=float, default=0, help="training iters that only use albedo shading")
parser.add_argument('--min_ambient_ratio', type=float, default=0.1, help="minimum ambient ratio to use in lambertian shading")
parser.add_argument('--textureless_ratio', type=float, default=0.2, help="ratio of textureless shading")
parser.add_argument('--jitter_pose', action='store_true', help="add jitters to the randomly sampled camera poses")
parser.add_argument('--jitter_center', type=float, default=0.2, help="amount of jitter to add to sampled camera pose's center (camera location)")
parser.add_argument('--jitter_target', type=float, default=0.2, help="amount of jitter to add to sampled camera pose's target (i.e. 'look-at')")
parser.add_argument('--jitter_up', type=float, default=0.02, help="amount of jitter to add to sampled camera pose's up-axis (i.e. 'camera roll')")
parser.add_argument('--uniform_sphere_rate', type=float, default=0, help="likelihood of sampling camera location uniformly on the sphere surface area")
parser.add_argument('--grad_clip', type=float, default=-1, help="clip grad of all grad to this limit, negative value disables it")
parser.add_argument('--grad_clip_rgb', type=float, default=-1, help="clip grad of rgb space grad to this limit, negative value disables it")
# model options
parser.add_argument('--bg_radius', type=float, default=1.4, help="if positive, use a background model at sphere(bg_radius)") #1.4
parser.add_argument('--density_activation', type=str, default='exp', choices=['softplus', 'exp'], help="density activation function")
parser.add_argument('--density_thresh', type=float, default=10, help="threshold for density grid to be occupied")
parser.add_argument('--blob_density', type=float, default=5, help="max (center) density for the density blob")
parser.add_argument('--blob_radius', type=float, default=0.2, help="control the radius for the density blob")
# network backbone
parser.add_argument('--backbone', type=str, default='grid', choices=['grid_tcnn', 'grid', 'vanilla', 'grid_taichi'], help="nerf backbone")
parser.add_argument('--optim', type=str, default='adan', choices=['adan', 'adam'], help="optimizer")
parser.add_argument('--sd_version', type=str, default='2.1', choices=['1.5', '2.0', '2.1'], help="stable diffusion version")
parser.add_argument('--hf_key', type=str, default=None, help="hugging face Stable diffusion model key")
# try this if CUDA OOM
parser.add_argument('--fp16', action='store_true', help="use float16 for training")
parser.add_argument('--vram_O', default=False, action='store_true', help="optimization for low VRAM usage")
# rendering resolution in training, increase these for better quality / decrease these if CUDA OOM even if --vram_O enabled.
parser.add_argument('--w', type=int, default=64, help="render width for NeRF in training")
parser.add_argument('--h', type=int, default=64, help="render height for NeRF in training")
parser.add_argument('--known_view_scale', type=float, default=1.5, help="multiply --h/w by this for known view rendering")
parser.add_argument('--known_view_noise_scale', type=float, default=2e-3, help="random camera noise added to rays_o and rays_d")
parser.add_argument('--dmtet_reso_scale', type=float, default=8, help="multiply --h/w by this for dmtet finetuning")
parser.add_argument('--batch_size', type=int, default=1, help="images to render per batch using NeRF")
### dataset options
parser.add_argument('--bound', type=float, default=1, help="assume the scene is bounded in box(-bound, bound)")
parser.add_argument('--dt_gamma', type=float, default=0, help="dt_gamma (>=0) for adaptive ray marching. set to 0 to disable, >0 to accelerate rendering (but usually with worse quality)")
parser.add_argument('--min_near', type=float, default=0.01, help="minimum near distance for camera")
parser.add_argument('--radius_range', type=float, nargs='*', default=[3.0, 3.5], help="training camera radius range")
parser.add_argument('--theta_range', type=float, nargs='*', default=[45, 105], help="training camera range along the polar angles (i.e. up and down). See advanced.md for details.")
parser.add_argument('--phi_range', type=float, nargs='*', default=[-180, 180], help="training camera range along the azimuth angles (i.e. left and right). See advanced.md for details.")
parser.add_argument('--fovy_range', type=float, nargs='*', default=[10, 30], help="training camera fovy range")
parser.add_argument('--default_radius', type=float, default=3.2, help="radius for the default view")
parser.add_argument('--default_polar', type=float, default=90, help="polar for the default view")
parser.add_argument('--default_azimuth', type=float, default=0, help="azimuth for the default view")
parser.add_argument('--default_fovy', type=float, default=20, help="fovy for the default view")
parser.add_argument('--progressive_view', default=True, action='store_true', help="progressively expand view sampling range from default to full")
parser.add_argument('--progressive_view_init_ratio', type=float, default=0.2, help="initial ratio of final range, used for progressive_view")
parser.add_argument('--progressive_level', action='store_true', help="progressively increase gridencoder's max_level")
parser.add_argument('--angle_overhead', type=float, default=30, help="[0, angle_overhead] is the overhead region")
parser.add_argument('--angle_front', type=float, default=60, help="[0, angle_front] is the front region, [180, 180+angle_front] the back region, otherwise the side region.")
parser.add_argument('--t_range', type=float, nargs='*', default=[0.02, 0.98], help="stable diffusion time steps range")
parser.add_argument('--dont_override_stuff',action='store_true', help="Don't override t_range, etc.")
### regularizations
parser.add_argument('--lambda_entropy', type=float, default=1e-3, help="loss scale for alpha entropy")
parser.add_argument('--lambda_opacity', type=float, default=0, help="loss scale for alpha value")
parser.add_argument('--lambda_orient', type=float, default=1e-2, help="loss scale for orientation")
parser.add_argument('--lambda_tv', type=float, default=0, help="loss scale for total variation")
parser.add_argument('--lambda_wd', type=float, default=0, help="loss scale")
parser.add_argument('--lambda_mesh_normal', type=float, default=0.5, help="loss scale for mesh normal smoothness")
parser.add_argument('--lambda_mesh_laplacian', type=float, default=0.5, help="loss scale for mesh laplacian")
parser.add_argument('--lambda_guidance', type=float, default=1, help="loss scale for SDS")
parser.add_argument('--lambda_rgb', type=float, default=1000, help="loss scale for RGB")
parser.add_argument('--lambda_mask', type=float, default=500, help="loss scale for mask (alpha)")
parser.add_argument('--lambda_normal', type=float, default=0, help="loss scale for normal map")
parser.add_argument('--lambda_depth', type=float, default=10, help="loss scale for relative depth")
parser.add_argument('--lambda_2d_normal_smooth', type=float, default=0, help="loss scale for 2D normal image smoothness")
parser.add_argument('--lambda_3d_normal_smooth', type=float, default=0, help="loss scale for 3D normal image smoothness")
parser.add_argument('--lambda_grid_tv_reg', type=float, default=1e-7, help="loss scale for grid regularization")
parser.add_argument('--lambda_grid_l2_reg', type=float, default=1e-7, help="loss scale for grid regularization")
### debugging options
parser.add_argument('--save_guidance', action='store_true', help="save images of the per-iteration NeRF renders, added noise, denoised (i.e. guidance), fully-denoised. Useful for debugging, but VERY SLOW and takes lots of memory!")
parser.add_argument('--save_guidance_interval', type=int, default=10, help="save guidance every X step")
### GUI options
parser.add_argument('--gui', action='store_true', help="start a GUI")
parser.add_argument('--W', type=int, default=800, help="GUI width")
parser.add_argument('--H', type=int, default=800, help="GUI height")
parser.add_argument('--radius', type=float, default=5, help="default GUI camera radius from center")
parser.add_argument('--fovy', type=float, default=20, help="default GUI camera fovy")
parser.add_argument('--light_theta', type=float, default=60, help="default GUI light direction in [0, 180], corresponding to elevation [90, -90]")
parser.add_argument('--light_phi', type=float, default=0, help="default GUI light direction in [0, 360), azimuth")
parser.add_argument('--max_spp', type=int, default=1, help="GUI rendering max sample per pixel")
parser.add_argument('--zero123_config', type=str, default='./pretrained/zero123/sd-objaverse-finetune-c_concat-256.yaml', help="config file for zero123")
parser.add_argument('--zero123_ckpt', type=str, default='./pretrained/zero123/105000.ckpt', help="ckpt for zero123")
parser.add_argument('--zero123_grad_scale', type=str, default='angle', help="whether to scale the gradients based on 'angle' or 'None'")
parser.add_argument('--dataset_size_train', type=int, default=100, help="Length of train dataset i.e. # of iterations per epoch")
parser.add_argument('--dataset_size_valid', type=int, default=8, help="# of frames to render in the turntable video in validation")
parser.add_argument('--dataset_size_test', type=int, default=100, help="# of frames to render in the turntable video at test time")
parser.add_argument('--exp_start_iter', type=int, default=None, help="start iter # for experiment, to calculate progressive_view and progressive_level")
parser.add_argument('--exp_end_iter', type=int, default=None, help="end iter # for experiment, to calculate progressive_view and progressive_level")
opt = parser.parse_args()
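# Example invocation (illustrative; all flags used here are defined above):
#   python main.py -O --text "a DSLR photo of a hamburger" --workspace trial_hamburger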
if opt.O:
opt.fp16 = True
opt.cuda_ray = True
elif opt.O2:
opt.fp16 = True
opt.backbone = 'vanilla'
opt.progressive_level = True
if opt.IF:
if 'SD' in opt.guidance:
opt.guidance.remove('SD')
opt.guidance.append('IF')
opt.latent_iter_ratio = 0 # must not do as_latent
opt.images, opt.ref_radii, opt.ref_polars, opt.ref_azimuths, opt.zero123_ws = [], [], [], [], []
opt.default_zero123_w = 1
opt.exp_start_iter = opt.exp_start_iter or 0
opt.exp_end_iter = opt.exp_end_iter or opt.iters
# parameters for image-conditioned generation
if opt.image is not None or opt.image_config is not None:
if opt.text is None:
# use zero123 guidance model when only providing image
opt.guidance = ['zero123']
if not opt.dont_override_stuff:
opt.fovy_range = [opt.default_fovy, opt.default_fovy] # fix fov as zero123 doesn't support changing fov
opt.guidance_scale = 5
opt.lambda_3d_normal_smooth = 10
else:
# use stable-diffusion when providing both text and image
opt.guidance = ['SD', 'clip']
if not opt.dont_override_stuff:
opt.guidance_scale = 10
opt.t_range = [0.2, 0.6]
opt.known_view_interval = 2
opt.lambda_3d_normal_smooth = 20
opt.bg_radius = -1
# smoothness
opt.lambda_entropy = 1
opt.lambda_orient = 1
# latent warmup is not needed
opt.latent_iter_ratio = 0
if not opt.dont_override_stuff:
opt.albedo_iter_ratio = 0
# make shape init more stable
opt.progressive_view = True
opt.progressive_level = True
if opt.image is not None:
opt.images += [opt.image]
opt.ref_radii += [opt.default_radius]
opt.ref_polars += [opt.default_polar]
opt.ref_azimuths += [opt.default_azimuth]
opt.zero123_ws += [opt.default_zero123_w]
if opt.image_config is not None:
# for multiview (zero123)
conf = pd.read_csv(opt.image_config, skipinitialspace=True)
opt.images += list(conf.image)
opt.ref_radii += list(conf.radius)
opt.ref_polars += list(conf.polar)
opt.ref_azimuths += list(conf.azimuth)
opt.zero123_ws += list(conf.zero123_weight)
if opt.image is None:
opt.default_radius = opt.ref_radii[0]
opt.default_polar = opt.ref_polars[0]
opt.default_azimuth = opt.ref_azimuths[0]
opt.default_zero123_w = opt.zero123_ws[0]
# reset to None
if len(opt.images) == 0:
opt.images = None
# default parameters for finetuning
if opt.dmtet:
opt.h = int(opt.h * opt.dmtet_reso_scale)
opt.w = int(opt.w * opt.dmtet_reso_scale)
opt.known_view_scale = 1
if not opt.dont_override_stuff:
opt.t_range = [0.02, 0.50] # ref: magic3D
if opt.images is not None:
opt.lambda_normal = 0
opt.lambda_depth = 0
if opt.text is not None and not opt.dont_override_stuff:
opt.t_range = [0.20, 0.50]
# assume finetuning
opt.latent_iter_ratio = 0
opt.albedo_iter_ratio = 0
opt.progressive_view = False
# opt.progressive_level = False
# record full range for progressive view expansion
if opt.progressive_view:
if not opt.dont_override_stuff:
# disable as they disturb progressive view
opt.jitter_pose = False
opt.uniform_sphere_rate = 0
# back up full range
opt.full_radius_range = opt.radius_range
opt.full_theta_range = opt.theta_range
opt.full_phi_range = opt.phi_range
opt.full_fovy_range = opt.fovy_range
if opt.backbone == 'vanilla':
from nerf.network import NeRFNetwork
elif opt.backbone in ('grid', 'multiscale_triplane_pooling'):
from nerf.network_grid import NeRFNetwork
elif opt.backbone == 'grid_tcnn':
from nerf.network_grid_tcnn import NeRFNetwork
elif opt.backbone == 'grid_taichi':
opt.cuda_ray = False
opt.taichi_ray = True
import taichi as ti
from nerf.network_grid_taichi import NeRFNetwork
taichi_half2_opt = True
taichi_init_args = {"arch": ti.cuda, "device_memory_GB": 4.0}
if taichi_half2_opt:
taichi_init_args["half2_vectorization"] = True
ti.init(**taichi_init_args)
else:
raise NotImplementedError(f'--backbone {opt.backbone} is not implemented!')
print(opt)
if opt.seed is not None:
seed_everything(int(opt.seed))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NeRFNetwork(opt).to(device)
if opt.dmtet and opt.init_with != '':
if opt.init_with.endswith('.pth'):
# load pretrained weights to init dmtet
state_dict = torch.load(opt.init_with, map_location=device)
model.load_state_dict(state_dict['model'], strict=False)
if opt.cuda_ray:
model.mean_density = state_dict['mean_density']
model.init_tet()
else:
# assume a mesh to init dmtet (experimental, not working well now!)
import trimesh
mesh = trimesh.load(opt.init_with, force='mesh', skip_material=True, process=False)
model.init_tet(mesh=mesh)
print(model)
if opt.six_views:
guidance = None # no need to load guidance model at test
trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, fp16=opt.fp16, use_checkpoint=opt.ckpt)
test_loader = NeRFDataset(opt, device=device, type='six_views', H=opt.H, W=opt.W, size=6).dataloader(batch_size=1)
trainer.test(test_loader, write_video=False)
if opt.save_mesh:
trainer.save_mesh()
elif opt.test:
guidance = None # no need to load guidance model at test
trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, fp16=opt.fp16, use_checkpoint=opt.ckpt)
if opt.gui:
from nerf.gui import NeRFGUI
gui = NeRFGUI(opt, trainer)
gui.render()
else:
test_loader = NeRFDataset(opt, device=device, type='test', H=opt.H, W=opt.W, size=opt.dataset_size_test).dataloader(batch_size=1)
trainer.test(test_loader)
if opt.save_mesh:
trainer.save_mesh()
else:
train_loader = NeRFDataset(opt, device=device, type='train', H=opt.h, W=opt.w, size=opt.dataset_size_train * opt.batch_size).dataloader()
if opt.optim == 'adan':
from optimizer import Adan
# Adan usually requires a larger LR
optimizer = lambda model: Adan(model.get_params(5 * opt.lr), eps=1e-8, weight_decay=2e-5, max_grad_norm=5.0, foreach=False)
else: # adam
optimizer = lambda model: torch.optim.Adam(model.get_params(opt.lr), betas=(0.9, 0.99), eps=1e-15)
if opt.backbone == 'vanilla':
scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 0.1 ** min(iter / opt.iters, 1))
else:
scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 1) # fixed
# scheduler = lambda optimizer: optim.lr_scheduler.LambdaLR(optimizer, lambda iter: 0.1 ** min(iter / opt.iters, 1))
guidance = nn.ModuleDict()
if 'SD' in opt.guidance:
from guidance.sd_utils import StableDiffusion
guidance['SD'] = StableDiffusion(device, opt.fp16, opt.vram_O, opt.sd_version, opt.hf_key, opt.t_range)
if 'IF' in opt.guidance:
from guidance.if_utils import IF
guidance['IF'] = IF(device, opt.vram_O, opt.t_range)
if 'zero123' in opt.guidance:
from guidance.zero123_utils import Zero123
guidance['zero123'] = Zero123(device=device, fp16=opt.fp16, config=opt.zero123_config, ckpt=opt.zero123_ckpt, vram_O=opt.vram_O, t_range=opt.t_range, opt=opt)
if 'clip' in opt.guidance:
from guidance.clip_utils import CLIP
guidance['clip'] = CLIP(device)
trainer = Trainer(' '.join(sys.argv), 'df', opt, model, guidance, device=device, workspace=opt.workspace, optimizer=optimizer, ema_decay=0.95, fp16=opt.fp16, lr_scheduler=scheduler, use_checkpoint=opt.ckpt, scheduler_update_every_step=True)
trainer.default_view_data = train_loader._data.get_default_view_data()
if opt.gui:
from nerf.gui import NeRFGUI
gui = NeRFGUI(opt, trainer, train_loader)
gui.render()
else:
valid_loader = NeRFDataset(opt, device=device, type='val', H=opt.H, W=opt.W, size=opt.dataset_size_valid).dataloader(batch_size=1)
test_loader = NeRFDataset(opt, device=device, type='test', H=opt.H, W=opt.W, size=opt.dataset_size_test).dataloader(batch_size=1)
max_epoch = np.ceil(opt.iters / len(train_loader)).astype(np.int32)
trainer.train(train_loader, valid_loader, test_loader, max_epoch)
if opt.save_mesh:
trainer.save_mesh()
| [] |
2024-01-10 | SAMAD101/Chino | chino.py | #!/usr/bin/python
import typer
import os
import openai
import json
openai.api_key = os.getenv("OPEN_API_KEY")
def get_response(prompt):
"""Get a response from GPT-3"""
prompt_info = {
"prompt": prompt,
}
return json.dumps(prompt_info)
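# run_conversation wires get_response into OpenAI function calling: the model may request
# the "get_response" tool, it is executed locally, and the result is sent back so the model
# can produce a final answer.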
def run_conversation(prompt) -> str:
# Step 1: send the conversation and available functions to GPT
messages = [{"role": "user", "text": prompt}]
functions = [
{
"name": "get_response",
"description": "Get a response from GPT-3",
"parameters": {
"type": "string",
"properties": {
"prompt": {
"type": "string",
"description": "Prompt for generating response by Chino using OpenAI's GPT-3 API",
},
},
},
}
]
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=messages,
functions=functions,
function_call="auto", # auto is default, but we'll be explicit
)
response_message = response["choices"][0]["message"]  # keep the full message so function_call metadata is preserved
# Step 2: check if GPT wanted to call a function
if response_message.get("function_call"):
# Step 3: call the function
# Note: the JSON response may not always be valid; be sure to handle errors
available_functions = {
"get_response": get_response(),
} # only one function in this example, but you can have multiple
function_name = response_message["function_call"]["name"]
function_to_call = available_functions[function_name]
function_args = json.loads(response_message["function_call"]["arguments"])
function_response = function_to_call(
prompt=function_args.get("prompt"),
)
# Step 4: send the info on the function call and function response to GPT
messages.append(response_message) # extend conversation with assistant's reply
messages.append(
{
"role": "function",
"name": function_name,
"content": function_response,
}
) # extend conversation with function response
second_response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=messages,
) # get a new response from GPT where it can see the function response
return second_response
def main(prompt: str = typer.Option(None, '-p', '--prompt', help="Prompt for ChatGPT")):
if prompt is not None:
response = run_conversation(prompt)
typer.echo(response)
else:
typer.echo("Chino is Happy!")
if __name__ == "__main__":
typer.run(main)
| [
"{'prompt': PLACEHOLDER}"
] |
2024-01-10 | soumitra9/langchain | langchain~embeddings~bedrock.py | import json
import os
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Extra, root_validator
from langchain.embeddings.base import Embeddings
class BedrockEmbeddings(BaseModel, Embeddings):
"""Embeddings provider to invoke Bedrock embedding models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
"""
"""
Example:
.. code-block:: python
from langchain.bedrock_embeddings import BedrockEmbeddings
region_name ="us-east-1"
credentials_profile_name = "default"
model_id = "amazon.titan-e1t-medium"
be = BedrockEmbeddings(
credentials_profile_name=credentials_profile_name,
region_name=region_name,
model_id=model_id
)
"""
client: Any #: :meta private:
region_name: Optional[str] = None
"""The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
"""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
model_id: str = "amazon.titan-e1t-medium"
"""Id of the model to call, e.g., amazon.titan-e1t-medium, this is
equivalent to the modelId property in the list-foundation-models api"""
model_kwargs: Optional[Dict] = None
"""Key word arguments to pass to the model."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
if "client" in values:
return values
try:
import boto3
if values["credentials_profile_name"] is not None:
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {}
if values["region_name"]:
client_params["region_name"] = values["region_name"]
values["client"] = session.client("bedrock", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
return values
def _embedding_func(self, text: str) -> List[float]:
"""Call out to Bedrock embedding endpoint."""
# replace newlines, which can negatively affect performance.
text = text.replace(os.linesep, " ")
_model_kwargs = self.model_kwargs or {}
input_body = {**_model_kwargs}
input_body["inputText"] = text
body = json.dumps(input_body)
content_type = "application/json"
accepts = "application/json"
embeddings = []
try:
response = self.client.invoke_model(
body=body,
modelId=self.model_id,
accept=accepts,
contentType=content_type,
)
response_body = json.loads(response.get("body").read())
embeddings = response_body.get("embedding")
except Exception as e:
raise ValueError(f"Error raised by inference endpoint: {e}")
return embeddings
def embed_documents(
self, texts: List[str], chunk_size: int = 1
) -> List[List[float]]:
"""Compute doc embeddings using a Bedrock model.
Args:
texts: The list of texts to embed.
chunk_size: Bedrock currently only allows single string
inputs, so chunk size is always 1. This input is here
only for compatibility with the embeddings interface.
Returns:
List of embeddings, one for each text.
"""
results = []
for text in texts:
response = self._embedding_func(text)
results.append(response)
return results
def embed_query(self, text: str) -> List[float]:
"""Compute query embeddings using a Bedrock model.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self._embedding_func(text)
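# Usage sketch (illustrative only; region, profile and model id mirror the class
# docstring example above and are assumptions about your AWS setup):
#
#   embeddings = BedrockEmbeddings(credentials_profile_name="default",
#                                  region_name="us-east-1",
#                                  model_id="amazon.titan-e1t-medium")
#   query_vec = embeddings.embed_query("What is the boiling point of water?")
#   doc_vecs = embeddings.embed_documents(["first document", "second document"])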
| [] |
2024-01-10 | soumitra9/langchain | langchain~client~models.py | from datetime import datetime
from enum import Enum
from typing import Any, ClassVar, Dict, List, Mapping, Optional, Sequence, Union
from uuid import UUID, uuid4
from pydantic import BaseModel, Field, root_validator
from langchain.callbacks.tracers.schemas import Run, RunTypeEnum
class ExampleBase(BaseModel):
"""Example base model."""
dataset_id: UUID
inputs: Dict[str, Any]
outputs: Optional[Dict[str, Any]] = Field(default=None)
class Config:
frozen = True
class ExampleCreate(ExampleBase):
"""Example create model."""
id: Optional[UUID]
created_at: datetime = Field(default_factory=datetime.utcnow)
class Example(ExampleBase):
"""Example model."""
id: UUID
created_at: datetime
modified_at: Optional[datetime] = Field(default=None)
runs: List[Run] = Field(default_factory=list)
class ExampleUpdate(BaseModel):
"""Update class for Example."""
dataset_id: Optional[UUID] = None
inputs: Optional[Dict[str, Any]] = None
outputs: Optional[Dict[str, Any]] = None
class Config:
frozen = True
class DatasetBase(BaseModel):
"""Dataset base model."""
tenant_id: UUID
name: str
description: Optional[str] = None
class Config:
frozen = True
class DatasetCreate(DatasetBase):
"""Dataset create model."""
id: Optional[UUID]
created_at: datetime = Field(default_factory=datetime.utcnow)
class Dataset(DatasetBase):
"""Dataset ORM model."""
id: UUID
created_at: datetime
modified_at: Optional[datetime] = Field(default=None)
class ListRunsQueryParams(BaseModel):
"""Query params for GET /runs endpoint."""
id: Optional[List[UUID]]
"""Filter runs by id."""
parent_run: Optional[UUID]
"""Filter runs by parent run."""
run_type: Optional[RunTypeEnum]
"""Filter runs by type."""
session: Optional[UUID] = Field(default=None, alias="session_id")
"""Only return runs within a session."""
reference_example: Optional[UUID]
"""Only return runs that reference the specified dataset example."""
execution_order: Optional[int]
"""Filter runs by execution order."""
error: Optional[bool]
"""Whether to return only runs that errored."""
offset: Optional[int]
"""The offset of the first run to return."""
limit: Optional[int]
"""The maximum number of runs to return."""
start_time: Optional[datetime] = Field(
default=None,
alias="start_before",
description="Query Runs that started <= this time",
)
end_time: Optional[datetime] = Field(
default=None,
alias="end_after",
description="Query Runs that ended >= this time",
)
class Config:
extra = "forbid"
frozen = True
@root_validator(allow_reuse=True)
def validate_time_range(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate that start_time <= end_time."""
start_time = values.get("start_time")
end_time = values.get("end_time")
if start_time and end_time and start_time > end_time:
raise ValueError("start_time must be <= end_time")
return values
class FeedbackSourceBase(BaseModel):
type: ClassVar[str]
metadata: Optional[Dict[str, Any]] = None
class Config:
frozen = True
class APIFeedbackSource(FeedbackSourceBase):
"""API feedback source."""
type: ClassVar[str] = "api"
class ModelFeedbackSource(FeedbackSourceBase):
"""Model feedback source."""
type: ClassVar[str] = "model"
class FeedbackSourceType(Enum):
"""Feedback source type."""
API = "api"
"""General feedback submitted from the API."""
MODEL = "model"
"""Model-assisted feedback."""
class FeedbackBase(BaseModel):
"""Feedback schema."""
created_at: datetime = Field(default_factory=datetime.utcnow)
"""The time the feedback was created."""
modified_at: datetime = Field(default_factory=datetime.utcnow)
"""The time the feedback was last modified."""
run_id: UUID
"""The associated run ID this feedback is logged for."""
key: str
"""The metric name, tag, or aspect to provide feedback on."""
score: Union[float, int, bool, None] = None
"""Value or score to assign the run."""
value: Union[float, int, bool, str, dict, None] = None
"""The display value, tag or other value for the feedback if not a metric."""
comment: Optional[str] = None
"""Comment or explanation for the feedback."""
correction: Union[str, dict, None] = None
"""Correction for the run."""
feedback_source: Optional[
Union[APIFeedbackSource, ModelFeedbackSource, Mapping[str, Any]]
] = None
"""The source of the feedback."""
class Config:
frozen = True
class FeedbackCreate(FeedbackBase):
"""Schema used for creating feedback."""
id: UUID = Field(default_factory=uuid4)
feedback_source: APIFeedbackSource
"""The source of the feedback."""
class Feedback(FeedbackBase):
"""Schema for getting feedback."""
id: UUID
feedback_source: Optional[Dict] = None
"""The source of the feedback. In this case"""
class ListFeedbackQueryParams(BaseModel):
"""Query Params for listing feedbacks."""
run: Optional[Sequence[UUID]] = None
limit: int = 100
offset: int = 0
class Config:
"""Config for query params."""
extra = "forbid"
frozen = True
| [] |
2024-01-10 | soumitra9/langchain | langchain~llms~bedrock.py | import json
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
class LLMInputOutputAdapter:
"""Adapter class to prepare the inputs from Langchain to a format
that LLM model expects. Also, provides helper function to extract
the generated text from the model response."""
@classmethod
def prepare_input(
cls, provider: str, prompt: str, model_kwargs: Dict[str, Any]
) -> Dict[str, Any]:
input_body = {**model_kwargs}
if provider == "anthropic" or provider == "ai21":
input_body["prompt"] = prompt
else:
input_body["inputText"] = prompt
if provider == "anthropic" and "max_tokens_to_sample" not in input_body:
input_body["max_tokens_to_sample"] = 50
return input_body
@classmethod
def prepare_output(cls, provider: str, response: Any) -> str:
if provider == "anthropic":
response_body = json.loads(response.get("body").read().decode())
return response_body.get("completion")
else:
response_body = json.loads(response.get("body").read())
if provider == "ai21":
return response_body.get("completions")[0].get("data").get("text")
else:
return response_body.get("results")[0].get("outputText")
class Bedrock(LLM):
"""LLM provider to invoke Bedrock models.
To authenticate, the AWS client uses the following methods to
automatically load credentials:
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
If a specific credential profile should be used, you must pass
the name of the profile from the ~/.aws/credentials file that is to be used.
Make sure the credentials / roles used have the required policies to
access the Bedrock service.
"""
"""
Example:
.. code-block:: python
from bedrock_langchain.bedrock_llm import BedrockLLM
llm = BedrockLLM(
credentials_profile_name="default",
model_id="amazon.titan-tg1-large"
)
"""
client: Any #: :meta private:
region_name: Optional[str] = None
"""The aws region e.g., `us-west-2`. Fallsback to AWS_DEFAULT_REGION env variable
or region specified in ~/.aws/config in case it is not provided here.
"""
credentials_profile_name: Optional[str] = None
"""The name of the profile in the ~/.aws/credentials or ~/.aws/config files, which
has either access keys or role information specified.
If not specified, the default credential profile or, if on an EC2 instance,
credentials from IMDS will be used.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
"""
model_id: str
"""Id of the model to call, e.g., amazon.titan-tg1-large, this is
equivalent to the modelId property in the list-foundation-models api"""
model_kwargs: Optional[Dict] = None
"""Key word arguments to pass to the model."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that AWS credentials to and python package exists in environment."""
# Skip creating new client if passed in constructor
if "client" in values:
return values
try:
import boto3
if values["credentials_profile_name"] is not None:
session = boto3.Session(profile_name=values["credentials_profile_name"])
else:
# use default credentials
session = boto3.Session()
client_params = {}
if values["region_name"]:
client_params["region_name"] = values["region_name"]
values["client"] = session.client("bedrock", **client_params)
except ImportError:
raise ModuleNotFoundError(
"Could not import boto3 python package. "
"Please install it with `pip install boto3`."
)
except Exception as e:
raise ValueError(
"Could not load credentials to authenticate with AWS client. "
"Please check that credentials in the specified "
"profile name are valid."
) from e
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
_model_kwargs = self.model_kwargs or {}
return {
**{"model_kwargs": _model_kwargs},
}
@property
def _llm_type(self) -> str:
"""Return type of llm."""
return "amazon_bedrock"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Call out to Bedrock service model.
Args:
prompt: The prompt to pass into the model.
stop: Optional list of stop words to use when generating.
Returns:
The string generated by the model.
Example:
.. code-block:: python
response = se("Tell me a joke.")
"""
_model_kwargs = self.model_kwargs or {}
provider = self.model_id.split(".")[0]
input_body = LLMInputOutputAdapter.prepare_input(
provider, prompt, _model_kwargs
)
body = json.dumps(input_body)
accept = "application/json"
contentType = "application/json"
try:
response = self.client.invoke_model(
body=body, modelId=self.model_id, accept=accept, contentType=contentType
)
text = LLMInputOutputAdapter.prepare_output(provider, response)
except Exception as e:
raise ValueError(f"Error raised by bedrock service: {e}")
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | soumitra9/langchain | langchain~llms~gpt4all.py | """Wrapper for the GPT4All model."""
from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Set
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
class GPT4All(LLM):
r"""Wrapper around GPT4All language models.
To use, you should have the ``gpt4all`` python package installed, the
pre-trained model file, and the model's config information.
Example:
.. code-block:: python
from langchain.llms import GPT4All
model = GPT4All(model="./models/gpt4all-model.bin", n_ctx=512, n_threads=8)
# Simplest invocation
response = model("Once upon a time, ")
"""
model: str
"""Path to the pre-trained GPT4All model file."""
backend: Optional[str] = Field(None, alias="backend")
n_ctx: int = Field(512, alias="n_ctx")
"""Token context window."""
n_parts: int = Field(-1, alias="n_parts")
"""Number of parts to split the model into.
If -1, the number of parts is automatically determined."""
seed: int = Field(0, alias="seed")
"""Seed. If -1, a random seed is used."""
f16_kv: bool = Field(False, alias="f16_kv")
"""Use half-precision for key/value cache."""
logits_all: bool = Field(False, alias="logits_all")
"""Return logits for all tokens, not just the last token."""
vocab_only: bool = Field(False, alias="vocab_only")
"""Only load the vocabulary, no weights."""
use_mlock: bool = Field(False, alias="use_mlock")
"""Force system to keep model in RAM."""
embedding: bool = Field(False, alias="embedding")
"""Use embedding mode only."""
n_threads: Optional[int] = Field(4, alias="n_threads")
"""Number of threads to use."""
n_predict: Optional[int] = 256
"""The maximum number of tokens to generate."""
temp: Optional[float] = 0.8
"""The temperature to use for sampling."""
top_p: Optional[float] = 0.95
"""The top-p value to use for sampling."""
top_k: Optional[int] = 40
"""The top-k value to use for sampling."""
echo: Optional[bool] = False
"""Whether to echo the prompt."""
stop: Optional[List[str]] = []
"""A list of strings to stop generation when encountered."""
repeat_last_n: Optional[int] = 64
"Last n tokens to penalize"
repeat_penalty: Optional[float] = 1.3
"""The penalty to apply to repeated tokens."""
n_batch: int = Field(1, alias="n_batch")
"""Batch size for prompt processing."""
streaming: bool = False
"""Whether to stream the results or not."""
context_erase: float = 0.5
"""Leave (n_ctx * context_erase) tokens
starting from beginning if the context has run out."""
allow_download: bool = False
"""If model does not exist in ~/.cache/gpt4all/, download it."""
client: Any = None #: :meta private:
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
@staticmethod
def _model_param_names() -> Set[str]:
return {
"n_ctx",
"n_predict",
"top_k",
"top_p",
"temp",
"n_batch",
"repeat_penalty",
"repeat_last_n",
"context_erase",
}
def _default_params(self) -> Dict[str, Any]:
return {
"n_ctx": self.n_ctx,
"n_predict": self.n_predict,
"top_k": self.top_k,
"top_p": self.top_p,
"temp": self.temp,
"n_batch": self.n_batch,
"repeat_penalty": self.repeat_penalty,
"repeat_last_n": self.repeat_last_n,
"context_erase": self.context_erase,
}
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in the environment."""
try:
from gpt4all import GPT4All as GPT4AllModel
except ImportError:
raise ImportError(
"Could not import gpt4all python package. "
"Please install it with `pip install gpt4all`."
)
full_path = values["model"]
model_path, delimiter, model_name = full_path.rpartition("/")
model_path += delimiter
values["client"] = GPT4AllModel(
model_name,
model_path=model_path or None,
model_type=values["backend"],
allow_download=values["allow_download"],
)
if values["n_threads"] is not None:
# set n_threads
values["client"].model.set_thread_count(values["n_threads"])
values["backend"] = values["client"].model.model_type
return values
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {
"model": self.model,
**self._default_params(),
**{
k: v for k, v in self.__dict__.items() if k in self._model_param_names()
},
}
@property
def _llm_type(self) -> str:
"""Return the type of llm."""
return "gpt4all"
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
r"""Call out to GPT4All's generate method.
Args:
prompt: The prompt to pass into the model.
stop: A list of strings to stop generation when encountered.
Returns:
The string generated by the model.
Example:
.. code-block:: python
prompt = "Once upon a time, "
response = model(prompt, n_predict=55)
"""
text_callback = None
if run_manager:
text_callback = partial(run_manager.on_llm_new_token, verbose=self.verbose)
text = ""
for token in self.client.generate(prompt, **self._default_params()):
if text_callback:
text_callback(token)
text += token
if stop is not None:
text = enforce_stop_tokens(text, stop)
return text
| [] |
2024-01-10 | soumitra9/langchain | langchain~output_parsers~combining.py | from __future__ import annotations
from typing import Any, Dict, List
from pydantic import root_validator
from langchain.schema import BaseOutputParser
class CombiningOutputParser(BaseOutputParser):
"""Class to combine multiple output parsers into one."""
parsers: List[BaseOutputParser]
@root_validator()
def validate_parsers(cls, values: Dict[str, Any]) -> Dict[str, Any]:
"""Validate the parsers."""
parsers = values["parsers"]
if len(parsers) < 2:
raise ValueError("Must have at least two parsers")
for parser in parsers:
if parser._type == "combining":
raise ValueError("Cannot nest combining parsers")
if parser._type == "list":
raise ValueError("Cannot comine list parsers")
return values
@property
def _type(self) -> str:
"""Return the type key."""
return "combining"
def get_format_instructions(self) -> str:
"""Instructions on how the LLM output should be formatted."""
initial = f"For your first output: {self.parsers[0].get_format_instructions()}"
subsequent = "\n".join(
[
f"Complete that output fully. Then produce another output, separated by two newline characters: {p.get_format_instructions()}" # noqa: E501
for p in self.parsers[1:]
]
)
return f"{initial}\n{subsequent}"
def parse(self, text: str) -> Dict[str, Any]:
"""Parse the output of an LLM call."""
texts = text.split("\n\n")
output = dict()
for txt, parser in zip(texts, self.parsers):
output.update(parser.parse(txt.strip()))
return output
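# Usage sketch (illustrative, not part of the original module): any two compatible
# parsers -- i.e. both implement get_format_instructions() and parse(), and neither
# is a "combining" or "list" parser -- can be wrapped like this:
#
#   parser = CombiningOutputParser(parsers=[first_parser, second_parser])
#   instructions = parser.get_format_instructions()  # instructions for both outputs
#   combined = parser.parse(llm_output)              # dict merging both parsers' keys
#
# Note that parse() splits the LLM output on blank lines ("\n\n"), so the model must
# separate the two outputs with two newline characters, as the instructions request.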
| [] |
2024-01-10 | elenajp/twitter_bot | bot~twitterbot.py | #!/usr/bin/env python3
import os
import openai
import tweepy
from keys import keys # Don't need this line in AWS Lambda
from tweepy import Cursor
API_KEY = keys["API_KEY"]
API_SECRET_KEY = keys["API_SECRET_KEY"]
ACCESS_TOKEN = keys["ACCESS_TOKEN"]
ACCESS_TOKEN_SECRET = keys["ACCESS_TOKEN_SECRET"]
openai.api_key = keys["OPENAPI_SECRET_KEY"]
auth = tweepy.OAuthHandler(API_KEY, API_SECRET_KEY)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
def retweet_comment_and_like():
"""Retweets Twitter posts containing a specific hashtag, likes the tweet and comments on it too"""
hashtags = "#saveoursharks OR #sharkawareness OR #sharklover OR #savesharks OR #sharkdiving OR #ilovesharks OR #protectsharks"
# Searches for tweets with a certain hashtag, language and the full text (extended) of the tweet is returned
for tweet in Cursor(
api.search_tweets, q=hashtags, lang="en", tweet_mode="extended"
).items(2):
try:
# Checks if the tweet has not already been retweeted, then if not, retweets it
if not tweet.retweeted:
api.retweet(tweet.id)
tweet.favorite()
status = api.get_status(tweet.id, tweet_mode="extended")
screen_name = status.user.screen_name
message = f"@{screen_name} Great tweet! I really enjoyed it."
api.update_status(message, in_reply_to_status_id=tweet.id)
print("Retweeted tweet: " + tweet.full_text)
except Exception as error:
print("Error: " + str(error))
def reply_to_mentions():
"""Replies to mentions with a random shark fact mentioned by chatGPT"""
# Get the latest mention
mentions = api.mentions_timeline()
latest_mention = mentions[0]
# Use OpenAI to generate a reply to the latest mention
model_engine = "text-davinci-002"
# prompt = "Reply to @" + latest_mention.user.screen_name + ": " + latest_mention.text
prompt = "Mention a shark fact"
# Load the ids of replied tweets from a file
replied_tweet_ids = set()
if os.path.exists("ids.txt"):
with open("replied_tweet_ids.txt", "r") as f:
for line in f:
replied_tweet_ids.add(int(line.strip()))
if latest_mention.id not in replied_tweet_ids:
try:
completion = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=160,
n=1,
stop=None,
temperature=0.5,
)
reply = completion.choices[0].text
reply = f"Thanks @{latest_mention.user.screen_name}, let me throw a shark fact at ya: {reply}"
api.create_favorite(latest_mention.id)
# Post the reply
api.update_status(status=reply, in_reply_to_status_id=latest_mention.id)
print("Successfully replied with:", reply)
# Add the tweet id to the set of replied tweet ids and persist it,
# so the same mention is not answered again on the next run
replied_tweet_ids.add(latest_mention.id)
with open("replied_tweet_ids.txt", "a") as f:
f.write(f"{latest_mention.id}\n")
except Exception as e:
print("Error:", e)
retweet_comment_and_like()
reply_to_mentions()
| [
"Mention a shark fact"
] |
2024-01-10 | 2xic/optimization-playground | notes~infrastructure~tools~langchain~pdf-summary~keypoints.py | import textract
from dotenv import load_dotenv
load_dotenv()
from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains.conversation.memory import ConversationSummaryMemory
import json
class KeyIdeas:
def __init__(self) -> None:
template = """
You are given some machine learning papers, you should read them CAREFULLY and give out the key ideas.
You will interlay create a summary since I give you parts of the document in batches.
Create the summary with bullet points!
Previous summary: {history}
New text batch: {text}
New summary:
"""
prompt = PromptTemplate(
input_variables=['text', 'history'],
template=template
)
llm = OpenAI(temperature=0)
self.chatgpt_chain = LLMChain(
llm=llm,
prompt=prompt,
verbose=True,
memory=ConversationSummaryMemory(llm=llm),
)
def summary(self, text):
output = self.chatgpt_chain.predict(
text=text
)
return output
if __name__ == "__main__":
text = textract.process('./1801.06146.pdf')
batch_size = 3700
model = KeyIdeas()
for i in range(0, len(text), batch_size):
batch = text[i:i+batch_size]
if not len(batch):
break
response = model.summary(
batch.decode('utf-8').replace("\n", " ").strip()
)
print(response)
| [
"\n You are given some machine learning papers, you should read them CAREFULLY and give out the key ideas.\n \n You will interlay create a summary since I give you parts of the document in batches.\n\n Create the summary with bullet points!\n\n Previous summary: {history}\n\n New text batch: {text}\n\n New summary: \n "
] |
2024-01-10 | 2xic/optimization-playground | notes~infrastructure~tools~langchain~youtube-summary~get_transcript.py | from dotenv import load_dotenv
load_dotenv()
import openai
import json
from pydub import AudioSegment
def split_audio(sizes=10):
sound = AudioSegment.from_mp3("output.mp3")
point_split = len(sound) // sizes
for i in range(sizes):
sound_segment = sound[point_split*i : point_split * (i + 1)]
sound_segment.export("temp.mp3", format="mp3")
yield "temp.mp3"
def get_transcript():
for index, i in enumerate(split_audio()):
print(f"index == {index}")
audio_file = open(i, "rb")
transcript = openai.Audio.transcribe("whisper-1", audio_file)
with open(f"transcript_{index}.json", "w") as file:
file.write(json.dumps(transcript))
if __name__ == "__main__":
get_transcript()
| [] |
2024-01-10 | 2xic/optimization-playground | notes~infrastructure~tools~langchain~per-arne~reason.py | """
Per Arne needs to reason a bit; we use langchain as a layer on top of OpenAI
"""
from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
class Reason:
def __init__(self) -> None:
template = """
Du er en saksbehandler. Du skal hjelpe til så godt du kan, og være snill.
Du snakker i telefonen, og dialogen er flytende. Du må derfor skrive kort og ryddig.
Du er Per Arne, og alt du skriver bør skrives som "Per Arne:"
Her er det innringer har sagt:
{history}
Innringer: {context}
Per Arne:
"""
prompt = PromptTemplate(
input_variables=['context', 'history'],
template=template
)
# prompt.format(context=context)
self.chatgpt_chain = LLMChain(
llm=OpenAI(temperature=0),
prompt=prompt,
verbose=True,
memory=ConversationBufferWindowMemory(k=2),
)
def predict(self, call_info):
output = self.chatgpt_chain.predict(
context=call_info
)
return output
| [
"\n Du er en saksbehandler. Du skal hjelpe til så godt du kan, og være snill.\n\n Du snakker i telefonen, og dialogen er flytende. Du må derfor skrive kort og ryddig.\n\n Du er Per Arne, og alt du skriver bør skrives som \"Per Arne:\"\n\n Her er det innringer har sagt: \n {history}\n\n Innringer: {context}\n\n Per Arne: \n ",
"context",
"Per Arne:"
] |
2024-01-10 | 2xic/optimization-playground | notes~infrastructure~tools~langchain~youtube-summary~get_summary.py | from dotenv import load_dotenv
load_dotenv()
from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
from langchain.memory import ConversationBufferWindowMemory
from langchain.chains.conversation.memory import ConversationSummaryMemory
import json
class ProcessTranscript:
def __init__(self) -> None:
template = """
You should give a good summary of the transcript.
There will be multiple transcripts of the same video sent to you.
{history}
Human: {text}
AI:
"""
prompt = PromptTemplate(
input_variables=['text', 'history'],
template=template
)
llm = OpenAI(temperature=0)
self.chatgpt_chain = LLMChain(
llm=llm,
prompt=prompt,
verbose=True,
memory=ConversationSummaryMemory(llm=llm),
)
def summary(self, text):
output = self.chatgpt_chain.predict(
text=text
)
return output
if __name__ == "__main__":
model = ProcessTranscript()
output = None
for i in range(10):
with open(f"data/transcript_{i}.json", "r") as file:
output = model.summary(
json.loads(file.read())["text"]
)
with open(f"data/output_{i}.txt", "w") as file:
file.write(output)
| [
"\n You should give a good summary of the transcript. \n There will be multiple transcripts of the same video sent to you.\n \n {history}\n\n Human: {text}\n\n AI: \n "
] |
2024-01-10 | 2xic/optimization-playground | notes~infrastructure~tools~langchain~per-arne~whisper.py | """
Per Arne needs to hear, and we use the OpenAI Whisper model for that :)
"""
from dotenv import load_dotenv
load_dotenv()
import os
import cache
import openai
class Whisper:
def __init__(self) -> None:
self.root = os.path.dirname(
os.path.abspath(__file__)
)
def get_transcript(self, name):
path = os.path.join(
self.root,
"record.wav"
)
audio_file = open(path, "rb")
with cache.Cache(audio_file) as ref:
if ref.cached:
print("Cache :=)")
return ref.cached
else:
transcript = openai.Audio.transcribe("whisper-1", audio_file)
return ref.cache(
transcript["text"]
)
if __name__ == "__main__":
print(
Whisper().get_transcript("record.wav")
)
| [] |
2024-01-10 | lupantech/MathVista | evaluation~extract_answer.py | import os
import re
import time
import argparse
from tqdm import tqdm
import sys
sys.path.append('../')
from utilities import *
# OpenAI
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
# print(openai.api_key)
# load demo prompt
from prompts.ext_ans import demo_prompt
def verify_extraction(extraction):
extraction = extraction.strip()
if extraction == "" or extraction == None:
return False
return True
def create_test_prompt(demo_prompt, query, response):
demo_prompt = demo_prompt.strip()
test_prompt = f"{query}\n\n{response}"
full_prompt = f"{demo_prompt}\n\n{test_prompt}\n\nExtracted answer: "
return full_prompt
def extract_answer(response, problem, quick_extract=False):
question_type = problem['question_type']
answer_type = problem['answer_type']
choices = problem['choices']
query = problem['query']
if response == "":
return ""
if question_type == 'multi_choice' and response in choices:
return response
if answer_type == "integer":
try:
extraction = int(response)
return str(extraction)
except:
pass
if answer_type == "float":
try:
extraction = str(float(response))
return extraction
except:
pass
# quick extraction
if quick_extract:
print("Quickly extracting answer...")
# The answer is "text". -> "text"
try:
result = re.search(r'The answer is "(.*)"\.', response)
if result:
extraction = result.group(1)
return extraction
except:
pass
# general extraction
try:
full_prompt = create_test_prompt(demo_prompt, query, response)
extraction = get_chat_response(full_prompt, openai.api_key)
return extraction
except Exception as e:
print(e)
print(f"Error in extracting answer for {pid}")
return ""
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# input
parser.add_argument('--output_dir', type=str, default='../results')
parser.add_argument('--output_file', type=str, default='answer.json')
parser.add_argument('--response_label', type=str, default='response', help='response label for the input file')
# model
parser.add_argument('--llm_engine', type=str, default='gpt-4-0613', help='llm engine',
choices = ['gpt-3.5-turbo', 'gpt-3.5', 'gpt-4', 'gpt-4-0314', 'gpt-4-0613'])
parser.add_argument('--number', type=int, default=-1, help='number of problems to run')
parser.add_argument('--quick_extract', action='store_true', help='use rules to extract answer for some problems')
parser.add_argument('--rerun', action='store_true', help='rerun the answer extraction')
# output
parser.add_argument('--save_every', type=int, default=10, help='save every n problems')
parser.add_argument('--output_label', type=str, default='', help='label for the output file')
args = parser.parse_args()
# args
label = args.response_label
result_file = os.path.join(args.output_dir, args.output_file)
if args.output_label != '':
output_file = result_file.replace('.json', f'_{args.output_label}.json')
else:
output_file = result_file
# read results
print(f"Reading {result_file}...")
results = read_json(result_file)
# full pids
full_pids = list(results.keys())
if args.number > 0:
full_pids = full_pids[:min(args.number, len(full_pids))]
print("Number of testing problems:", len(full_pids))
# test pids
if args.rerun:
test_pids = full_pids
else:
test_pids = []
for pid in full_pids:
# print(pid)
if 'extraction' not in results[pid] or not verify_extraction(results[pid]['extraction']):
test_pids.append(pid)
test_num = len(test_pids)
print("Number of problems to run:", test_num)
# print(test_pids)
# tqdm, enumerate results
for i, pid in enumerate(tqdm(test_pids)):
problem = results[pid]
assert label in problem
response = problem[label]
extraction = extract_answer(response, problem, args.quick_extract)
results[pid]['extraction'] = extraction
if i % args.save_every == 0 or i == test_num - 1:
print(f"Saving results to {output_file}...")
save_json(results, output_file)
print(f"Results saved.")
| [
"PLACEHOLDER\n\nPLACEHOLDER\n\nPLACEHOLDER\n\nExtracted answer: ",
"PLACEHOLDER\n\nPLACEHOLDER"
] |
2024-01-10 | lupantech/MathVista | utilities.py | import os
import cv2
import json
import time
import pickle
import openai
import re
from word2number import w2n
def create_dir(output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
def read_csv(file):
data = []
with open(file, 'r') as f:
for line in f:
data.append(line.strip())
return data
def read_pandas_csv(csv_path):
# read a pandas csv sheet
import pandas as pd
df = pd.read_csv(csv_path)
return df
def read_json(path):
with open(path, 'r', encoding='utf-8') as f:
return json.load(f)
def read_jsonl(file):
with open(file, 'r') as f:
data = [json.loads(line) for line in f]
return data
def read_pickle(path):
with open(path, 'rb') as f:
return pickle.load(f)
def save_json(data, path):
with open(path, 'w') as f:
json.dump(data, f, indent=4)
def save_array_img(path, image):
cv2.imwrite(path, image)
def contains_digit(text):
# check if text contains a digit
if any(char.isdigit() for char in text):
return True
return False
def contains_number_word(text):
# check if text contains a number word
ignore_words = ["a", "an", "point"]
words = re.findall(r'\b\w+\b', text) # This regex pattern matches any word in the text
for word in words:
if word in ignore_words:
continue
try:
w2n.word_to_num(word)
return True # If the word can be converted to a number, return True
except ValueError:
continue # If the word can't be converted to a number, continue with the next word
# check if text contains a digit
if any(char.isdigit() for char in text):
return True
return False # If none of the words could be converted to a number, return False
def contains_quantity_word(text, special_keep_words=[]):
# check if text contains a quantity word
quantity_words = ["most", "least", "fewest"
"more", "less", "fewer",
"largest", "smallest", "greatest",
"larger", "smaller", "greater",
"highest", "lowest", "higher", "lower",
"increase", "decrease",
"minimum", "maximum", "max", "min",
"mean", "average", "median",
"total", "sum", "add", "subtract",
"difference", "quotient", "gap",
"half", "double", "twice", "triple",
"square", "cube", "root",
"approximate", "approximation",
"triangle", "rectangle", "circle", "square", "cube", "sphere", "cylinder", "cone", "pyramid",
"multiply", "divide",
"percentage", "percent", "ratio", "proportion", "fraction", "rate",
]
quantity_words += special_keep_words # dataset specific words
words = re.findall(r'\b\w+\b', text) # This regex pattern matches any word in the text
if any(word in quantity_words for word in words):
return True
return False # If none of the words could be converted to a number, return False
def is_bool_word(text):
if text in ["Yes", "No", "True", "False",
"yes", "no", "true", "false",
"YES", "NO", "TRUE", "FALSE"]:
return True
return False
def is_digit_string(text):
# remove ".0000"
text = text.strip()
text = re.sub(r'\.0+$', '', text)
try:
int(text)
return True
except ValueError:
return False
def is_float_string(text):
# text is a float string if it contains a "." and can be converted to a float
if "." in text:
try:
float(text)
return True
except ValueError:
return False
return False
def copy_image(image_path, output_image_path):
from shutil import copyfile
copyfile(image_path, output_image_path)
def copy_dir(src_dir, dst_dir):
from shutil import copytree
# copy the source directory to the target directory
copytree(src_dir, dst_dir)
import PIL.Image as Image
def get_image_size(img_path):
img = Image.open(img_path)
width, height = img.size
return width, height
def get_chat_response(prompt, api_key, model="gpt-3.5-turbo", temperature=0, max_tokens=256, n=1, patience=10000000,
sleep_time=0):
messages = [
{"role": "user", "content": promot},
]
# print("I am here")
while patience > 0:
patience -= 1
try:
response = openai.ChatCompletion.create(model=model,
messages=messages,
api_key=api_key,
temperature=temperature,
max_tokens=max_tokens,
n=n)
if n == 1:
prediction = response['choices'][0]['message']['content'].strip()
if prediction != "" and prediction != None:
return prediction
else:
prediction = [choice['message']['content'].strip() for choice in response['choices']]
if prediction[0] != "" and prediction[0] != None:
return prediction
except Exception as e:
if "Rate limit" not in str(e):
print(e)
if "Please reduce the length of the messages" in str(e):
print("!!Reduce promot size")
# reduce input prompt and keep the tail
new_size = int(len(prompt) * 0.9)
new_start = len(prompt) - new_size
prompt = prompt[new_start:]
messages = [
{"role": "user", "content": promot},
]
if sleep_time > 0:
time.sleep(sleep_time)
return ""
| [] |
2024-01-10 | lupantech/MathVista | models~claude.py |
import time
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
# build claude class
class Claude_Model():
def __init__(self, model="claude-2", api_key="", temperature=0, max_tokens=1024, n=1, patience=1000, sleep_time=0):
self.model = model
self.api_key = api_key
self.temperature = temperature
self.max_tokens = max_tokens
self.n = n
self.patience = patience
self.sleep_time = sleep_time
def get_response(self, image_path, user_prompt):
patience = self.patience
while patience > 0:
patience -= 1
try:
# configure the default for all requests:
anthropic = Anthropic(
max_retries=0,
api_key=self.api_key,
)
# update prompt
if "python" in user_prompt:
_HUMAN_PROMPT = HUMAN_PROMPT + "Generate the runnable python code only."
else:
_HUMAN_PROMPT = HUMAN_PROMPT
# configure per-request options
completion = anthropic.with_options(max_retries=5).completions.create(
prompt=f"{_HUMAN_PROMPT} {user_prompt}{AI_PROMPT}",
max_tokens_to_sample=self.max_tokens,
model=self.model,
)
# inference
prediction = completion.completion.strip()
if "python" in user_prompt:
prediction = prediction.replace("```python", "").replace("```", "").strip()
if prediction != "" and prediction != None:
return prediction
except Exception as e:
if "limit" not in str(e):
print(e)
if self.sleep_time > 0:
time.sleep(self.sleep_time)
return ""
| [
"PLACEHOLDERGenerate the runnable python code only.",
"PLACEHOLDER PLACEHOLDERPLACEHOLDER"
] |
2024-01-10 | whwu95/Cap4Video | modules~modeling_tv_titles_video.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import torch
from torch import nn
import torch.nn.functional as F
from modules.until_module import PreTrainedModel, AllGather, CrossEn, cosface
from modules.module_cross import CrossModel, CrossConfig, Transformer as TransformerClip
from modules.module_clip import CLIP, convert_weights
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from modules.co_attention_transformer_module import Co_attention_block
logger = logging.getLogger(__name__)
allgather = AllGather.apply
class CLIP4ClipPreTrainedModel(PreTrainedModel, nn.Module):
def __init__(self, cross_config, *inputs, **kwargs):
super(CLIP4ClipPreTrainedModel, self).__init__(cross_config)
self.cross_config = cross_config
self.clip = None
self.cross = None
@classmethod
def from_pretrained(cls, cross_model_name, state_dict=None, cache_dir=None, type_vocab_size=2, *inputs, **kwargs):
task_config = None
if "task_config" in kwargs.keys():
task_config = kwargs["task_config"]
if not hasattr(task_config, "local_rank"):
task_config.__dict__["local_rank"] = 0
elif task_config.local_rank == -1:
task_config.local_rank = 0
if state_dict is None: state_dict = {}
if hasattr(task_config, 'pretrained_clip_name'):
pretrained_clip_name = task_config.pretrained_clip_name
clip_state_dict = CLIP.get_config(pretrained_clip_name=pretrained_clip_name)
for key, val in clip_state_dict.items():
new_key = "clip." + key
if new_key not in state_dict:
state_dict[new_key] = val.clone()
cross_config, _ = CrossConfig.get_config(cross_model_name, cache_dir, type_vocab_size, state_dict=None, task_config=task_config)
model = cls(cross_config, clip_state_dict, *inputs, **kwargs) # -----------
if model.linear_patch == "3d":
contain_conv2 = False
for key in state_dict.keys():
if key.find("visual.conv2.weight") > -1:
contain_conv2 = True
break
if contain_conv2 is False and hasattr(model.clip.visual, "conv2"):
cp_weight = state_dict["clip.visual.conv1.weight"].clone()
kernel_size = model.clip.visual.conv2.weight.size(2)
conv2_size = model.clip.visual.conv2.weight.size()
conv2_size = list(conv2_size)
left_conv2_size = conv2_size.copy()
right_conv2_size = conv2_size.copy()
left_conv2_size[2] = (kernel_size - 1) // 2
right_conv2_size[2] = kernel_size - 1 - left_conv2_size[2]
left_zeros, right_zeros = None, None
if left_conv2_size[2] > 0:
left_zeros = torch.zeros(*tuple(left_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device)
if right_conv2_size[2] > 0:
right_zeros = torch.zeros(*tuple(right_conv2_size), dtype=cp_weight.dtype, device=cp_weight.device)
cat_list = []
if left_zeros != None: cat_list.append(left_zeros)
cat_list.append(cp_weight.unsqueeze(2))
if right_zeros != None: cat_list.append(right_zeros)
cp_weight = torch.cat(cat_list, dim=2)
state_dict["clip.visual.conv2.weight"] = cp_weight
if model.sim_header == 'tightTransf':
contain_cross = False
for key in state_dict.keys():
if key.find("cross.transformer") > -1:
contain_cross = True
break
if contain_cross is False:
for key, val in clip_state_dict.items():
if key == "positional_embedding":
state_dict["cross.embeddings.position_embeddings.weight"] = val.clone()
continue
if key.find("transformer.resblocks") == 0:
num_layer = int(key.split(".")[2])
# cut from beginning
if num_layer < task_config.cross_num_hidden_layers:
state_dict["cross."+key] = val.clone()
continue
if model.sim_header in ["seqLSTM", "seqTransf"]:
contain_frame_position = False
for key in state_dict.keys():
if key.find("frame_position_embeddings") > -1:
contain_frame_position = True
break
if contain_frame_position is False:
for key, val in clip_state_dict.items():
if key == "positional_embedding":
state_dict["frame_position_embeddings.weight"] = val.clone()
continue
if "seqTransf" in model.sim_header and key.find("transformer.resblocks") == 0:
num_layer = int(key.split(".")[2])
# cut from beginning
if num_layer < task_config.cross_num_hidden_layers:
state_dict[key.replace("transformer.", "transformerClip.")] = val.clone()
continue
## <=== End of initialization trick
if state_dict is not None:
model = cls.init_preweight(model, state_dict, task_config=task_config)
return model
def show_log(task_config, info):
if task_config is None or task_config.local_rank == 0:
logger.warning(info)
def update_attr(target_name, target_config, target_attr_name, source_config, source_attr_name, default_value=None):
if hasattr(source_config, source_attr_name):
if default_value is None or getattr(source_config, source_attr_name) != default_value:
setattr(target_config, target_attr_name, getattr(source_config, source_attr_name))
show_log(source_config, "Set {}.{}: {}.".format(target_name,
target_attr_name, getattr(target_config, target_attr_name)))
return target_config
def check_attr(target_name, task_config):
return hasattr(task_config, target_name) and task_config.__dict__[target_name]
class CLIP4Clip(CLIP4ClipPreTrainedModel):
def __init__(self, cross_config, clip_state_dict, task_config):
super(CLIP4Clip, self).__init__(cross_config)
self.task_config = task_config
self.ignore_video_index = -1
assert self.task_config.max_words + self.task_config.max_frames <= cross_config.max_position_embeddings
self._stage_one = True
self._stage_two = False
show_log(task_config, "Stage-One:{}, Stage-Two:{}".format(self._stage_one, self._stage_two))
self.loose_type = False
if self._stage_one and check_attr('loose_type', self.task_config):
self.loose_type = True
show_log(task_config, "Test retrieval by loose type.")
# CLIP Encoders: From OpenAI: CLIP [https://github.com/openai/CLIP] ===>
vit = "visual.proj" in clip_state_dict
assert vit
if vit:
vision_width = clip_state_dict["visual.conv1.weight"].shape[0]
vision_layers = len([k for k in clip_state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = clip_state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((clip_state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_resolution = vision_patch_size * grid_size
else:
counts: list = [len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"visual.layer{b}"))) for b in
[1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = clip_state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((clip_state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == clip_state_dict["visual.attnpool.positional_embedding"].shape[0]
image_resolution = output_width * 32
embed_dim = clip_state_dict["text_projection"].shape[1]
context_length = clip_state_dict["positional_embedding"].shape[0]
vocab_size = clip_state_dict["token_embedding.weight"].shape[0]
transformer_width = clip_state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in clip_state_dict if k.startswith(f"transformer.resblocks")))
show_log(task_config, "\t embed_dim: {}".format(embed_dim))
show_log(task_config, "\t image_resolution: {}".format(image_resolution))
show_log(task_config, "\t vision_layers: {}".format(vision_layers))
show_log(task_config, "\t vision_width: {}".format(vision_width))
show_log(task_config, "\t vision_patch_size: {}".format(vision_patch_size))
show_log(task_config, "\t context_length: {}".format(context_length))
show_log(task_config, "\t vocab_size: {}".format(vocab_size))
show_log(task_config, "\t transformer_width: {}".format(transformer_width))
show_log(task_config, "\t transformer_heads: {}".format(transformer_heads))
show_log(task_config, "\t transformer_layers: {}".format(transformer_layers))
self.linear_patch = '2d'
if hasattr(task_config, "linear_patch"):
self.linear_patch = task_config.linear_patch
show_log(task_config, "\t\t linear_patch: {}".format(self.linear_patch))
# use .float() to avoid overflow/underflow from fp16 weight. https://github.com/openai/CLIP/issues/40
cut_top_layer = 0
show_log(task_config, "\t cut_top_layer: {}".format(cut_top_layer))
self.clip = CLIP(
embed_dim,
image_resolution, vision_layers-cut_top_layer, vision_width, vision_patch_size,
context_length, vocab_size, transformer_width, transformer_heads, transformer_layers-cut_top_layer,
linear_patch=self.linear_patch
).float()
self.co_connetion_transformer_model_block = nn.Sequential(*[Co_attention_block(hidden_size=embed_dim, num_attention_heads=transformer_heads, dropout_rate=0.1) for i in range(1)])
for key in ["input_resolution", "context_length", "vocab_size"]:
if key in clip_state_dict:
del clip_state_dict[key]
convert_weights(self.clip)
# <=== End of CLIP Encoders
self.sim_header = 'meanP'
if hasattr(task_config, "sim_header"):
self.sim_header = task_config.sim_header
show_log(task_config, "\t sim_header: {}".format(self.sim_header))
if self.sim_header == "tightTransf": assert self.loose_type is False
self.interaction = 'dp'
if hasattr(task_config, "interaction"):
self.interaction = task_config.interaction
show_log(task_config, "\t interaction: {}".format(self.interaction))
cross_config.max_position_embeddings = context_length
if self.loose_type is False:
# Cross Encoder ===>
cross_config = update_attr("cross_config", cross_config, "num_hidden_layers", self.task_config, "cross_num_hidden_layers")
self.cross = CrossModel(cross_config)
# <=== End of Cross Encoder
self.similarity_dense = nn.Linear(cross_config.hidden_size, 1)
if self.sim_header in ["seqLSTM", "seqTransf"]:
self.frame_position_embeddings = nn.Embedding(cross_config.max_position_embeddings, cross_config.hidden_size)
if "seqTransf" in self.sim_header:
self.transformerClip = TransformerClip(width=transformer_width, layers=self.task_config.cross_num_hidden_layers, heads=transformer_heads,)
if self.sim_header == "seqLSTM":
self.lstm_visual = nn.LSTM(input_size=cross_config.hidden_size, hidden_size=cross_config.hidden_size,
batch_first=True, bidirectional=False, num_layers=1)
self.fuse_weight_fc = nn.Linear(transformer_width, 2)
self.query_weight_fc = nn.Sequential(
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, 1))
self.text_pool_type = task_config.text_pool_type
if self.text_pool_type in ['weight_l', 'weight_g']:
self.title_weight_fc = nn.Sequential(
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, 1))
self.k = task_config.k
if self.interaction == 'wti':
if self.task_config.wti_arch == 1:
self.text_weight_fc = nn.Linear(transformer_width, 1)
self.video_weight_fc = nn.Linear(transformer_width, 1)
elif self.task_config.wti_arch == 2:
self.text_weight_fc = nn.Sequential(
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, 1))
self.video_weight_fc = nn.Sequential(
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, 1))
elif self.task_config.wti_arch == 3:
self.text_weight_fc = nn.Sequential(
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, 1))
self.video_weight_fc = nn.Sequential(
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, transformer_width), nn.ReLU(inplace=True),
nn.Linear(transformer_width, 1))
if self.text_pool_type in ['transf_avg']:
self.num_captions = 30
self.sentence_position_embeddings = nn.Embedding(self.num_captions, embed_dim)  # positional encoding for the temporal transformer
self.caption_transformer_layer = nn.TransformerEncoderLayer(d_model=transformer_width,
nhead=transformer_heads,
dim_feedforward=transformer_width, dropout=0,
batch_first=True)
self.caption_transformer_encoder = nn.TransformerEncoder(self.caption_transformer_layer, num_layers=2)
self.text_position_embeddings = nn.Embedding(context_length, embed_dim)
self.loss_fct = CrossEn()
self.apply(self.init_weights)
def forward(self, text_ids, attention_mask, video, video_mask, title_ids, title_mask, train_video):
text_emb, video_emb, title_emb = self.get_text_video_title_output(text_ids, video, title_ids, title_mask, attention_mask, train_video)
if self.training:
loss = 0.
if train_video == True:
sim_matrix = self.get_video_text_similarity_logits(text_emb, video_emb, title_emb, attention_mask, video_mask, title_mask,loose_type=self.loose_type)
else:
sim_matrix = self.get_titles_similarity_logits(text_emb, video_emb, title_emb, attention_mask, video_mask, title_mask, loose_type=self.loose_type)
sim_loss1 = self.loss_fct(sim_matrix)
sim_loss2 = self.loss_fct(sim_matrix.T)
sim_loss = (sim_loss1 + sim_loss2) / 2
loss = sim_loss
return loss
else:
if train_video == True:
sim_matrix = self.get_video_text_similarity_logits(text_emb, video_emb, title_emb, attention_mask,video_mask, title_mask, loose_type=self.loose_type)
else:
sim_matrix = self.get_titles_similarity_logits(text_emb, video_emb, title_emb, attention_mask,video_mask, title_mask, loose_type=self.loose_type)
return None
def get_text_output(self, input_ids):
bs_pair = input_ids.size(0)
n_text = input_ids.size(1)
input_ids = input_ids.view(-1, input_ids.shape[-1])
sequence_hidden = self.clip.encode_text(input_ids, return_hidden=True)[1].float()
sequence_hidden = sequence_hidden.view(bs_pair, n_text, -1, sequence_hidden.size(-1))
if n_text == 1:
sequence_hidden = sequence_hidden.view(bs_pair, -1, sequence_hidden.size(-1))
return sequence_hidden
def get_video_output(self, video):
# video: b, 1, t, 1, c, h, w
b, pair, ts, bs, channel, h, w = video.shape
# [b, 1, t, 1, c, h, w] -> [b*t, c, h, w]
video = video.view(b * pair * bs * ts, channel, h, w)
video_frame = bs * ts
# [bs * t, c, h, w] -> [bs * t, dim]
visual_hidden = self.clip.encode_image(video, video_frame=video_frame).float()
# [bs * t, dim] -> [bs, t, dim]
visual_hidden = visual_hidden.view(-1, video_frame, visual_hidden.size(-1))
return visual_hidden
def get_text_video_output(self, input_ids, video):
sequence_output = self.get_text_output(input_ids)
visual_output = self.get_video_output(video)
return sequence_output, visual_output
def get_text_video_title_output(self, text_ids, video, title_ids, title_mask=None, text_mask=None, train_video = True):
sequence_output = self.get_text_output(text_ids)
if train_video == True:
visual_output = self.get_video_output(video)
else:
visual_output = torch.tensor([1], device=video.device)
title_emb = self.get_text_output(title_ids)
return sequence_output, visual_output, title_emb
def _get_cross_output(self, sequence_output, visual_output, attention_mask, video_mask):
# haven't uesd, TODO
concat_features = torch.cat((sequence_output, visual_output), dim=1) # concatnate tokens and frames
concat_mask = torch.cat((attention_mask, video_mask), dim=1)
text_type_ = torch.zeros_like(attention_mask)
video_type_ = torch.ones_like(video_mask)
concat_type = torch.cat((text_type_, video_type_), dim=1)
cross_layers, pooled_output = self.cross(concat_features, concat_type, concat_mask, output_all_encoded_layers=True)
cross_output = cross_layers[-1]
return cross_output, pooled_output, concat_mask
def get_text_sep_feat(self, text_feat, text_mask):
# text_mask: [bs_text, max_words] or [bs_text, n_text, max_words]
# text_feat: [bs_text, n_words, dim] or [bs_text, n_text, n_words, dim]
# output: [bs_text, n_text, dim]
n_dim = text_feat.dim()
text_feat = text_feat.contiguous()
if n_dim == 3:  # n_dim == 3 means the input is a sentence-level text description
text_feat = text_feat[torch.arange(text_feat.shape[0]), torch.sum(text_mask, dim=-1) - 1, :]
text_feat = text_feat.unsqueeze(1).contiguous()
elif n_dim == 4:
bs_pair, n_text, n_word, text_dim = text_feat.shape
text_feat = text_feat.view(bs_pair * n_text, n_word, text_dim)
text_mask = text_mask.view(bs_pair * n_text, n_word)
text_feat = text_feat[torch.arange(text_feat.shape[0]), torch.sum(text_mask, dim=-1) - 1, :]
text_feat = text_feat.view(bs_pair, n_text, text_dim)
return text_feat
def _mean_pooling_for_similarity_text(self, sequence_output, attention_mask):
attention_mask_un = attention_mask.to(dtype=torch.float).unsqueeze(-1)
attention_mask_un[:, 0, :] = 0.
sequence_output = sequence_output * attention_mask_un
text_out = torch.sum(sequence_output, dim=1) / torch.sum(attention_mask_un, dim=1, dtype=torch.float)
return text_out
def _mean_pooling_for_similarity_video(self, visual_output, video_mask,):
video_mask_un = video_mask.to(dtype=torch.float).unsqueeze(-1)
visual_output = visual_output * video_mask_un
video_mask_un_sum = torch.sum(video_mask_un, dim=1, dtype=torch.float)
video_mask_un_sum[video_mask_un_sum == 0.] = 1.
video_out = torch.sum(visual_output, dim=1) / video_mask_un_sum
return video_out
def _mean_pooling_for_similarity(self, sequence_output, visual_output, attention_mask, video_mask,):
text_out = self._mean_pooling_for_similarity_text(sequence_output, attention_mask)
video_out = self._mean_pooling_for_similarity_video(visual_output, video_mask)
return text_out, video_out
def agg_video_feat(self, visual_output, video_mask, sim_header="meanP"):
visual_output = visual_output.contiguous()
if sim_header == "meanP":
# Default: Parameter-free type
pass
elif sim_header == "seqLSTM":
# Sequential type: LSTM
visual_output_original = visual_output
visual_output = pack_padded_sequence(visual_output, torch.sum(video_mask, dim=-1).cpu(),
batch_first=True, enforce_sorted=False)
visual_output, _ = self.lstm_visual(visual_output)
if self.training: self.lstm_visual.flatten_parameters()
visual_output, _ = pad_packed_sequence(visual_output, batch_first=True)
visual_output = torch.cat((visual_output, visual_output_original[:, visual_output.size(1):, ...].contiguous()), dim=1)
visual_output = visual_output + visual_output_original
elif "seqTransf" in sim_header:
# Sequential type: Transformer Encoder
visual_output_original = visual_output
seq_length = visual_output.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long, device=visual_output.device)
position_ids = position_ids.unsqueeze(0).expand(visual_output.size(0), -1)
frame_position_embeddings = self.frame_position_embeddings(position_ids)
visual_output = visual_output + frame_position_embeddings
extended_video_mask = (1.0 - video_mask.unsqueeze(1)) * -1000000.0
extended_video_mask = extended_video_mask.expand(-1, video_mask.size(1), -1)
visual_output = visual_output.permute(1, 0, 2) # NLD -> LND
visual_output = self.transformerClip(visual_output, extended_video_mask)
visual_output = visual_output.permute(1, 0, 2) # LND -> NLD
visual_output = visual_output + visual_output_original
return visual_output
def dot_product_logits(self, sequence_output, visual_output, text_mask, video_mask):
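        """Global similarity: match the L2-normalized end-of-text token feature against the L2-normalized, mean-pooled video feature; scaled by the CLIP logit scale during training."""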
sequence_output = self.get_text_sep_feat(sequence_output, text_mask) # B x 1 x D
if self.training:
visual_output = allgather(visual_output, self.task_config)
video_mask = allgather(video_mask, self.task_config)
sequence_output = allgather(sequence_output, self.task_config)
torch.distributed.barrier()
sequence_output = sequence_output.squeeze(1) # B x 1 x D -> B x D
sequence_output = sequence_output / sequence_output.norm(dim=-1, keepdim=True)
visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True)
visual_output = self._mean_pooling_for_similarity_video(visual_output, video_mask) # B x N_v x D -> B x D
visual_output = visual_output / visual_output.norm(dim=-1, keepdim=True)
retrieve_logits = torch.matmul(sequence_output, visual_output.t()) # n_t n_v
if self.training:
logit_scale = self.clip.logit_scale.exp()
retrieve_logits = logit_scale * retrieve_logits
return retrieve_logits
else:
return retrieve_logits
def wti_interaction(self, text_feat, video_feat, text_mask, video_mask):
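        """Token-wise ('ti') or weighted token-wise ('wti') interaction: take the max over word/frame similarities, then average ('ti') or combine with learned text/video token weights ('wti')."""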
if self.training and torch.cuda.is_available(): # batch merge here
text_feat = allgather(text_feat, self.task_config)
video_feat = allgather(video_feat, self.task_config)
text_mask = allgather(text_mask, self.task_config)
video_mask = allgather(video_mask, self.task_config)
torch.distributed.barrier() # force sync
if self.interaction == 'wti':
text_weight = self.text_weight_fc(text_feat).squeeze(2) # B x N_t x D -> B x N_t
            text_weight.masked_fill_((1 - text_mask).clone().detach().bool(), float("-inf"))
text_weight = torch.softmax(text_weight, dim=-1) # B x N_t
video_weight = self.video_weight_fc(video_feat).squeeze(2) # B x N_v x D -> B x N_v
video_weight.masked_fill_((1 - video_mask).clone().detach().bool(), float("-inf"))
video_weight = torch.softmax(video_weight, dim=-1) # B x N_v
text_feat = text_feat / text_feat.norm(dim=-1, keepdim=True)
video_feat = video_feat / video_feat.norm(dim=-1, keepdim=True)
retrieve_logits = torch.einsum('atd,bvd->abtv', [text_feat, video_feat])
retrieve_logits = torch.einsum('abtv,at->abtv', [retrieve_logits, text_mask])
retrieve_logits = torch.einsum('abtv,bv->abtv', [retrieve_logits, video_mask])
text_sum = text_mask.sum(-1)
video_sum = video_mask.sum(-1)
# max for video token
if self.interaction == 'ti': # token-wise interaction
t2v_logits, max_idx1 = retrieve_logits.max(dim=-1) # abtv -> abt
v2t_logits, max_idx2 = retrieve_logits.max(dim=-2) # abtv -> abv
t2v_logits = torch.sum(t2v_logits, dim=2) / (text_sum.unsqueeze(1))
v2t_logits = torch.sum(v2t_logits, dim=2) / (video_sum.unsqueeze(0))
retrieve_logits = (t2v_logits + v2t_logits) / 2.0
elif self.interaction == 'wti': # weighted token-wise interaction
t2v_logits, max_idx1 = retrieve_logits.max(dim=-1) # abtv -> abt
t2v_logits = torch.einsum('abt,at->ab', [t2v_logits, text_weight])
v2t_logits, max_idx2 = retrieve_logits.max(dim=-2) # abtv -> abv
v2t_logits = torch.einsum('abv,bv->ab', [v2t_logits, video_weight])
retrieve_logits = (t2v_logits + v2t_logits) / 2.0
if self.training:
logit_scale = self.clip.logit_scale.exp()
retrieve_logits = logit_scale * retrieve_logits
return retrieve_logits
else:
return retrieve_logits
def _loose_similarity(self, sequence_output, visual_output, attention_mask, video_mask, sim_header="meanP"):
sequence_output = sequence_output.contiguous()
if self.interaction == 'dp':
retrieve_logits = self.dot_product_logits(sequence_output, visual_output, attention_mask, video_mask)
elif self.interaction in ['ti', 'wti']:
retrieve_logits = self.wti_interaction(sequence_output, visual_output, attention_mask, video_mask)
else:
raise NotImplementedError
return retrieve_logits
def _cross_similarity(self, sequence_output, visual_output, attention_mask, video_mask):
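        """Score every text against every video with the cross-encoder, processing the text rows in chunks of step_size to limit memory use."""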
sequence_output, visual_output = sequence_output.contiguous(), visual_output.contiguous()
b_text, s_text, h_text = sequence_output.size()
b_visual, s_visual, h_visual = visual_output.size()
retrieve_logits_list = []
step_size = b_text # set smaller to reduce memory cost
split_size = [step_size] * (b_text // step_size)
release_size = b_text - sum(split_size)
if release_size > 0:
split_size += [release_size]
        # the CLIP text branch only returns the last hidden state, so rebuild the attention mask as all ones
attention_mask = torch.ones(sequence_output.size(0), 1)\
.to(device=attention_mask.device, dtype=attention_mask.dtype)
sequence_output_splits = torch.split(sequence_output, split_size, dim=0)
attention_mask_splits = torch.split(attention_mask, split_size, dim=0)
for i in range(len(split_size)):
sequence_output_row = sequence_output_splits[i]
attention_mask_row = attention_mask_splits[i]
sequence_output_l = sequence_output_row.unsqueeze(1).repeat(1, b_visual, 1, 1)
sequence_output_l = sequence_output_l.view(-1, s_text, h_text)
attention_mask_l = attention_mask_row.unsqueeze(1).repeat(1, b_visual, 1)
attention_mask_l = attention_mask_l.view(-1, s_text)
step_truth = sequence_output_row.size(0)
visual_output_r = visual_output.unsqueeze(0).repeat(step_truth, 1, 1, 1)
visual_output_r = visual_output_r.view(-1, s_visual, h_visual)
video_mask_r = video_mask.unsqueeze(0).repeat(step_truth, 1, 1)
video_mask_r = video_mask_r.view(-1, s_visual)
cross_output, pooled_output, concat_mask = \
self._get_cross_output(sequence_output_l, visual_output_r, attention_mask_l, video_mask_r)
retrieve_logits_row = self.similarity_dense(pooled_output).squeeze(-1).view(step_truth, b_visual)
retrieve_logits_list.append(retrieve_logits_row)
retrieve_logits = torch.cat(retrieve_logits_list, dim=0)
return retrieve_logits
def get_similarity_logits(self, sequence_output, visual_output, attention_mask, video_mask, loose_type=False):
if loose_type:
assert self.sim_header in ["meanP", "seqLSTM", "seqTransf"]
retrieve_logits = self._loose_similarity(sequence_output, visual_output, attention_mask, video_mask, sim_header=self.sim_header)
return retrieve_logits
else:
assert self.sim_header in ["tightTransf"]
retrieve_logits = self._cross_similarity(sequence_output, visual_output, attention_mask, video_mask, )
return retrieve_logits
def get_video_text_similarity_logits(self, text_feat, video_feat, title_feat, text_mask, video_mask, title_mask, loose_type=False):
# [bs_text, 1, max_words] -> [bs_text, max_words]
text_mask = text_mask.view(-1, text_mask.shape[-1])
# [bs_video, 1, max_words] -> [bs_video, max_words]
video_mask = video_mask.view(-1, video_mask.shape[-1])
title_output = self.get_text_sep_feat(title_feat,title_mask)
cross_video_mask = video_mask.reshape(video_mask.shape[0],1,1,video_mask.shape[-1])
cross_titles_mask = torch.ones((title_mask.shape[0],title_mask.shape[1]),device=title_output.device)
cross_titles_mask = cross_titles_mask.reshape(cross_titles_mask.shape[0],1,1,cross_titles_mask.shape[-1])
for co_layer in self.co_connetion_transformer_model_block:
video_feat, title_output, co_attention_probs = co_layer(video_feat,cross_video_mask,title_output, cross_titles_mask)
        visual_output = self.agg_video_feat(video_feat, video_mask, self.sim_header)  # aggregate frames through the seq transformer
qv_logits = self.get_similarity_logits(text_feat, visual_output, text_mask, video_mask, loose_type)
retrieve_logits = qv_logits
return retrieve_logits
def get_text_title_similarity_logits(self, text_output, title_output, text_mask, title_mask):
# dp
title_mask = title_mask.view(-1, title_mask.shape[-1])
text_output = self.get_text_sep_feat(text_output, text_mask).squeeze(1) # B x 1 x D -> B x D
title_output = self.get_text_sep_feat(title_output, title_mask).squeeze(1) # B x 1 x D -> B x D
if self.training:
text_output = allgather(text_output, self.task_config)
title_output = allgather(title_output, self.task_config)
torch.distributed.barrier()
text_output = text_output / text_output.norm(dim=-1, keepdim=True)
title_output = title_output / title_output.norm(dim=-1, keepdim=True)
retrieve_logits = torch.matmul(text_output, title_output.t()) # n_title n_cap
if self.training:
logit_scale = self.clip.logit_scale.exp()
retrieve_logits = logit_scale * retrieve_logits
return retrieve_logits
else:
return retrieve_logits
def get_titles_similarity_logits(self, text_feat, video_feat, title_feat, text_mask, video_mask, title_mask,loose_type=False):
text_mask = text_mask.view(-1, text_mask.shape[-1])
video_mask = video_mask.view(-1, video_mask.shape[-1])
qt_logits = self.get_text_titles_similarity_logits(text_feat, title_feat, text_mask, title_mask, video_feat,video_mask)
retrieve_logits = qt_logits
return retrieve_logits
def get_text_titles_similarity_logits(self, text_output, title_output, text_mask, title_mask, video_feat,video_mask):
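        """Match query embeddings against pooled title embeddings; titles are pooled according to text_pool_type ('clip_top1', 'avg', 'transf_avg', or 'topk')."""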
title_output = self.get_text_sep_feat(title_output, title_mask)
if self.text_pool_type in ['transf_avg']:
            # shape: (batch, n_titles, dim)
x_original = title_output
seq_length = title_output.shape[1]
position_ids = torch.arange(seq_length, dtype=torch.long, device=title_output.device)
position_ids = position_ids.unsqueeze(0).expand(title_output.size(0), -1)
sentence_position_embeddings = self.sentence_position_embeddings(position_ids)
title_output = title_output + sentence_position_embeddings
title_output = self.caption_transformer_encoder(title_output)
if self.training:
text_output = allgather(text_output, self.task_config)
title_output = allgather(title_output, self.task_config)
text_mask = allgather(text_mask, self.task_config)
torch.distributed.barrier()
title_ori = title_output
text_embed = self.get_text_sep_feat(text_output, text_mask).squeeze(1)
################# title pooling begin ##############
if self.text_pool_type == 'clip_top1':
title_embed_pooled = title_output[:, 0]
elif self.text_pool_type in ['avg', 'transf_avg']:
title_embed_pooled = title_output.mean(dim=1)
elif self.text_pool_type == 'topk':
bs_text, embed_dim = text_embed.shape
sims = title_output @ text_embed.t()
sims_topk = torch.topk(sims, self.k, dim=1)[1]
title_output = title_output.unsqueeze(-1).expand(-1, -1, -1, bs_text)
sims_topk = sims_topk.unsqueeze(2).expand(-1, -1, embed_dim, -1)
title_embeds_topk = torch.gather(title_output, dim=1, index=sims_topk)
title_embed_pooled = title_embeds_topk.sum(dim=1)
title_embed_pooled = title_embed_pooled.permute(0, 2, 1)
################# title pooling end ##############
text_embed = text_embed / text_embed.norm(dim=-1, keepdim=True)
title_embed_pooled = title_embed_pooled / title_embed_pooled.norm(dim=-1, keepdim=True)
if self.text_pool_type in ['clip_top1', 'avg', 'transf_avg']:
# bs_text x bs_title
q2t_logits = torch.mm(text_embed, title_embed_pooled.t())
if self.text_pool_type in ['clip_top1', 'avg', 'transf_avg']:
retrieve_logits = q2t_logits
if self.training:
logit_scale = self.clip.logit_scale.exp()
retrieve_logits = logit_scale * retrieve_logits
return retrieve_logits
else:
return retrieve_logits
| [] |
2024-01-10 | srihariKrishnaswamy/trap-deconstructed | Python_Items~prediction_from_rec.py | import models
import torch
import data_creation
import os
from dotenv import load_dotenv
from data_creation import bpms, feels, keys, modes
import data_config
from prediction_from_file import BPM_MODEL_PATH, FEEL_MODEL_PATH, MAJOR_KEY_MODEL_PATH, MINOR_KEY_MODEL_PATH, MODE_MODEL_PATH, SECONDS_PER_CHOP, SAMPLE_RATE, NUM_SAMPLES, TEMP_NAME, INT_FOLDER
# import pyaudio
import wave
import time
from prediction_from_file import INT_FOLDER, process_song_and_make_preds
import librosa
import openai
from server import TEMP_FILE
REC_NAME = "temp_rec.wav"
TEMP_REC_PATH = os.path.join(INT_FOLDER, TEMP_FILE)
# def collect_audio():
# audio = pyaudio.PyAudio()
# stream = audio.open(format=pyaudio.paInt16, channels=1, rate=data_config.SAMPLE_RATE, input=True, frames_per_buffer=1024)
# frames = []
# seconds_needed = data_creation.SECONDS_PER_CHOP + 2
# start_time = time.time()
# curr_time = time.time()
# try:
# while True and seconds_needed >= curr_time - start_time: # it'll go for max 10 seconds (we give it 2 seconds of padding just because)
# data = stream.read(1024)
# frames.append(data)
# print(f"seconds remaining: {(start_time+seconds_needed-curr_time):.1f}")
# curr_time = time.time()
# except KeyboardInterrupt:
# pass
# if curr_time - start_time < seconds_needed:
# print("recording prematurely stopped, no processing done")
# return
# stream.stop_stream()
# stream.close()
# audio.terminate()
# sound_file = wave.open(TEMP_REC_PATH, "wb")
# sound_file.setnchannels(1)
# sound_file.setsampwidth(audio.get_sample_size(pyaudio.paInt16))
# sound_file.setframerate(data_config.SAMPLE_RATE)
# sound_file.writeframes(b''.join(frames))
# sound_file.close()
def output_bpm():
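    """Estimate the tempo (BPM) of the temporary recording with librosa's beat tracker."""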
filename = TEMP_REC_PATH
signal, sr = librosa.load(filename)
tempo, beat_frames = librosa.beat.beat_track(y=signal, sr=sr)
# print(f"BPM: {tempo:.2f}")
return int(tempo)
def ask_gpt(bpm, key, mode, feel):
load_dotenv()
bpm = int(bpm)
openai.api_key = os.getenv("OPENAI_API_KEY")
completion = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": f"give me 3 tips (one sentence each) to create a good sounding, {feel}, {bpm} bpm, trap song in the key of {key} {mode}. Answer in this exact format with NO OTHER TEXT: 1. Tip 1 2. Tip 2 3. Tip 3"}])
return completion.choices[0].message.content
def run_inference():
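    """Load the BPM, feel, key, and mode models, run predictions on the temporary recording, and ask GPT for beat-making tips; returns empty strings if processing fails."""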
bpm_model = models.BPM_Predictor(1, len(bpms))
bpm_sd = torch.load(BPM_MODEL_PATH)
bpm_model.load_state_dict(bpm_sd)
feel_model = models.Feel_Predictor(1, len(feels))
feel_sd = torch.load(FEEL_MODEL_PATH)
feel_model.load_state_dict(feel_sd)
major_key_model = models.Key_Predictor(1, len(keys))
major_key_sd = torch.load(MAJOR_KEY_MODEL_PATH)
major_key_model.load_state_dict(major_key_sd)
minor_key_model = models.Key_Predictor(1, len(keys))
minor_key_sd = torch.load(MINOR_KEY_MODEL_PATH)
minor_key_model.load_state_dict(minor_key_sd)
mode_model = models.Mode_Predictor(1)
mode_sd = torch.load(MODE_MODEL_PATH)
mode_model.load_state_dict(mode_sd)
# collect_audio() # this should be done by react, we're testing API calls for now
song_tempo, song_key, song_mode, song_feel, gpt_message = "", "", "", "", ""
if os.path.exists(TEMP_REC_PATH):
tempo = output_bpm()
bpm, feel, key, mode = process_song_and_make_preds(TEMP_REC_PATH, bpm_model, feel_model, major_key_model, minor_key_model, mode_model)
# note that the bpm from this call is not what's returned or displayed, for that we use tempo from prev line
# os.remove(TEMP_REC_PATH)
if bpm is not None and feel is not None and key is not None:
# print(f"BPM: {bpms[int(bpm)]} | Key: {keys[int(key)]} | Mode: {modes[int(mode)]} | Feel: {feels[int(feel)]}")
# print(f"Generating tips to start the beat... ")
message = ask_gpt(tempo, keys[int(key)], modes[int(mode)], feels[int(feel)])
# print(message)
song_tempo = str(int(tempo))
song_feel = str(feels[int(feel)])
song_key = str(keys[int(key)])
song_mode = str(modes[int(mode)])
gpt_message = message
# else:
# print("there was an issue in processing")
return song_tempo, song_feel, song_key, song_mode, gpt_message
if __name__ == "__main__":
tempo, feel, key, mode, message = run_inference()
print(f"{tempo}|{feel}|{key}|{mode}|{message}")
# if any of the above are "" then there was a problem | [
"give me 3 tips (one sentence each) to create a good sounding, PLACEHOLDER, PLACEHOLDER bpm, trap song in the key of PLACEHOLDER PLACEHOLDER. Answer in this exact format with NO OTHER TEXT: 1. Tip 1 2. Tip 2 3. Tip 3"
] |
2024-01-10 | ArneBinder/convqa | dep~convai_evaluation.py | # # Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
import logging
from pprint import pformat
from collections import defaultdict
from functools import partial
from tqdm import trange
import torch
import torch.nn.functional as F
from parlai.core.agents import Agent
from parlai.scripts.eval_model import setup_args as base_setup_args
from projects.convai2.eval_hits import eval_hits, setup_args as setup_args_hits
from projects.convai2.eval_f1 import eval_f1, setup_args as setup_args_f1
from projects.convai2.eval_ppl import eval_ppl, setup_args as setup_args_ppl
from projects.convai2.build_dict import build_dict
from pytorch_pretrained_bert import OpenAIGPTDoubleHeadsModel, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2Tokenizer, \
GPT2DoubleHeadsModel, GPT2LMHeadModel
from convqa.train import build_input_from_segments, pad_dataset, SPECIAL_TOKENS
from convqa.utils import download_pretrained_model, AttrDict
from convqa.interact import sample_sequence
class TransformerAgent(Agent):
@staticmethod
def add_cmdline_args(argparser):
agent_args = argparser.add_argument_group('Agent parameters')
agent_args.add_argument("--model_checkpoint", type=str, default="", help="Path, url or short name of the model")
agent_args.add_argument("--model", type=str, default="openai-gpt", help="Model type (openai-gpt or gpt2)")
agent_args.add_argument("--max_history", type=int, default=2, help="Number of previous utterances to keep in history")
agent_args.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu", help="Device (cuda or cpu)")
agent_args.add_argument("--eval_type", type=str, default="hits@1", help="hits@1, ppl or f1")
agent_args.add_argument("--no_sample", action='store_true')
agent_args.add_argument("--max_length", type=int, default=20)
agent_args.add_argument("--min_length", type=int, default=1)
agent_args.add_argument("--seed", type=int, default=0)
agent_args.add_argument("--temperature", type=int, default=0.7)
agent_args.add_argument("--top_k", type=int, default=20)
agent_args.add_argument("--top_p", type=float, default=0.9, help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)")
return argparser
def __init__(self, opt, shared=None):
super(TransformerAgent, self).__init__(opt, shared)
args = AttrDict(opt) # to keep most commands identical to the interact.py script
self.args = args
logging.basicConfig(level=logging.INFO)
self.logger = logging.getLogger(__file__)
self.logger.info(pformat(args))
random.seed(args.seed)
torch.random.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
if shared is None:
self.logger.info("Get pretrained model and tokenizer")
if args.model_checkpoint == "":
args.model_checkpoint = download_pretrained_model()
if args.model.startswith('gpt2'):
self.tokenizer = GPT2Tokenizer.from_pretrained(args.model_checkpoint)
if self.args.eval_type == "hits@1":
self.model_checkpoint = GPT2DoubleHeadsModel.from_pretrained(args.model_checkpoint)
else:
self.model_checkpoint = GPT2LMHeadModel.from_pretrained(args.model_checkpoint)
elif args.model == 'openai-gpt':
self.tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_checkpoint)
if self.args.eval_type == "hits@1":
self.model_checkpoint = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_checkpoint)
else:
self.model_checkpoint = OpenAIGPTLMHeadModel.from_pretrained(args.model_checkpoint)
else:
                raise NotImplementedError('model type "%s" not implemented. Use either "openai-gpt" or "gpt2"' % args.model)
self.model_checkpoint.to(args.device)
self.model_checkpoint.eval()
self.logger.info("Build BPE prefix dictionary")
convai_dict = build_dict()
assert len(convai_dict) == 19304
self.prefix2words = self.get_prefix2words(convai_dict)
else:
self.model_checkpoint = shared['model']
self.tokenizer = shared['tokenizer']
self.prefix2words = shared['prefix2words']
self.special_tokens_ids = self.tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
self.persona = []
self.history = []
self.labels = []
self.reset()
def observe(self, observation):
if self.episode_done:
self.reset()
if self.labels:
# Add the previous response to the history
self.history.append(self.labels)
if 'labels' in observation or 'eval_labels' in observation:
text = observation.get('labels', observation.get('eval_labels', [[]]))[0]
self.labels = self.tokenizer.encode(text)
if 'text' in observation:
text = observation['text']
for subtext in text.split('\n'):
subtext = subtext.strip()
if subtext.startswith('your persona:'):
subtext = subtext.replace('your persona:', '').strip()
self.persona.append(self.tokenizer.encode(subtext))
else:
self.history.append(self.tokenizer.encode(subtext))
self.history = self.history[-(2*self.args.max_history+1):]
candidates = []
if 'label_candidates' in observation:
for candidate in observation['label_candidates']:
candidates.append((self.tokenizer.encode(candidate), candidate))
self.candidates = candidates
self.episode_done = observation['episode_done']
self.observation = observation
return observation
def act(self):
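        """In hits@1 mode, rank the label candidates with the double-heads model; otherwise sample a free-form reply with sample_sequence."""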
reply = {}
if self.args.eval_type == "hits@1" and len(self.candidates) > 0:
instances = defaultdict(list)
for candidate, _ in self.candidates:
instance, _ = build_input_from_segments(self.persona, self.history, candidate, self.tokenizer)
for input_name, input_array in instance.items():
instances[input_name].append(input_array)
inputs = pad_dataset(instances, padding=self.special_tokens_ids[-1])
tensor_inputs = {}
for input_name in ["input_ids", "mc_token_ids", "token_type_ids"]:
tensor = torch.tensor(inputs[input_name], device=self.args.device)
tensor = tensor.view((-1, len(self.candidates)) + tensor.shape[1:])
tensor_inputs[input_name] = tensor
with torch.no_grad():
_, mc_logits = self.model_checkpoint(**tensor_inputs)
val, ind = torch.sort(mc_logits[0], descending=True)
ypred = self.candidates[ind[0].item()][1] # match
tc = []
for j in range(len(self.candidates)):
tc.append(self.candidates[ind[j].item()][1])
reply = {'text': ypred, 'text_candidates': tc}
else:
# We are in interactive of f1 evaluation mode => just sample
with torch.no_grad():
out_ids = sample_sequence(self.persona, self.history, self.tokenizer, self.model_checkpoint, self.args)
out_text = self.tokenizer.decode(out_ids, skip_special_tokens=True,
clean_up_tokenization_spaces=(self.args.eval_type != 'f1'))
reply = {'text': out_text}
return reply
def next_word_probability(self, partial_out):
"""Return probability distribution over next words given an input and
partial true output. This is used to calculate the per-word perplexity.
"""
partial_out_ids = self.tokenizer.encode(' '.join(partial_out))
instance, _ = build_input_from_segments(self.persona, self.history, partial_out_ids,
self.tokenizer, with_eos=False)
input_ids = torch.tensor(instance["input_ids"], device=self.args.device).unsqueeze(0)
token_type_ids = torch.tensor(instance["token_type_ids"], device=self.args.device).unsqueeze(0)
with torch.no_grad():
logits = self.model_checkpoint(input_ids, token_type_ids=token_type_ids)
probs = F.softmax(logits[0, -1], dim=0)
dist = {}
for prefix_id, words in self.prefix2words.items():
for word, ratio in words.items():
dist[word] = probs[prefix_id].item() * ratio
return dist
def get_prefix2words(self, convai_dict, smoothing_freq=5):
""" map BPE-prefix => dict(full_words beginning with BPE-prefix, associated words_counts) """
prefix2words = defaultdict(dict)
for i in trange(len(convai_dict)):
word = convai_dict[i]
freq = convai_dict.freq[word] + smoothing_freq
bpe_tokens = self.tokenizer.bpe(word).split(' ')
prefix_id = self.tokenizer.convert_tokens_to_ids(bpe_tokens[0])
prefix2words[prefix_id].update(dict([(word, freq)]))
for prefix_id, words in prefix2words.items():
total_counts = sum(words.values())
prefix2words[prefix_id] = dict((word, count/total_counts) for word, count in words.items())
return prefix2words
def share(self):
shared = super(TransformerAgent, self).share()
shared['tokenizer'] = self.tokenizer
shared['model'] = self.model_checkpoint
shared['prefix2words'] = self.prefix2words
return shared
def reset(self):
self.persona = []
self.history = []
self.labels = []
self.candidates = []
self.episode_done = True
self.observation = None
if __name__ == '__main__':
parser = base_setup_args(None)
parser.set_params(
model='convai_evaluation:TransformerAgent')
opt = parser.parse_args(print_args=False)
if opt['eval_type'] == "hits@1":
setup_args = setup_args_hits(None)
eval_fct = partial(eval_hits, print_parser=setup_args)
elif opt['eval_type'] == "ppl":
setup_args = setup_args_ppl(None)
eval_fct = eval_ppl
elif opt['eval_type'] == "f1":
setup_args = setup_args_f1(None)
eval_fct = partial(eval_f1, print_parser=setup_args)
else:
raise ValueError
setup_args.set_params(
model='convai_evaluation:TransformerAgent')
opt = setup_args.parse_args(print_args=False)
eval_fct(opt)
| [] |
2024-01-10 | ArneBinder/convqa | convqa~convert.py | import bz2
import json
import logging
import os
import random
import tarfile
from collections import Counter
import numpy as np
import plac
from tqdm import tqdm
from utils import create_sentencizer
logger = logging.getLogger(__file__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
# too large for memory
def sample_neg_indices_old(n_instances, n_candidates):
# create index array [[0, 1, .., n_instances-1], .., [0, 1, .., n_instances-1]]
a = np.tile(np.arange(n_instances), n_instances).reshape((n_instances, n_instances))
# for each row, replace current idx with last
np.fill_diagonal(a, n_instances-1)
# truncate replaced index (last one)
a = a[:, :-1]
# shuffle each row
#np.random.shuffle(a.T)
np.apply_along_axis(np.random.shuffle, axis=1, arr=a)
# return first n_candidates of each row
return a[:, :n_candidates]
def sample_neg_candidates(instances, n_candidates):
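    """Draw n_candidates - 1 negatives per instance from the pool of unique instances, weighted by frequency and with the instance's own probability zeroed out; collisions are counted and logged."""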
if not isinstance(instances, np.ndarray):
instances = np.array(instances)
#counts = Counter(instances)
#frequencies = np.empty(len(counts), dtype=float)
#unique = np.empty(len(counts), dtype=str)
#indices = {}
#for i, s in enumerate(counts):
# frequencies[i] = counts[s]
# indices[s] = i
# #unique[i] = s
#u = np.array(list(counts.keys()), dtype=str)
logger.debug('number of instances: %i' % len(instances))
u, c = np.unique(instances, return_counts=True)
indices = {s: i for i, s in enumerate(u)}
#unique = list(counts.keys())
logger.info('collected %i unique instances' % len(u))
n_collisions = 0
nn_collisions = 0
collision_log = []
#a = np.empty(shape=(len(instances), n_candidates - 1), dtype=str)
a = []
for i, instance in tqdm(enumerate(instances), total=len(instances)):
idx = indices[instance]
current_count = c[idx]
c[idx] = 0.0
a.append(np.random.choice(u, n_candidates - 1, p=c / (len(instances)-current_count)))
c[idx] = current_count
assert u[idx] == instance, 'mismatch'
# much slower
#mask = instances != instance
#a[i] = np.random.choice(instances[mask], n_candidates - 1)
if instance in a[i]:
nn_collisions += 1
collision_indices = np.nonzero(a[i] == instance)[0]
n_collisions += len(collision_indices)
collision_log.append('collision: %s is %i times in %s @%i' % (instance, len(collision_indices), str(a[i]), i))
logger.info('collisions: %i (in %i instances; total: %i)' % (n_collisions, nn_collisions, len(instances)))
for e in collision_log:
logger.debug(e)
return a
def count_sentences(s, sentencizer, counter=None):
s = s.strip()
try:
sents = sentencizer(s)
except Exception as e:
if ' ' in s:
logger.warning('could not sentencize "%s", return as ONE sentence (%s)' % (s.strip(), e))
sents = [s]
if counter is not None:
counter[len(sents)] +=1
return len(sents)
def create_instance_from_coqa(record, stats, sentencizer=None, max_sentences_qa=1, max_sentences_background=None):
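    """Build a dialog instance from a CoQA record: the story becomes the background, and question/answer pairs whose question or answer exceeds max_sentences_qa sentences are skipped."""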
all_questions = []
all_answers = []
was_truncated = False
instance = {}
instance['background'] = record['story']
if max_sentences_background is not None:
if 'background' not in stats:
stats['background'] = {'n_sents': Counter()}
stats['background']['n_sents'][len(instance['background'])] += 1
if len(instance['background']) > max_sentences_background:
was_truncated = True
instance['background'] = instance['background'][:max_sentences_background]
assert len(record['questions']) == len(record['answers']), 'number of questions / answers mismatch'
#instance['utterances'] = []
instance['n_utterances'] = 0
#history = []
for i in range(len(record['questions'])):
#utterance = {}
assert record['questions'][i]['turn_id'] == record['answers'][i]['turn_id'] == i + 1, 'turn_id mismatch'
question_text = record['questions'][i]['input_text']
answer_text = record['answers'][i]['input_text']
# skip answer-question pairs if number of sentences in one of them > max_sentences
continue_this = False
if sentencizer is not None:
if 'question' not in stats:
stats['question'] = {'n_sents': Counter()}
if max_sentences_qa and count_sentences(s=question_text, sentencizer=sentencizer,
counter=stats['question']['n_sents']) > max_sentences_qa:
continue_this = True
if 'answer' not in stats:
stats['answer'] = {'n_sents': Counter()}
if max_sentences_qa and count_sentences(s=answer_text, sentencizer=sentencizer,
counter=stats['answer']['n_sents']) > max_sentences_qa:
continue_this = True
if continue_this:
was_truncated = True
continue
all_answers.append(answer_text)
all_questions.append(question_text)
instance['n_utterances'] += 1
return instance, all_questions, all_answers, was_truncated
def create_instance_from_squad(record, stats, sentencizer=None, max_sentences_qa=1, max_sentences_background=None):
all_questions = []
all_answers = []
was_truncated = False
instance = {}
instance['background'] = record['context']
if max_sentences_background is not None:
if 'background' not in stats:
stats['background'] = {'n_sents': Counter()}
stats['background']['n_sents'][len(instance['background'])] += 1
if len(instance['background']) > max_sentences_background:
was_truncated = True
instance['background'] = instance['background'][:max_sentences_background]
instance['n_utterances'] = 0
# shuffle because impossible questions tend to be at the end
random.shuffle(record['qas'])
for qa in record['qas']:
question_text = qa['question']
if qa['is_impossible']:
answer_text = 'unknown'
else:
allowed_answers = [a['text'] for a in qa['answers']]
answer_text = max(allowed_answers, key=len)
# skip answer-question pairs if number of sentences in one of them > max_sentences
continue_this = False
if sentencizer is not None:
if 'question' not in stats:
stats['question'] = {'n_sents': Counter()}
if max_sentences_qa and count_sentences(s=question_text, sentencizer=sentencizer,
counter=stats['question']['n_sents']) > max_sentences_qa:
continue_this = True
if 'answer' not in stats:
stats['answer'] = {'n_sents': Counter()}
if max_sentences_qa and count_sentences(s=answer_text, sentencizer=sentencizer,
counter=stats['answer']['n_sents']) > max_sentences_qa:
continue_this = True
if continue_this:
was_truncated = True
continue
all_answers.append(answer_text)
all_questions.append(question_text)
instance['n_utterances'] += 1
return instance, all_questions, all_answers, was_truncated
def dataset_split_to_dialog(data, instance_builder=create_instance_from_coqa, n_candidates=20,
create_question_utterances=False, **instance_builder_kargs
):
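    """Convert a dataset split to dialog format: build instances, sample negative answer (and optionally question) candidates, and attach per-utterance histories and candidate lists."""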
instances = []
all_answers = []
all_questions = []
stats = {}
n_skipped = 0
for record in data:
instance, current_questions, current_answers, was_truncated = instance_builder(
record=record, stats=stats, **instance_builder_kargs)
if was_truncated:
n_skipped += 1
continue
instances.append(instance)
all_questions.extend(current_questions)
all_answers.extend(current_answers)
logger.info('data created (skipped %i out of %i)' % (n_skipped, len(instances) + n_skipped))
#logger.info('max_sentences_background: %s' % str(max_sentences_background))
#logger.info('max_sentences_qa: %s' % str(max_sentences_qa))
logger.info(stats)
logger.info('sample negative answers...')
sampled_neg_answers = sample_neg_candidates(instances=all_answers, n_candidates=n_candidates)
sampled_neg_questions = None
if create_question_utterances:
logger.info('sample negative questions...')
sampled_neg_questions = sample_neg_candidates(instances=all_questions, n_candidates=n_candidates)
logger.info('negative samples created')
#all_candidates = np.concatenate([sampled_neg_answers.T, [all_answers]]).T
i = 0
for instance in instances:
instance['utterances'] = []
history = []
#for j, utterance in enumerate(instance['utterances']):
for _ in range(instance['n_utterances']):
if sampled_neg_questions is not None:
new_utterance = {'history': history.copy(),
'candidates': sampled_neg_questions[i].tolist() + [all_questions[i]]}
instance['utterances'].append(new_utterance)
history.append(all_questions[i])
new_utterance = {'history': history.copy(),
'candidates': sampled_neg_answers[i].tolist() + [all_answers[i]]}
instance['utterances'].append(new_utterance)
history.append(all_answers[i])
i += 1
del instance['n_utterances']
logger.info('candidates created')
return instances
def convert_to_dialog(dir='/mnt/DATA/ML/data/corpora/QA/CoQA',
dev='coqa-dev-v1.0.json',
train='coqa-train-v1.0.json',
out=None,
n_candidates=20,
create_question_utterances=False,
data_loader=lambda file_name: json.load(open(file_name))['data'],
instance_builder=create_instance_from_coqa,
**instance_builder_kwargs
):
dev = os.path.join(dir, dev)
train = os.path.join(dir, train)
if out is None:
dataset_name = os.path.basename(dir) or os.path.basename(os.path.dirname(dir))
fn = '%s_converted_dialog' % dataset_name.lower()
if instance_builder_kwargs.get('max_sentences_qa', -1) >= 0:
fn += '_sentsqa%i' % instance_builder_kwargs['max_sentences_qa']
if instance_builder_kwargs.get('max_sentences_background', -1) >= 0:
fn += '_sentsb%i' % instance_builder_kwargs['max_sentences_background']
if create_question_utterances:
fn += '_questionutterances'
out = os.path.join(dir, '%s.json' % fn)
converted = {}
logger.info('convert dev...')
data_dev = list(data_loader(dev))
converted['valid'] = dataset_split_to_dialog(data=data_dev, n_candidates=n_candidates, instance_builder=instance_builder,
create_question_utterances=False, **instance_builder_kwargs)
if create_question_utterances:
converted['valid_questionutterances'] = dataset_split_to_dialog(data=data_dev, n_candidates=n_candidates,
instance_builder=instance_builder,
create_question_utterances=True,
**instance_builder_kwargs)
logger.info('convert train...')
data_train = data_loader(train)
converted['train'] = dataset_split_to_dialog(data=data_train, n_candidates=n_candidates, instance_builder=instance_builder,
create_question_utterances=create_question_utterances,
**instance_builder_kwargs)
logger.info('dump to json: %s ...' % out)
json.dump(converted, open(out, 'w'), indent=2)
return out
def gen_dataset_extract(fn, extract_size=10, start_idx=0):
data = json.load(open(fn))
if start_idx > 0:
fn_out = fn.replace('.json', '_extract%s_start%s.json' % (str(extract_size), str(start_idx)))
else:
fn_out = fn.replace('.json', '_extract%s.json' % str(extract_size))
# print dataset size
for k in data:
logger.info('%s: %i' % (k, len(data[k])))
logger.info('write to: %s' % fn_out)
if extract_size is not None:
data = {k: data[k][start_idx:extract_size+start_idx] for k in data}
json.dump(data, open(fn_out, 'w'), indent=2)
def convert_hotpotqa_wikidump_to_dict(fn, fields=('text', 'title')):
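    """Read a bz2-compressed wiki dump tarball and return a dict mapping article id to the selected fields."""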
entries = {}
with tarfile.open(fn, "r:bz2") as tar:
for tarinfo in tqdm(tar):
f = tar.extractfile(tarinfo)
if f is not None:
uncomp = bz2.decompress(f.read())
for l in uncomp.split(b'\n'):
if l.strip() != b'':
entry = json.loads(l)
entries[int(entry['id'])] = {f: entry[f] for f in fields}
return entries
def dummy_tokenize():
from pytorch_pretrained_bert import OpenAIGPTTokenizer
# OPTIONAL: if you want to have more information on what's happening, activate the logger as follows
import logging
logging.basicConfig(level=logging.INFO)
# Load pre-trained model tokenizer (vocabulary)
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
# Tokenized input
text = "Who was Jim Henson ? Jim Henson was a puppeteer"
tokenized_text = tokenizer.tokenize(text)
return tokenized_text
def convert_coqa(directory='/mnt/DATA/ML/data/corpora/QA/CoQA', create_question_utterances=True, max_sentences_qa=1):
# convert CoQA to conversational QA format
logger.info('load data from directory: %s' % directory)
sentencizer = create_sentencizer() if max_sentences_qa >= 0 else None
return convert_to_dialog(dir=directory,
dev='coqa-dev-v1.0.json',
train='coqa-train-v1.0.json',
out=None,
instance_builder=create_instance_from_coqa, max_sentences_qa=max_sentences_qa,
create_question_utterances=create_question_utterances, sentencizer=sentencizer)
# stats: train: 7199; valid: 500
def convert_squad(directory='/mnt/DATA/ML/data/corpora/QA/SQaAD', create_question_utterances=True, max_sentences_qa=1):
# convert SQaAD to conversational QA format
def squad_data_loader(fn):
data = json.load(open(fn))
for article in data['data']:
for paragraph in article['paragraphs']:
yield paragraph
sentencizer = create_sentencizer() if max_sentences_qa >= 0 else None
logger.info('load data from directory: %s' % directory)
return convert_to_dialog(dir=directory,
dev='dev-v2.0.json',
train='train-v2.0.json',
out=None,
data_loader=squad_data_loader,
instance_builder=create_instance_from_squad, max_sentences_qa=max_sentences_qa,
create_question_utterances=create_question_utterances, sentencizer=sentencizer)
# stats: train: 7199; valid: 500
def main(dataset: ('the dataset', 'positional', None, str, ['CoQA', 'SQuAD']),
*args: ('dataset specific parameters',)):
logger.info('convert %s dataset to dialog format...' % dataset)
if dataset == 'CoQA':
out_fn = plac.call(convert_coqa, args)
elif dataset == 'SQuAD':
out_fn = plac.call(convert_squad, args)
else:
raise NotImplementedError('no converter for dataset "%s" implemented' % dataset)
gen_dataset_extract(fn=out_fn, extract_size=10, start_idx=0)
#x = dummy_tokenize()
logger.info('done')
if __name__ == '__main__':
plac.call(main)
| [] |
2024-01-10 | joelborellis/autogen-sandbox | autogen_test.py | import os
import openai
from openai import OpenAI
from dotenv import load_dotenv
import autogen
from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent
load_dotenv()
openai_model: str = os.environ.get("OPENAI_MODEL")
openai.api_key = os.environ.get("OPENAI_API_KEY")
# create client for OpenAI
client = OpenAI(api_key=openai.api_key)
if __name__ == '__main__':
file_stats = client.files.create(
file=open("./data/nfl_offensive_stats.csv", "rb"),
purpose='assistants'
)
file_map = client.files.create(
file=open("./data/nfl_offensive_stats_mapping.csv", "rb"),
purpose='assistants'
)
file_map_teams = client.files.create(
file=open("./data/nfl_offensive_stats_mapping_teams.csv", "rb"),
purpose='assistants'
)
coder_assistant = client.beta.assistants.create(
name="Python Developer",
instructions="You are a python developer",
model="gpt-4-1106-preview",
tools = [ { "type": "code_interpreter" } ],
file_ids=[file_stats.id, file_map.id, file_map_teams.id]
)
analyst_assistant = client.beta.assistants.create(
name="Data Analyst",
instructions="You are a data analyst",
model="gpt-4-1106-preview",
tools = [ { "type": "code_interpreter" } ],
file_ids=[file_stats.id, file_map.id, file_map_teams.id]
)
coder_llm_config = {
"assistant_id": coder_assistant.id
}
analyst_llm_config = {
"assistant_id": analyst_assistant.id
}
coder = GPTAssistantAgent(
name="Coder_Assistant",
instructions="""
You are an expert at writing python code to solve problems.
Reply TERMINATE when the task is solved and there is no problem
""",
llm_config=coder_llm_config
)
analyst = GPTAssistantAgent(
name="Data_Analyst",
instructions="""
You are a data analyst that offers insight into data.
""",
llm_config=analyst_llm_config,
)
user_proxy = autogen.UserProxyAgent(
name="UserProxy",
code_execution_config={
"work_dir" : "coding",
},
system_message="Admin"
)
groupchat = autogen.GroupChat(agents=[user_proxy, coder, analyst], messages=[], max_round=10)
manager = autogen.GroupChatManager(groupchat=groupchat)
user_proxy.initiate_chat(
manager,
message="""
What are the player trends.
Give me an overview of the data.
Show the code you used to solve it.
"""
) | [] |
2024-01-10 | joelborellis/autogen-sandbox | autogen_test_search.py | import os
import openai
from openai import OpenAI
from dotenv import load_dotenv
import autogen
from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent
from backend.tools.searchtool import Search
load_dotenv()
openai_model: str = os.environ.get("OPENAI_MODEL")
# create client for OpenAI
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
config_list_gpt4 = autogen.config_list_from_json(
"OAI_CONFIG_LIST",
filter_dict={
"model": ["gpt-4-1106-preview", "gpt-4"],
},
)
gpt4_config = {
"cache_seed": 42, # change the cache_seed for different trials
"temperature": 0,
"config_list": config_list_gpt4,
"timeout": 120,
}
# Function to perform a Shadow Search
def generic_retriever(query, index):
print(f"calling search with - {query} - {index}")
search: Search = Search(index) # get instance of search to query corpus using the name of the index
search_result = search.search_hybrid(query)
return search_result
if __name__ == '__main__':
# Retrieve an existing assistant already setup as an OpenAI Assistant
# this is OpenAI Assistant stuff
retriever_assistant = client.beta.assistants.retrieve(
assistant_id="asst_CqfJXxZLQk6xv2zNzVGU0zVj",
)
# Retrieve an existing assistant already setup as an OpenAI Assistant
# this is OpenAI Assistant stuff
#planner_assistant = client.beta.assistants.retrieve(
# assistant_id="asst_7LH25ZRiZXMk05J7F9NkyDSY",
# )
# define the config including the tools that the assistant has access to
# this will be used by the GPTAssistant Agent that is Shadow Retriever
retriever_config = {
"assistant_id": retriever_assistant.id,
"tools": [
{
"type": "function",
"function": generic_retriever,
}
]
}
# define the config including the tools that the assistant has access to
# this will be used by the GPTAssistant Agent that is Shadow Retriever
#planner_config = {
# "assistant_id": planner_assistant.id,
#}
# this is autogen stuff defining the agent that is going to be in the group
generic_retriever_agent = GPTAssistantAgent(
name="GenericRetriever",
llm_config=retriever_config,
)
generic_retriever_agent.register_function(
function_map={
"generic_retriever": generic_retriever,
}
)
# this is autogen stuff defining the agent that is going to be in the group
#planner = GPTAssistantAgent(
# name="GenericPlanner",
# llm_config=planner_config,
# instructions='''Planner. Suggest a plan. Revise the plan based on feedback from admin, until admin approval.
# The plan may involve GenericRetriever who can retrieve data.
# Explain the plan first. Be clear which step is performed by GenericRetriever.
# '''
#)
# this is autogen stuff defining the agent that is going to be in the group
planner = autogen.AssistantAgent(
name="Planner",
system_message='''Planner. Suggest a plan. Revise the plan based on feedback from admin, until admin approval.
The plan may involve retriever who can retrieve data.
Explain the plan first. Be clear which step is performed by a retriever.
''',
llm_config=gpt4_config,
)
user_proxy = autogen.UserProxyAgent(
name="Admin",
system_message="A human admin. Interact with the planner to discuss the plan. Plan execution needs to be approved by this admin."
)
groupchat = autogen.GroupChat(agents=[user_proxy, generic_retriever_agent, planner], messages=[], max_round=10)
manager = autogen.GroupChatManager(groupchat=groupchat)
print("initiating chat")
user_proxy.initiate_chat(
manager,
message="""
I have a first meeting with a prospect United Healthcare - what do I need to find out and what are the most important things I need to relate to them. Use the index called sales_vector_index.
"""
) | [] |
2024-01-10 | joelborellis/autogen-sandbox | autogen_test_reuse.py | import os
import openai
from openai import OpenAI
from dotenv import load_dotenv
import autogen
from autogen.agentchat.contrib.gpt_assistant_agent import GPTAssistantAgent
load_dotenv()
openai_model: str = os.environ.get("OPENAI_MODEL")
openai.api_key = os.environ.get("OPENAI_API_KEY")
# create client for OpenAI
client = OpenAI(api_key=openai.api_key)
if __name__ == '__main__':
# Retrieve an existing assistant
coder_assistant = client.beta.assistants.retrieve(
assistant_id="asst_BD6LjCitqFEzRIatvu2FNW7X",
)
# Retrieve an existing assistant
analyst_assistant = client.beta.assistants.retrieve(
assistant_id="asst_WLmw3TELCkX7tXUyjVxFskzB",
)
coder_llm_config = {
"assistant_id": coder_assistant.id
}
analyst_llm_config = {
"assistant_id": analyst_assistant.id
}
coder = GPTAssistantAgent(
name="Coder_Assistant",
instructions="""
You are an expert at writing python code to solve problems.
Reply TERMINATE when the task is solved and there is no problem
""",
llm_config=coder_llm_config
)
analyst = GPTAssistantAgent(
name="Data_Analyst",
instructions="""
You are a data analyst that offers insight into data.
""",
llm_config=analyst_llm_config,
)
user_proxy = autogen.UserProxyAgent(
name="UserProxy",
code_execution_config={
"work_dir" : "coding",
},
system_message="Admin"
)
groupchat = autogen.GroupChat(agents=[user_proxy, coder, analyst], messages=[], max_round=10)
manager = autogen.GroupChatManager(groupchat=groupchat)
user_proxy.initiate_chat(
manager,
message="""
        Does Najee Harris perform well against the Cleveland Browns?
"""
) | [] |
2024-01-10 | custom-haystack/haystack | haystack~nodes~retriever~_embedding_encoder.py | import json
import logging
from abc import abstractmethod
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Union
import numpy as np
import requests
import torch
from sentence_transformers import InputExample
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SequentialSampler
from tqdm.auto import tqdm
from transformers import AutoModel, AutoTokenizer
from haystack.document_stores.base import BaseDocumentStore
from haystack.errors import OpenAIError, OpenAIRateLimitError, CohereError
from haystack.modeling.data_handler.dataloader import NamedDataLoader
from haystack.modeling.data_handler.dataset import convert_features_to_dataset, flatten_rename
from haystack.modeling.infer import Inferencer
from haystack.nodes.retriever._losses import _TRAINING_LOSSES
from haystack.schema import Document
from haystack.utils.reflection import retry_with_exponential_backoff
if TYPE_CHECKING:
from haystack.nodes.retriever import EmbeddingRetriever
logger = logging.getLogger(__name__)
class _BaseEmbeddingEncoder:
@abstractmethod
def embed_queries(self, queries: List[str]) -> np.ndarray:
"""
Create embeddings for a list of queries.
:param queries: List of queries to embed.
:return: Embeddings, one per input query, shape: (queries, embedding_dim)
"""
pass
@abstractmethod
def embed_documents(self, docs: List[Document]) -> np.ndarray:
"""
Create embeddings for a list of documents.
:param docs: List of documents to embed.
:return: Embeddings, one per input document, shape: (documents, embedding_dim)
"""
pass
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: int = None,
batch_size: int = 16,
):
"""
Trains or adapts the underlying embedding model.
Each training data example is a dictionary with the following keys:
* question: The question string.
* pos_doc: Positive document string (the document containing the answer).
* neg_doc: Negative document string (the document that doesn't contain the answer).
* score: The score margin the answer must fall within.
:param training_data: The training data in a dictionary format. Required.
:type training_data: List[Dict[str, Any]]
:param learning_rate: The speed at which the model learns. Required. We recommend that you leave the default `2e-5` value.
:type learning_rate: float
:param n_epochs: The number of epochs (complete passes of the training data through the algorithm) that you want the model to go through. Required.
:type n_epochs: int
:param num_warmup_steps: The number of warmup steps for the model. Warmup steps are epochs when the learning rate is very low. You can use them at the beginning of the training to prevent early overfitting of your model. Required.
:type num_warmup_steps: int
:param batch_size: The batch size to use for the training. Optional. The default values is 16.
:type batch_size: int (optional)
"""
pass
def save(self, save_dir: Union[Path, str]):
"""
Save the model to the directory you specify.
:param save_dir: The directory where the model is saved. Required.
:type save_dir: Union[Path, str]
"""
pass
def _check_docstore_similarity_function(self, document_store: BaseDocumentStore, model_name: str):
"""
Check that document_store uses a similarity function
compatible with the embedding model
"""
if "sentence-transformers" in model_name.lower():
model_similarity = None
if "-cos-" in model_name.lower():
model_similarity = "cosine"
elif "-dot-" in model_name.lower():
model_similarity = "dot_product"
if model_similarity is not None and document_store.similarity != model_similarity:
logger.warning(
f"You seem to be using {model_name} model with the {document_store.similarity} function instead of the recommended {model_similarity}. "
f"This can be set when initializing the DocumentStore"
)
elif "dpr" in model_name.lower() and document_store.similarity != "dot_product":
logger.warning(
f"You seem to be using a DPR model with the {document_store.similarity} function. "
f"We recommend using dot_product instead. "
f"This can be set when initializing the DocumentStore"
)
class _DefaultEmbeddingEncoder(_BaseEmbeddingEncoder):
def __init__(self, retriever: "EmbeddingRetriever"):
self.embedding_model = Inferencer.load(
retriever.embedding_model,
revision=retriever.model_version,
task_type="embeddings",
extraction_strategy=retriever.pooling_strategy,
extraction_layer=retriever.emb_extraction_layer,
gpu=retriever.use_gpu,
batch_size=retriever.batch_size,
max_seq_len=retriever.max_seq_len,
num_processes=0,
use_auth_token=retriever.use_auth_token,
)
if retriever.document_store:
self._check_docstore_similarity_function(
document_store=retriever.document_store, model_name=retriever.embedding_model
)
def embed(self, texts: Union[List[List[str]], List[str], str]) -> np.ndarray:
        # TODO: FARM's `sample_to_features_text` needs to fix the following warning -
# tokenization_utils.py:460: FutureWarning: `is_pretokenized` is deprecated and will be removed in a future version, use `is_split_into_words` instead.
emb = self.embedding_model.inference_from_dicts(dicts=[{"text": t} for t in texts])
emb = np.stack([r["vec"] for r in emb])
return emb
def embed_queries(self, queries: List[str]) -> np.ndarray:
"""
Create embeddings for a list of queries.
:param queries: List of queries to embed.
:return: Embeddings, one per input query, shape: (queries, embedding_dim)
"""
return self.embed(queries)
def embed_documents(self, docs: List[Document]) -> np.ndarray:
"""
Create embeddings for a list of documents.
:param docs: List of documents to embed.
:return: Embeddings, one per input document, shape: (documents, embedding_dim)
"""
passages = [d.content for d in docs]
return self.embed(passages)
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: int = None,
batch_size: int = 16,
):
raise NotImplementedError(
"You can't train this retriever. You can only use the `train` method with sentence-transformers EmbeddingRetrievers."
)
def save(self, save_dir: Union[Path, str]):
raise NotImplementedError(
"You can't save your record as `save` only works for sentence-transformers EmbeddingRetrievers."
)
class _SentenceTransformersEmbeddingEncoder(_BaseEmbeddingEncoder):
def __init__(self, retriever: "EmbeddingRetriever"):
# pretrained embedding models coming from: https://github.com/UKPLab/sentence-transformers#pretrained-models
# e.g. 'roberta-base-nli-stsb-mean-tokens'
try:
from sentence_transformers import SentenceTransformer
except (ImportError, ModuleNotFoundError) as ie:
from haystack.utils.import_utils import _optional_component_not_installed
_optional_component_not_installed(__name__, "sentence", ie)
self.embedding_model = SentenceTransformer(
retriever.embedding_model, device=str(retriever.devices[0]), use_auth_token=retriever.use_auth_token
)
self.batch_size = retriever.batch_size
self.embedding_model.max_seq_length = retriever.max_seq_len
self.show_progress_bar = retriever.progress_bar
if retriever.document_store:
self._check_docstore_similarity_function(
document_store=retriever.document_store, model_name=retriever.embedding_model
)
def embed(self, texts: Union[List[str], str]) -> np.ndarray:
# texts can be a list of strings
# get back list of numpy embedding vectors
emb = self.embedding_model.encode(
texts, batch_size=self.batch_size, show_progress_bar=self.show_progress_bar, convert_to_numpy=True
)
return emb
def embed_queries(self, queries: List[str]) -> np.ndarray:
"""
Create embeddings for a list of queries.
:param queries: List of queries to embed.
:return: Embeddings, one per input query, shape: (queries, embedding_dim)
"""
return self.embed(queries)
def embed_documents(self, docs: List[Document]) -> np.ndarray:
"""
Create embeddings for a list of documents.
:param docs: List of documents to embed.
:return: Embeddings, one per input document, shape: (documents, embedding_dim)
"""
passages = [d.content for d in docs]
return self.embed(passages)
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: int = None,
batch_size: int = 16,
train_loss: str = "mnrl",
):
if train_loss not in _TRAINING_LOSSES:
raise ValueError(f"Unrecognized train_loss {train_loss}. Should be one of: {_TRAINING_LOSSES.keys()}")
st_loss = _TRAINING_LOSSES[train_loss]
train_examples = []
for train_i in training_data:
missing_attrs = st_loss.required_attrs.difference(set(train_i.keys()))
if len(missing_attrs) > 0:
raise ValueError(
f"Some training examples don't contain the fields {missing_attrs} which are necessary when using the '{train_loss}' loss."
)
texts = [train_i["question"], train_i["pos_doc"]]
if "neg_doc" in train_i:
texts.append(train_i["neg_doc"])
if "score" in train_i:
train_examples.append(InputExample(texts=texts, label=train_i["score"]))
else:
train_examples.append(InputExample(texts=texts))
logger.info("Training/adapting %s with %s examples", self.embedding_model, len(train_examples))
train_dataloader = DataLoader(train_examples, batch_size=batch_size, drop_last=True, shuffle=True)
train_loss = st_loss.loss(self.embedding_model)
# Tune the model
self.embedding_model.fit(
train_objectives=[(train_dataloader, train_loss)],
epochs=n_epochs,
optimizer_params={"lr": learning_rate},
warmup_steps=int(len(train_dataloader) * 0.1) if num_warmup_steps is None else num_warmup_steps,
)
def save(self, save_dir: Union[Path, str]):
self.embedding_model.save(path=str(save_dir))
class _RetribertEmbeddingEncoder(_BaseEmbeddingEncoder):
def __init__(self, retriever: "EmbeddingRetriever"):
self.progress_bar = retriever.progress_bar
self.batch_size = retriever.batch_size
self.max_length = retriever.max_seq_len
self.embedding_tokenizer = AutoTokenizer.from_pretrained(
retriever.embedding_model, use_auth_token=retriever.use_auth_token
)
self.embedding_model = AutoModel.from_pretrained(
retriever.embedding_model, use_auth_token=retriever.use_auth_token
).to(str(retriever.devices[0]))
def embed_queries(self, queries: List[str]) -> np.ndarray:
"""
Create embeddings for a list of queries.
:param queries: List of queries to embed.
:return: Embeddings, one per input query, shape: (queries, embedding_dim)
"""
query_text = [{"text": q} for q in queries]
dataloader = self._create_dataloader(query_text)
embeddings: List[np.ndarray] = []
disable_tqdm = True if len(dataloader) == 1 else not self.progress_bar
for i, batch in enumerate(tqdm(dataloader, desc=f"Creating Embeddings", unit=" Batches", disable=disable_tqdm)):
batch = {key: batch[key].to(self.embedding_model.device) for key in batch}
with torch.no_grad():
q_reps = (
self.embedding_model.embed_questions(
input_ids=batch["input_ids"], attention_mask=batch["padding_mask"]
)
.cpu()
.numpy()
)
embeddings.append(q_reps)
return np.concatenate(embeddings)
def embed_documents(self, docs: List[Document]) -> np.ndarray:
"""
Create embeddings for a list of documents.
:param docs: List of documents to embed.
:return: Embeddings, one per input document, shape: (documents, embedding_dim)
"""
doc_text = [{"text": d.content} for d in docs]
dataloader = self._create_dataloader(doc_text)
embeddings: List[np.ndarray] = []
disable_tqdm = True if len(dataloader) == 1 else not self.progress_bar
for i, batch in enumerate(tqdm(dataloader, desc=f"Creating Embeddings", unit=" Batches", disable=disable_tqdm)):
batch = {key: batch[key].to(self.embedding_model.device) for key in batch}
with torch.no_grad():
q_reps = (
self.embedding_model.embed_answers(
input_ids=batch["input_ids"], attention_mask=batch["padding_mask"]
)
.cpu()
.numpy()
)
embeddings.append(q_reps)
return np.concatenate(embeddings)
def _create_dataloader(self, text_to_encode: List[dict]) -> NamedDataLoader:
dataset, tensor_names = self.dataset_from_dicts(text_to_encode)
dataloader = NamedDataLoader(
dataset=dataset, sampler=SequentialSampler(dataset), batch_size=self.batch_size, tensor_names=tensor_names
)
return dataloader
def dataset_from_dicts(self, dicts: List[dict]):
texts = [x["text"] for x in dicts]
tokenized_batch = self.embedding_tokenizer(
texts,
return_token_type_ids=True,
return_attention_mask=True,
max_length=self.max_length,
truncation=True,
padding=True,
)
features_flat = flatten_rename(
tokenized_batch,
["input_ids", "token_type_ids", "attention_mask"],
["input_ids", "segment_ids", "padding_mask"],
)
dataset, tensornames = convert_features_to_dataset(features=features_flat)
return dataset, tensornames
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: int = None,
batch_size: int = 16,
):
raise NotImplementedError(
"You can't train this retriever. You can only use the `train` method with sentence-transformers EmbeddingRetrievers."
)
def save(self, save_dir: Union[Path, str]):
raise NotImplementedError(
"You can't save your record as `save` only works for sentence-transformers EmbeddingRetrievers."
)
class _OpenAIEmbeddingEncoder(_BaseEmbeddingEncoder):
def __init__(self, retriever: "EmbeddingRetriever"):
# See https://beta.openai.com/docs/guides/embeddings for more details
# OpenAI has a max seq length of 2048 tokens and unknown max batch size
self.max_seq_len = min(2048, retriever.max_seq_len)
self.url = "https://api.openai.com/v1/embeddings"
self.api_key = retriever.api_key
self.batch_size = min(64, retriever.batch_size)
self.progress_bar = retriever.progress_bar
model_class: str = next(
(m for m in ["ada", "babbage", "davinci", "curie"] if m in retriever.embedding_model), "babbage"
)
self.query_model_encoder_engine = f"text-search-{model_class}-query-001"
self.doc_model_encoder_engine = f"text-search-{model_class}-doc-001"
self.tokenizer = AutoTokenizer.from_pretrained("gpt2")
def _ensure_text_limit(self, text: str) -> str:
"""
Ensure that length of the text is within the maximum length of the model.
OpenAI embedding models have a limit of 2048 tokens
"""
tokenized_payload = self.tokenizer(text)
return self.tokenizer.decode(tokenized_payload["input_ids"][: self.max_seq_len])
@retry_with_exponential_backoff(backoff_in_seconds=10, max_retries=5)
def embed(self, model: str, text: List[str]) -> np.ndarray:
payload = {"model": model, "input": text}
headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
response = requests.request("POST", self.url, headers=headers, data=json.dumps(payload), timeout=30)
res = json.loads(response.text)
if response.status_code != 200:
openai_error: OpenAIError
if response.status_code == 429:
openai_error = OpenAIRateLimitError(f"API rate limit exceeded: {response.text}")
else:
openai_error = OpenAIError(
f"OpenAI returned an error.\n"
f"Status code: {response.status_code}\n"
f"Response body: {response.text}",
status_code=response.status_code,
)
raise openai_error
unordered_embeddings = [(ans["index"], ans["embedding"]) for ans in res["data"]]
ordered_embeddings = sorted(unordered_embeddings, key=lambda x: x[0])
generated_embeddings = [emb[1] for emb in ordered_embeddings]
return np.array(generated_embeddings)
def embed_batch(self, model: str, text: List[str]) -> np.ndarray:
all_embeddings = []
for i in tqdm(
range(0, len(text), self.batch_size), disable=not self.progress_bar, desc="Calculating embeddings"
):
batch = text[i : i + self.batch_size]
batch_limited = [self._ensure_text_limit(content) for content in batch]
generated_embeddings = self.embed(model, batch_limited)
all_embeddings.append(generated_embeddings)
return np.concatenate(all_embeddings)
def embed_queries(self, queries: List[str]) -> np.ndarray:
return self.embed_batch(self.query_model_encoder_engine, queries)
def embed_documents(self, docs: List[Document]) -> np.ndarray:
return self.embed_batch(self.doc_model_encoder_engine, [d.content for d in docs])
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: int = None,
batch_size: int = 16,
):
raise NotImplementedError(f"Training is not implemented for {self.__class__}")
def save(self, save_dir: Union[Path, str]):
raise NotImplementedError(f"Saving is not implemented for {self.__class__}")
class _CohereEmbeddingEncoder(_BaseEmbeddingEncoder):
def __init__(self, retriever: "EmbeddingRetriever"):
# See https://docs.cohere.ai/embed-reference/ for more details
# Cohere has a max seq length of 4096 tokens and a max batch size of 16
self.max_seq_len = min(4096, retriever.max_seq_len)
self.url = "https://api.cohere.ai/embed"
self.api_key = retriever.api_key
self.batch_size = min(16, retriever.batch_size)
self.progress_bar = retriever.progress_bar
self.model: str = next((m for m in ["small", "medium", "large"] if m in retriever.embedding_model), "large")
self.tokenizer = AutoTokenizer.from_pretrained("gpt2")
def _ensure_text_limit(self, text: str) -> str:
"""
Ensure that length of the text is within the maximum length of the model.
Cohere embedding models have a limit of 4096 tokens
"""
tokenized_payload = self.tokenizer(text)
return self.tokenizer.decode(tokenized_payload["input_ids"][: self.max_seq_len])
@retry_with_exponential_backoff(backoff_in_seconds=10, max_retries=5, errors=(CohereError,))
def embed(self, model: str, text: List[str]) -> np.ndarray:
payload = {"model": model, "texts": text}
headers = {"Authorization": f"BEARER {self.api_key}", "Content-Type": "application/json"}
response = requests.request("POST", self.url, headers=headers, data=json.dumps(payload), timeout=30)
res = json.loads(response.text)
if response.status_code != 200:
raise CohereError(response.text, status_code=response.status_code)
generated_embeddings = [e for e in res["embeddings"]]
return np.array(generated_embeddings)
def embed_batch(self, text: List[str]) -> np.ndarray:
all_embeddings = []
for i in tqdm(
range(0, len(text), self.batch_size), disable=not self.progress_bar, desc="Calculating embeddings"
):
batch = text[i : i + self.batch_size]
batch_limited = [self._ensure_text_limit(content) for content in batch]
generated_embeddings = self.embed(self.model, batch_limited)
all_embeddings.append(generated_embeddings)
return np.concatenate(all_embeddings)
def embed_queries(self, queries: List[str]) -> np.ndarray:
return self.embed_batch(queries)
def embed_documents(self, docs: List[Document]) -> np.ndarray:
return self.embed_batch([d.content for d in docs])
def train(
self,
training_data: List[Dict[str, Any]],
learning_rate: float = 2e-5,
n_epochs: int = 1,
num_warmup_steps: int = None,
batch_size: int = 16,
):
raise NotImplementedError(f"Training is not implemented for {self.__class__}")
def save(self, save_dir: Union[Path, str]):
raise NotImplementedError(f"Saving is not implemented for {self.__class__}")
_EMBEDDING_ENCODERS: Dict[str, Callable] = {
"farm": _DefaultEmbeddingEncoder,
"transformers": _DefaultEmbeddingEncoder,
"sentence_transformers": _SentenceTransformersEmbeddingEncoder,
"retribert": _RetribertEmbeddingEncoder,
"openai": _OpenAIEmbeddingEncoder,
"cohere": _CohereEmbeddingEncoder,
}
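# Illustrative sketch (not part of the original module): how a caller might resolve an
# encoder from the registry above. `retriever` is assumed to be an EmbeddingRetriever
# instance and `model_format` one of the registry keys; the function name is hypothetical.
def _example_resolve_encoder(retriever, model_format: str = "sentence_transformers"):
    encoder_cls = _EMBEDDING_ENCODERS[model_format]
    return encoder_cls(retriever)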
| [] |
2024-01-10 | custom-haystack/haystack | test~others~test_utils.py | import logging
from random import random
import numpy as np
import pytest
import pandas as pd
from pathlib import Path
import responses
from responses import matchers
from haystack.errors import OpenAIRateLimitError
from haystack.utils.deepsetcloud import DeepsetCloud, DeepsetCloudExperiments
from haystack.utils.preprocessing import convert_files_to_docs, tika_convert_files_to_docs
from haystack.utils.cleaning import clean_wiki_text
from haystack.utils.augment_squad import augment_squad
from haystack.utils.reflection import retry_with_exponential_backoff
from haystack.utils.squad_data import SquadData
from haystack.utils.context_matching import calculate_context_similarity, match_context, match_contexts
from ..conftest import DC_API_ENDPOINT, DC_API_KEY, MOCK_DC, SAMPLES_PATH, deepset_cloud_fixture
TEST_CONTEXT = context = """Der Merkantilismus förderte Handel und Verkehr mit teils marktkonformen, teils dirigistischen Maßnahmen.
An der Schwelle zum 19. Jahrhundert entstand ein neuer Typus des Nationalstaats, der die Säkularisation durchsetzte,
moderne Bildungssysteme etablierte und die Industrialisierung vorantrieb.\n
Beim Begriff der Aufklärung geht es auch um die Prozesse zwischen diesen frühneuzeitlichen Eckpunkten.
Man versucht die fortschrittlichen Faktoren zu definieren, die in das 19. Jahrhundert führten.
Widerstände gegen diesen Fortschritt werden anti-aufklärerischen Kräften oder unreflektierten Traditionen zugeordnet.
Die Epochendefinition rückt vor allem publizistisch tätige Gruppen in den gesellschaftlichen Fokus,
die zunächst selten einen bürgerlichen Hintergrund aufwiesen, sondern weitaus häufiger der Geistlichkeit oder Aristokratie angehörten:
Wissenschaftler, Journalisten, Autoren, sogar Regenten, die Traditionen der Kritik unterzogen, indem sie sich auf die Vernunftperspektive beriefen."""
TEST_CONTEXT_2 = """Beer is one of the oldest[1][2][3] and most widely consumed[4] alcoholic drinks in the world, and the third most popular drink overall after water and tea.[5] It is produced by the brewing and fermentation of starches, mainly derived from cereal grains—most commonly from malted barley, though wheat, maize (corn), rice, and oats are also used. During the brewing process, fermentation of the starch sugars in the wort produces ethanol and carbonation in the resulting beer.[6] Most modern beer is brewed with hops, which add bitterness and other flavours and act as a natural preservative and stabilizing agent. Other flavouring agents such as gruit, herbs, or fruits may be included or used instead of hops. In commercial brewing, the natural carbonation effect is often removed during processing and replaced with forced carbonation.[7]
Some of humanity's earliest known writings refer to the production and distribution of beer: the Code of Hammurabi included laws regulating beer and beer parlours,[8] and "The Hymn to Ninkasi", a prayer to the Mesopotamian goddess of beer, served as both a prayer and as a method of remembering the recipe for beer in a culture with few literate people.[9][10]
Beer is distributed in bottles and cans and is also commonly available on draught, particularly in pubs and bars. The brewing industry is a global business, consisting of several dominant multinational companies and many thousands of smaller producers ranging from brewpubs to regional breweries. The strength of modern beer is usually around 4% to 6% alcohol by volume (ABV), although it may vary between 0.5% and 20%, with some breweries creating examples of 40% ABV and above.[11]
Beer forms part of the culture of many nations and is associated with social traditions such as beer festivals, as well as a rich pub culture involving activities like pub crawling, pub quizzes and pub games.
When beer is distilled, the resulting liquor is a form of whisky.[12]
"""
def test_convert_files_to_docs():
documents = convert_files_to_docs(
dir_path=(SAMPLES_PATH).absolute(), clean_func=clean_wiki_text, split_paragraphs=True
)
assert documents and len(documents) > 0
@pytest.mark.tika
def test_tika_convert_files_to_docs():
documents = tika_convert_files_to_docs(dir_path=SAMPLES_PATH, clean_func=clean_wiki_text, split_paragraphs=True)
assert documents and len(documents) > 0
def test_squad_augmentation():
input_ = SAMPLES_PATH / "squad" / "tiny.json"
output = SAMPLES_PATH / "squad" / "tiny_augmented.json"
    glove_path = SAMPLES_PATH / "glove" / "tiny.txt" # dummy glove file, will not even be used when augmenting tiny.json
multiplication_factor = 5
augment_squad(
model="distilbert-base-uncased",
tokenizer="distilbert-base-uncased",
squad_path=input_,
output_path=output,
glove_path=glove_path,
multiplication_factor=multiplication_factor,
)
original_squad = SquadData.from_file(input_)
augmented_squad = SquadData.from_file(output)
assert original_squad.count(unit="paragraph") == augmented_squad.count(unit="paragraph") * multiplication_factor
def test_squad_to_df():
df = pd.DataFrame(
[["title", "context", "question", "id", "answer", 1, False]],
columns=["title", "context", "question", "id", "answer_text", "answer_start", "is_impossible"],
)
expected_result = [
{
"title": "title",
"paragraphs": [
{
"context": "context",
"qas": [
{
"question": "question",
"id": "id",
"answers": [{"text": "answer", "answer_start": 1}],
"is_impossible": False,
}
],
}
],
}
]
result = SquadData.df_to_data(df)
assert result == expected_result
def test_calculate_context_similarity_on_parts_of_whole_document():
whole_document = TEST_CONTEXT
min_length = 100
margin = 5
context_size = min_length + margin
for i in range(len(whole_document) - context_size):
partial_context = whole_document[i : i + context_size]
score = calculate_context_similarity(partial_context, whole_document, min_length=min_length)
assert score == 100.0
def test_calculate_context_similarity_on_parts_of_whole_document_different_case():
whole_document = TEST_CONTEXT
min_length = 100
margin = 5
context_size = min_length + margin
for i in range(len(whole_document) - context_size):
partial_context = whole_document[i : i + context_size].lower()
score = calculate_context_similarity(partial_context, whole_document, min_length=min_length)
assert score == 100.0
def test_calculate_context_similarity_on_parts_of_whole_document_different_whitespace():
whole_document = TEST_CONTEXT
words = whole_document.split()
min_length = 100
context_word_size = 20
for i in range(len(words) - context_word_size):
partial_context = "\n\t\t\t".join(words[i : i + context_word_size])
score = calculate_context_similarity(partial_context, whole_document, min_length=min_length)
assert score == 100.0
def test_calculate_context_similarity_min_length():
whole_document = TEST_CONTEXT
min_length = 100
context_size = min_length - 1
for i in range(len(whole_document) - context_size):
partial_context = whole_document[i : i + context_size]
score = calculate_context_similarity(partial_context, whole_document, min_length=min_length)
assert score == 0.0
def test_calculate_context_similarity_on_partially_overlapping_contexts():
whole_document = TEST_CONTEXT
min_length = 100
margin = 5
context_size = min_length + margin
stride = context_size // 2
for i in range(len(whole_document) - context_size - stride):
partial_context_1 = whole_document[i : i + context_size]
partial_context_2 = whole_document[i + stride : i + stride + context_size]
score = calculate_context_similarity(partial_context_1, partial_context_2, min_length=min_length)
assert score >= 65.0
def test_calculate_context_similarity_on_non_matching_contexts():
whole_document = TEST_CONTEXT
min_length = 100
margin = 5
context_size = min_length + margin
scores = []
for i in range(len(whole_document) - context_size):
partial_context = whole_document[i : i + context_size // 2] + _get_random_chars(context_size // 2)
score = calculate_context_similarity(partial_context, whole_document, min_length=min_length)
scores.append(score)
for i in range(len(whole_document) - context_size):
partial_context = (
_get_random_chars(context_size // 2) + whole_document[i + context_size // 2 : i + context_size]
)
score = calculate_context_similarity(partial_context, whole_document, min_length=min_length)
scores.append(score)
accuracy = np.where(np.array(scores) < 65, 1, 0).mean()
assert accuracy > 0.99
def test_calculate_context_similarity_on_parts_of_whole_document_with_noise():
whole_document = TEST_CONTEXT
min_length = 100
margin = 5
context_size = min_length + margin
for i in range(len(whole_document) - context_size):
partial_context = _insert_noise(whole_document[i : i + context_size], 0.1)
score = calculate_context_similarity(partial_context, whole_document, min_length=min_length)
assert score >= 85.0
def test_calculate_context_similarity_on_partially_overlapping_contexts_with_noise():
whole_document = TEST_CONTEXT
min_length = 100
margin = 5
context_size = min_length + margin
stride = context_size // 2
scores = []
for i in range(len(whole_document) - context_size - stride):
partial_context_1 = whole_document[i : i + context_size]
partial_context_2 = _insert_noise(whole_document[i + stride : i + stride + context_size], 0.1)
score = calculate_context_similarity(partial_context_1, partial_context_2, min_length=min_length)
scores.append(score)
accuracy = np.where(np.array(scores) >= 65, 1, 0).mean()
assert accuracy > 0.99
def test_match_context_multi_process():
whole_document = TEST_CONTEXT[:300]
min_length = 100
margin = 5
context_size = min_length + margin
for i in range(len(whole_document) - context_size):
partial_context = whole_document[i : i + context_size]
candidates = ((str(i), TEST_CONTEXT if i == 0 else TEST_CONTEXT_2) for i in range(1000))
results = match_context(partial_context, candidates, min_length=min_length, num_processes=2)
assert len(results) == 1
id, score = results[0]
assert id == "0"
assert score == 100.0
def test_match_context_single_process():
whole_document = TEST_CONTEXT
min_length = 100
margin = 5
context_size = min_length + margin
for i in range(len(whole_document) - context_size):
partial_context = whole_document[i : i + context_size]
candidates = ((str(i), TEST_CONTEXT if i == 0 else TEST_CONTEXT_2) for i in range(10))
results = match_context(partial_context, candidates, min_length=min_length, num_processes=1)
assert len(results) == 1
id, score = results[0]
assert id == "0"
assert score == 100.0
def test_match_contexts_multi_process():
whole_document = TEST_CONTEXT
min_length = 100
margin = 5
context_size = min_length + margin
candidates = ((str(i), TEST_CONTEXT if i == 0 else TEST_CONTEXT_2) for i in range(10))
partial_contexts = [whole_document[i : i + context_size] for i in range(len(whole_document) - context_size)]
result_list = match_contexts(partial_contexts, candidates, min_length=min_length, num_processes=2)
assert len(result_list) == len(partial_contexts)
for results in result_list:
assert len(results) == 1
id, score = results[0]
assert id == "0"
assert score == 100.0
def _get_random_chars(size: int):
chars = np.random.choice(
list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZß?/.,;:-#äöüÄÖÜ+*~1234567890$€%&!§ "), size=size
)
return "".join(list(chars))
def _insert_noise(input: str, ratio):
size = int(ratio * len(input))
insert_idxs = sorted(np.random.choice(range(len(input)), size=size, replace=False), reverse=True)
insert_chars = np.random.choice(
list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZß?/.,;:-#äöüÄÖÜ+*~1234567890$€%&!§"), size=size
)
for idx, char in zip(insert_idxs, insert_chars):
input = input[:idx] + char + input[idx:]
return input
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_upload_file_to_deepset_cloud(caplog):
if MOCK_DC:
responses.add(
method=responses.POST,
url=f"{DC_API_ENDPOINT}/workspaces/default/files",
json={"file_id": "abc"},
status=200,
)
responses.add(
method=responses.POST,
url=f"{DC_API_ENDPOINT}/workspaces/default/files",
json={"file_id": "def"},
status=200,
)
responses.add(
method=responses.POST,
url=f"{DC_API_ENDPOINT}/workspaces/default/files",
json={"file_id": "def"},
status=200,
)
client = DeepsetCloud.get_file_client(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY)
file_paths = [
SAMPLES_PATH / "docx/sample_docx.docx",
SAMPLES_PATH / "pdf/sample_pdf_1.pdf",
SAMPLES_PATH / "docs/doc_1.txt",
]
metas = [{"file_id": "sample_docx.docx"}, {"file_id": "sample_pdf_1.pdf"}, {"file_id": "doc_1.txt"}]
with caplog.at_level(logging.INFO):
client.upload_files(file_paths=file_paths, metas=metas)
assert f"Successfully uploaded {len(file_paths)} files." in caplog.text
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_upload_file_to_deepset_cloud_file_fails(caplog):
if MOCK_DC:
responses.add(
method=responses.POST,
url=f"{DC_API_ENDPOINT}/workspaces/default/files",
json={"file_id": "abc"},
status=200,
)
responses.add(
method=responses.POST,
url=f"{DC_API_ENDPOINT}/workspaces/default/files",
json={"error": "my-error"},
status=500,
)
responses.add(
method=responses.POST,
url=f"{DC_API_ENDPOINT}/workspaces/default/files",
json={"file_id": "def"},
status=200,
)
client = DeepsetCloud.get_file_client(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY)
file_paths = [
SAMPLES_PATH / "docx/sample_docx.docx",
SAMPLES_PATH / "pdf/sample_pdf_1.pdf",
SAMPLES_PATH / "docs/doc_1.txt",
]
metas = [{"file_id": "sample_docx.docx"}, {"file_id": "sample_pdf_1.pdf"}, {"file_id": "doc_1.txt"}]
with caplog.at_level(logging.INFO):
client.upload_files(file_paths=file_paths, metas=metas)
assert f"Successfully uploaded 2 files." in caplog.text
assert f"Error uploading file" in caplog.text
assert f"my-error" in caplog.text
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_delete_file_to_deepset_cloud():
if MOCK_DC:
responses.add(method=responses.DELETE, url=f"{DC_API_ENDPOINT}/workspaces/default/files/abc", status=200)
client = DeepsetCloud.get_file_client(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY)
client.delete_file(file_id="abc")
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_delete_all_file_to_deepset_cloud():
if MOCK_DC:
responses.add(method=responses.DELETE, url=f"{DC_API_ENDPOINT}/workspaces/default/files", status=200)
client = DeepsetCloud.get_file_client(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY)
client.delete_all_files()
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_list_files_on_deepset_cloud():
if MOCK_DC:
responses.add(
method=responses.GET,
url=f"{DC_API_ENDPOINT}/workspaces/default/files",
json={
"data": [
{
"characters": -1,
"created_at": "2022-05-19T15:40:07.538162+00:00",
"file_id": "b6cdd48b-3db5-488b-a44d-4240c12a96d5",
"languages": [],
"meta": {},
"name": "sample_pdf_1.pdf",
"params": {"id_hash_keys": ["content", "meta"]},
"size": 44524,
"url": "/api/v1/workspaces/e282219f-19b2-41ff-927e-bda4e6e67418/files/b6cdd48b-3db5-488b-a44d-4240c12a96d5",
},
{
"characters": -1,
"created_at": "2022-05-23T12:39:53.393716+00:00",
"file_id": "51e9c2af-5676-453d-9b71-db9a560ae266",
"languages": [],
"meta": {"file_id": "sample_pdf_2.pdf"},
"name": "sample_pdf_2.pdf",
"params": {"id_hash_keys": ["content", "meta"]},
"size": 26093,
"url": "/api/v1/workspaces/e282219f-19b2-41ff-927e-bda4e6e67418/files/51e9c2af-5676-453d-9b71-db9a560ae266",
},
],
"has_more": False,
"total": 2,
},
status=200,
)
client = DeepsetCloud.get_file_client(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY)
files = [f for f in client.list_files()]
assert len(files) == 2
assert files[0]["name"] == "sample_pdf_1.pdf"
assert files[1]["name"] == "sample_pdf_2.pdf"
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_create_eval_run():
if MOCK_DC:
responses.add(
method=responses.POST,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs",
json={"data": {"eval_run_name": "my-eval-run-1"}},
status=200,
match=[
matchers.json_params_matcher(
{
"name": "my-eval-run-1",
"pipeline_name": "my-pipeline-1",
"evaluation_set_name": "my-eval-set-1",
"eval_mode": 0,
"comment": "this is my first run",
"debug": False,
"tags": ["my-experiment-1"],
}
)
],
)
responses.add(
method=responses.GET,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs",
json={
"data": [
{
"created_at": "2022-05-24T12:13:16.445857+00:00",
"eval_mode": 0,
"eval_run_id": "17875c63-7c07-42d8-bb01-4fcd95ce113c",
"name": "my-eval-run-1",
"comment": "this is my first run",
"tags": ["my-experiment-1"],
"eval_run_labels": [],
"logs": {},
"metrics": {
"integrated_exact_match": None,
"integrated_f1": None,
"integrated_sas": None,
"isolated_exact_match": None,
"isolated_f1": None,
"isolated_sas": None,
"mean_average_precision": None,
"mean_reciprocal_rank": None,
"normal_discounted_cummulative_gain": None,
"precision": None,
"recall_multi_hit": None,
"recall_single_hit": None,
},
"parameters": {
"debug": False,
"eval_mode": 0,
"evaluation_set_name": "my-eval-set-1",
"pipeline_name": "my-pipeline-1",
},
"status": 1,
}
],
"has_more": False,
"total": 1,
},
status=200,
)
responses.add(
method=responses.GET,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs/my-eval-run-1",
json={
"created_at": "2022-05-24T12:13:16.445857+00:00",
"eval_mode": 0,
"eval_run_id": "17875c63-7c07-42d8-bb01-4fcd95ce113c",
"name": "my-eval-run-1",
"comment": "this is my first run",
"tags": ["my-experiment-1"],
"eval_run_labels": [],
"logs": {},
"metrics": {
"integrated_exact_match": None,
"integrated_f1": None,
"integrated_sas": None,
"isolated_exact_match": None,
"isolated_f1": None,
"isolated_sas": None,
"mean_average_precision": None,
"mean_reciprocal_rank": None,
"normal_discounted_cummulative_gain": None,
"precision": None,
"recall_multi_hit": None,
"recall_single_hit": None,
},
"parameters": {
"debug": False,
"eval_mode": 0,
"evaluation_set_name": "my-eval-set-1",
"pipeline_name": "my-pipeline-1",
},
"status": 1,
},
status=200,
)
client = DeepsetCloud.get_eval_run_client(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY)
client.create_eval_run(
eval_run_name="my-eval-run-1",
pipeline_config_name="my-pipeline-1",
evaluation_set="my-eval-set-1",
eval_mode="integrated",
comment="this is my first run",
tags=["my-experiment-1"],
)
runs = client.get_eval_runs()
assert len(runs) == 1
assert runs[0]["name"] == "my-eval-run-1"
assert runs[0]["tags"] == ["my-experiment-1"]
assert runs[0]["comment"] == "this is my first run"
assert runs[0]["parameters"]["pipeline_name"] == "my-pipeline-1"
assert runs[0]["parameters"]["evaluation_set_name"] == "my-eval-set-1"
run = client.get_eval_run("my-eval-run-1")
assert run["name"] == "my-eval-run-1"
assert run["tags"] == ["my-experiment-1"]
assert run["comment"] == "this is my first run"
assert run["parameters"]["pipeline_name"] == "my-pipeline-1"
assert run["parameters"]["evaluation_set_name"] == "my-eval-set-1"
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_update_eval_run():
if MOCK_DC:
responses.add(
method=responses.POST,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs",
json={"data": {"eval_run_name": "my-eval-run-1"}},
status=200,
match=[
matchers.json_params_matcher(
{
"name": "my-eval-run-1",
"pipeline_name": "my-pipeline-1",
"evaluation_set_name": "my-eval-set-1",
"eval_mode": 0,
"comment": "this is my first run",
"debug": False,
"tags": ["my-experiment-1"],
}
)
],
)
responses.add(
method=responses.GET,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs/my-eval-run-1",
json={
"created_at": "2022-05-24T12:13:16.445857+00:00",
"eval_mode": 0,
"eval_run_id": "17875c63-7c07-42d8-bb01-4fcd95ce113c",
"name": "my-eval-run-1",
"comment": "this is my first run",
"tags": ["my-experiment-1"],
"eval_run_labels": [],
"logs": {},
"metrics": {
"integrated_exact_match": None,
"integrated_f1": None,
"integrated_sas": None,
"isolated_exact_match": None,
"isolated_f1": None,
"isolated_sas": None,
"mean_average_precision": None,
"mean_reciprocal_rank": None,
"normal_discounted_cummulative_gain": None,
"precision": None,
"recall_multi_hit": None,
"recall_single_hit": None,
},
"parameters": {
"debug": False,
"eval_mode": 0,
"evaluation_set_name": "my-eval-set-1",
"pipeline_name": "my-pipeline-1",
},
"status": "CREATED",
},
status=200,
)
responses.add(
method=responses.PATCH,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs/my-eval-run-1",
json={"data": {"eval_run_name": "my-eval-run-1"}},
status=200,
match=[
matchers.json_params_matcher(
{"pipeline_name": "my-pipeline-2", "comment": "this is my first run with second pipeline"}
)
],
)
responses.add(
method=responses.GET,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs/my-eval-run-1",
json={
"created_at": "2022-05-24T12:13:16.445857+00:00",
"eval_mode": 0,
"eval_run_id": "17875c63-7c07-42d8-bb01-4fcd95ce113c",
"name": "my-eval-run-1",
"comment": "this is my first run with second pipeline",
"tags": ["my-experiment-1"],
"eval_run_labels": [],
"logs": {},
"metrics": {
"integrated_exact_match": None,
"integrated_f1": None,
"integrated_sas": None,
"isolated_exact_match": None,
"isolated_f1": None,
"isolated_sas": None,
"mean_average_precision": None,
"mean_reciprocal_rank": None,
"normal_discounted_cummulative_gain": None,
"precision": None,
"recall_multi_hit": None,
"recall_single_hit": None,
},
"parameters": {
"debug": False,
"eval_mode": 0,
"evaluation_set_name": "my-eval-set-1",
"pipeline_name": "my-pipeline-2",
},
"status": "CREATED",
},
status=200,
)
client = DeepsetCloud.get_eval_run_client(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY)
client.create_eval_run(
eval_run_name="my-eval-run-1",
pipeline_config_name="my-pipeline-1",
evaluation_set="my-eval-set-1",
eval_mode="integrated",
comment="this is my first run",
tags=["my-experiment-1"],
)
run = client.get_eval_run("my-eval-run-1")
assert run["name"] == "my-eval-run-1"
assert run["tags"] == ["my-experiment-1"]
assert run["comment"] == "this is my first run"
assert run["parameters"]["pipeline_name"] == "my-pipeline-1"
assert run["parameters"]["evaluation_set_name"] == "my-eval-set-1"
client.update_eval_run(
eval_run_name="my-eval-run-1",
pipeline_config_name="my-pipeline-2",
comment="this is my first run with second pipeline",
)
run = client.get_eval_run("my-eval-run-1")
assert run["name"] == "my-eval-run-1"
assert run["tags"] == ["my-experiment-1"]
assert run["comment"] == "this is my first run with second pipeline"
assert run["parameters"]["pipeline_name"] == "my-pipeline-2"
assert run["parameters"]["evaluation_set_name"] == "my-eval-set-1"
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_start_eval_run():
if MOCK_DC:
responses.add(
method=responses.POST,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs",
json={"data": {"eval_run_name": "my-eval-run-1"}},
status=200,
match=[
matchers.json_params_matcher(
{
"name": "my-eval-run-1",
"pipeline_name": "my-pipeline-1",
"evaluation_set_name": "my-eval-set-1",
"eval_mode": 0,
"comment": "this is my first run",
"debug": False,
"tags": ["my-experiment-1"],
}
)
],
)
responses.add(
method=responses.GET,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs/my-eval-run-1",
json={
"created_at": "2022-05-24T12:13:16.445857+00:00",
"eval_mode": 0,
"eval_run_id": "17875c63-7c07-42d8-bb01-4fcd95ce113c",
"name": "my-eval-run-1",
"comment": "this is my first run",
"tags": ["my-experiment-1"],
"eval_run_labels": [],
"logs": {},
"metrics": {
"integrated_exact_match": None,
"integrated_f1": None,
"integrated_sas": None,
"isolated_exact_match": None,
"isolated_f1": None,
"isolated_sas": None,
"mean_average_precision": None,
"mean_reciprocal_rank": None,
"normal_discounted_cummulative_gain": None,
"precision": None,
"recall_multi_hit": None,
"recall_single_hit": None,
},
"parameters": {
"debug": False,
"eval_mode": 0,
"evaluation_set_name": "my-eval-set-1",
"pipeline_name": "my-pipeline-1",
},
"status": "CREATED",
},
status=200,
)
responses.add(
method=responses.POST,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs/my-eval-run-1/start",
json={},
status=200,
)
responses.add(
method=responses.GET,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs/my-eval-run-1",
json={
"created_at": "2022-05-24T12:13:16.445857+00:00",
"eval_mode": 0,
"eval_run_id": "17875c63-7c07-42d8-bb01-4fcd95ce113c",
"name": "my-eval-run-1",
"comment": "this is my first run",
"tags": ["my-experiment-1"],
"eval_run_labels": [],
"logs": {},
"metrics": {
"integrated_exact_match": None,
"integrated_f1": None,
"integrated_sas": None,
"isolated_exact_match": None,
"isolated_f1": None,
"isolated_sas": None,
"mean_average_precision": None,
"mean_reciprocal_rank": None,
"normal_discounted_cummulative_gain": None,
"precision": None,
"recall_multi_hit": None,
"recall_single_hit": None,
},
"parameters": {
"debug": False,
"eval_mode": 0,
"evaluation_set_name": "my-eval-set-1",
"pipeline_name": "my-pipeline-1",
},
"status": "STARTED",
},
status=200,
)
client = DeepsetCloud.get_eval_run_client(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY)
client.create_eval_run(
eval_run_name="my-eval-run-1",
pipeline_config_name="my-pipeline-1",
evaluation_set="my-eval-set-1",
eval_mode="integrated",
comment="this is my first run",
tags=["my-experiment-1"],
)
run = client.get_eval_run("my-eval-run-1")
assert run["name"] == "my-eval-run-1"
assert run["tags"] == ["my-experiment-1"]
assert run["comment"] == "this is my first run"
assert run["parameters"]["pipeline_name"] == "my-pipeline-1"
assert run["parameters"]["evaluation_set_name"] == "my-eval-set-1"
assert run["status"] == "CREATED"
client.start_eval_run(eval_run_name="my-eval-run-1")
run = client.get_eval_run("my-eval-run-1")
assert run["name"] == "my-eval-run-1"
assert run["tags"] == ["my-experiment-1"]
assert run["comment"] == "this is my first run"
assert run["parameters"]["pipeline_name"] == "my-pipeline-1"
assert run["parameters"]["evaluation_set_name"] == "my-eval-set-1"
assert run["status"] == "STARTED"
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_fetch_predictions_for_node():
mock_prediction = {}
if MOCK_DC:
responses.add(
method=responses.POST,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs",
json={"data": {"eval_run_name": "my-eval-run-1"}},
status=200,
match=[
matchers.json_params_matcher(
{
"name": "my-eval-run-1",
"pipeline_name": "my-pipeline-1",
"evaluation_set_name": "my-eval-set-1",
"eval_mode": 0,
"comment": "this is my first run",
"debug": False,
"tags": ["my-experiment-1"],
}
)
],
)
responses.add(
method=responses.GET,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs",
json={
"data": [
{
"created_at": "2022-05-24T12:13:16.445857+00:00",
"eval_mode": 0,
"eval_run_id": "17875c63-7c07-42d8-bb01-4fcd95ce113c",
"name": "my-eval-run-1",
"comment": "this is my first run",
"tags": ["my-experiment-1"],
"eval_run_labels": [],
"logs": {},
"eval_results": [
{
"node_name": "AnswerNode",
"node_type": "answer_node",
"isolated_exact_match": 1.0,
"isolated_f1": 1.0,
"integrated_exact_match": 0,
"integrated_f1": 0,
}
],
"parameters": {
"debug": False,
"eval_mode": 0,
"evaluation_set_name": "my-eval-set-1",
"pipeline_name": "my-pipeline-1",
},
"status": 1,
}
],
"has_more": False,
"total": 1,
},
status=200,
)
mock_prediction = {
"prediction_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
"created_at": "2022-08-03T13:42:58.968Z",
"updated_at": "2022-08-03T13:42:58.968Z",
"eval_node_result_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
"eval_mode": "Evaluation mode",
"query": "What?",
"context": "This",
"rank": 0,
"document_id": "0",
"filters": [{}],
"labels": [
{
"label_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
"query": "What?",
"answer": "This",
"answer_start": 0,
"answer_end": 3,
"meta": {},
"context": "This",
"external_file_name": "this_file.txt",
"file_id": "3fa85f64-5717-4562-b3fc-2c963f66afa7",
"state": "MATCHED",
"candidates": None,
"answer_exact_match": True,
"f1": 1.0,
"document_id_match": True,
"answer_match": "Answer match",
"context_similarity": "Context similarity",
}
],
"prediction_type": "answer",
"answer": "This",
"exact_match": True,
"f1": 1.0,
"exact_match_context_scope": True,
"f1_document_id_scope": 0,
"exact_match_document_id_and_context_scope": True,
"f1_context_scope": 0,
"f1_document_id_and_context_scope": 0,
"answer_start": "Answer start",
"answer_end": "Answer end",
}
responses.add(
method=responses.GET,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs/my-eval-run-1/nodes/AnswerNode/predictions?page_number=1",
json={"data": [mock_prediction], "has_more": False, "total": 1},
status=200,
)
client = DeepsetCloud.get_eval_run_client(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY)
client.create_eval_run(
eval_run_name="my-eval-run-1",
pipeline_config_name="my-pipeline-1",
evaluation_set="my-eval-set-1",
eval_mode="integrated",
comment="this is my first run",
tags=["my-experiment-1"],
)
predictions = client.get_eval_run_predictions(eval_run_name="my-eval-run-1", node_name="AnswerNode")
assert len(predictions) == 1
assert predictions[0] == mock_prediction
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_delete_eval_run():
if MOCK_DC:
responses.add(
method=responses.POST,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs",
json={"data": {"eval_run_name": "my-eval-run-1"}},
status=200,
match=[
matchers.json_params_matcher(
{
"name": "my-eval-run-1",
"pipeline_name": "my-pipeline-1",
"evaluation_set_name": "my-eval-set-1",
"eval_mode": 0,
"comment": "this is my first run",
"debug": False,
"tags": ["my-experiment-1"],
}
)
],
)
responses.add(
method=responses.GET,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs",
json={
"data": [
{
"created_at": "2022-05-24T12:13:16.445857+00:00",
"eval_mode": 0,
"eval_run_id": "17875c63-7c07-42d8-bb01-4fcd95ce113c",
"name": "my-eval-run-1",
"comment": "this is my first run",
"tags": ["my-experiment-1"],
"eval_run_labels": [],
"logs": {},
"metrics": {
"integrated_exact_match": None,
"integrated_f1": None,
"integrated_sas": None,
"isolated_exact_match": None,
"isolated_f1": None,
"isolated_sas": None,
"mean_average_precision": None,
"mean_reciprocal_rank": None,
"normal_discounted_cummulative_gain": None,
"precision": None,
"recall_multi_hit": None,
"recall_single_hit": None,
},
"parameters": {
"debug": False,
"eval_mode": 0,
"evaluation_set_name": "my-eval-set-1",
"pipeline_name": "my-pipeline-1",
},
"status": 1,
}
],
"has_more": False,
"total": 1,
},
status=200,
)
responses.add(
method=responses.DELETE, url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs/my-eval-run-1", status=204
)
responses.add(
method=responses.GET,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs",
json={"data": [], "has_more": False, "total": 0},
status=200,
)
client = DeepsetCloud.get_eval_run_client(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY)
client.create_eval_run(
eval_run_name="my-eval-run-1",
pipeline_config_name="my-pipeline-1",
evaluation_set="my-eval-set-1",
eval_mode="integrated",
comment="this is my first run",
tags=["my-experiment-1"],
)
runs = client.get_eval_runs()
assert len(runs) == 1
run = client.delete_eval_run("my-eval-run-1")
runs = client.get_eval_runs()
assert len(runs) == 0
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_upload_eval_set(caplog):
if MOCK_DC:
responses.add(
method=responses.POST,
url=f"{DC_API_ENDPOINT}/workspaces/default/evaluation_sets/import",
json={"evaluation_set_id": "c2d06025-2c00-43b5-8f73-b81b12e63afc"},
status=200,
)
client = DeepsetCloud.get_evaluation_set_client(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY)
with caplog.at_level(logging.INFO):
client.upload_evaluation_set(file_path=SAMPLES_PATH / "dc/matching_test_1.csv")
assert f"Successfully uploaded evaluation set file" in caplog.text
assert f"You can access it now under evaluation set 'matching_test_1.csv'." in caplog.text
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_upload_existing_eval_set(caplog):
if MOCK_DC:
responses.add(
method=responses.POST,
url=f"{DC_API_ENDPOINT}/workspaces/default/evaluation_sets/import",
json={"errors": ["Evaluation set with the same name already exists."]},
status=409,
)
client = DeepsetCloud.get_evaluation_set_client(api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY)
with caplog.at_level(logging.INFO):
client.upload_evaluation_set(file_path=SAMPLES_PATH / "dc/matching_test_1.csv")
assert f"Successfully uploaded evaluation set file" not in caplog.text
assert f"You can access it now under evaluation set 'matching_test_1.csv'." not in caplog.text
assert "Evaluation set with the same name already exists." in caplog.text
@pytest.mark.usefixtures(deepset_cloud_fixture.__name__)
@responses.activate
def test_get_eval_run_results():
if MOCK_DC:
responses.add(
method=responses.POST,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs",
json={"data": {"eval_run_name": "my-eval-run-1"}},
status=200,
match=[
matchers.json_params_matcher(
{
"name": "my-eval-run-1",
"pipeline_name": "my-pipeline-1",
"evaluation_set_name": "my-eval-set-1",
"eval_mode": 0,
"comment": "this is my first run",
"debug": False,
"tags": ["my-experiment-1"],
}
)
],
)
responses.add(
method=responses.GET,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs/my-eval-run-1",
json={
"created_at": "2022-05-24T12:13:16.445857+00:00",
"eval_mode": 0,
"eval_run_id": "17875c63-7c07-42d8-bb01-4fcd95ce113c",
"name": "my-eval-run-1",
"comment": "this is my first run",
"tags": ["my-experiment-1"],
"eval_run_labels": [],
"logs": {},
"eval_results": [
{
"node_name": "AnswerNode",
"node_type": "answer_node",
"isolated_exact_match": 1.0,
"isolated_f1": 1.0,
"integrated_exact_match": 0,
"integrated_f1": 0,
}
],
"parameters": {
"debug": False,
"eval_mode": 0,
"evaluation_set_name": "my-eval-set-1",
"pipeline_name": "my-pipeline-1",
},
"status": 1,
},
status=200,
)
mock_prediction = {
"prediction_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
"created_at": "2022-08-03T13:42:58.968Z",
"updated_at": "2022-08-03T13:42:58.968Z",
"eval_node_result_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
"eval_mode": "Evaluation mode",
"query": "What?",
"context": "This",
"rank": 0,
"document_id": "0",
"filters": [{}],
"labels": [
{
"label_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
"query": "What?",
"answer": "This",
"answer_start": 0,
"answer_end": 3,
"meta": {},
"context": "This",
"external_file_name": "this_file.txt",
"file_id": "3fa85f64-5717-4562-b3fc-2c963f66afa7",
"state": "MATCHED",
"candidates": None,
"answer_exact_match": True,
"f1": 1.0,
"document_id_match": True,
"answer_match": True,
"context_similarity": 1.0,
}
],
"prediction_type": "answer",
"answer": "This",
"exact_match": True,
"f1": 1.0,
"exact_match_context_scope": True,
"f1_document_id_scope": 0.0,
"exact_match_document_id_and_context_scope": True,
"f1_context_scope": 0.0,
"f1_document_id_and_context_scope": 0.0,
"answer_start": 1,
"answer_end": 10,
}
responses.add(
method=responses.GET,
url=f"{DC_API_ENDPOINT}/workspaces/default/eval_runs/my-eval-run-1/nodes/AnswerNode/predictions?page_number=1",
json={"data": [mock_prediction], "has_more": False, "total": 1},
status=200,
)
experiments_client = DeepsetCloudExperiments()
eval_run_results = experiments_client.get_run_result(
eval_run_name="my-eval-run-1", api_endpoint=DC_API_ENDPOINT, api_key=DC_API_KEY
)
assert "AnswerNode" in eval_run_results
node_results = eval_run_results["AnswerNode"]
assert isinstance(node_results, pd.DataFrame)
first_result = node_results.iloc[0]
assert first_result["exact_match"] == True
assert first_result["answer"] == "This"
def test_exponential_backoff():
# Test that the exponential backoff works as expected
# should raise exception, check the exception contains the correct message
    with pytest.raises(Exception, match=r"retries \(2\)"):
@retry_with_exponential_backoff(backoff_in_seconds=1, max_retries=2)
def greet(name: str):
if random() < 1.1:
raise OpenAIRateLimitError("Too many requests")
return f"Hello {name}"
greet("John")
# this should not raise exception and should print "Hello John"
@retry_with_exponential_backoff(backoff_in_seconds=1, max_retries=1)
def greet2(name: str):
return f"Hello {name}"
assert greet2("John") == "Hello John"
| [] |
2024-01-10 | michielswaanen/extendable-search | api~src~modalities~conversation~handler.py | import openai
import av
def preprocess(video_path):
input_container = av.open(video_path)
input_stream = input_container.streams.get(audio=0)[0]
output_container = av.open('code/uploads/live_stream.mp3', 'w')
output_stream = output_container.add_stream('mp3')
for frame in input_container.decode(input_stream):
frame.pts = None
for packet in output_stream.encode(frame):
output_container.mux(packet)
for packet in output_stream.encode(None):
output_container.mux(packet)
output_container.close()
def detect():
    audio_file = open("code/uploads/live_stream.mp3", "rb")
print("Transcribing audio...")
transcript = openai.Audio.transcribe("whisper-1", audio_file)
print("Transcript: ", transcript)
def save():
pass
def handle_conversation(video_name):
video_path = 'code/uploads/{filename}'.format(filename=video_name)
preprocess(video_path)
    return 'OK'
| [] |
2024-01-10 | mfalcioni1/talki | script.py | import openai
import os
openai.api_key = os.getenv("OPENAI_API_KEY")
def generate_podcast_script(text, duration, api_key):
openai.api_key = api_key
system_role = "You are a helper bot that helps people write podcast scripts. You are given a paper and a duration. You must write a podcast script based on the paper for the given duration."
prompt = (f"Create a podcast script based on the following academic paper for a duration of {duration} minutes: "
f"{text} \n\n---\n\n"
"Use the following format for your podcast script:\n\n"
"Podcast Script:\n"
"Introduction:\n"
"{introduction}\n\n"
"Main Content:\n"
"{content}\n\n"
"Closing:\n"
"{closing}")
completion = openai.ChatCompletion.create(
model = 'gpt-3.5-turbo-16k',
messages = [
{'role': 'system', 'content': system_role},
{'role': 'user', 'content': prompt}
],
temperature = 0
)
return completion.choices[0].message.content
def load_text_from_file(file_path):
with open(file_path, 'r', encoding='utf-8') as f:
return f.read()
def save_text(text, save_path):
with open(save_path, 'w', encoding='utf-8') as f:
f.write(text)
print(f"Text saved to {save_path}")
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--paper', '-p', type=str, default="paper.txt")
parser.add_argument('--duration', '-d', type=int, default=10)
parser.add_argument('--save_path', '-s', type=str, default="script.txt")
args = parser.parse_args()
paper = load_text_from_file(args.paper)
script = generate_podcast_script(paper, args.duration, openai.api_key)
save_text(script, args.save_path)
if __name__ == "__main__":
main() | [
"You are a helper bot that helps people write podcast scripts. You are given a paper and a duration. You must write a podcast script based on the paper for the given duration.",
"Create a podcast script based on the following academic paper for a duration of PLACEHOLDER minutes: PLACEHOLDER \n\n---\n\nUse the following format for your podcast script:\n\nPodcast Script:\nIntroduction:\n{introduction}\n\nMain Content:\n{content}\n\nClosing:\n{closing}"
] |
2024-01-10 | 0xmayday/FreshGPT | freshgpt.py | from openai import OpenAI
import datetime
import os
client = OpenAI()
thread = client.beta.threads.create()
def get_assistants():
    # Collect every assistant by following the cursor-based pagination of the
    # assistants list endpoint, 20 at a time.
    assistants = []
    page = client.beta.assistants.list(
        order="desc",
        limit="20"
    )
    assistants.extend(page.data)
    while page.has_more:
        page = client.beta.assistants.list(
            order="desc",
            limit="20",
            after=page.data[-1].id
        )
        assistants.extend(page.data)
    return assistants
def get_file(file_id):
file = client.files.retrieve(file_id)
return file
def select_file_to_update(json_data):
# Function to convert epoch time to human-readable format
def epoch_to_human_readable(epoch_time):
return datetime.datetime.fromtimestamp(epoch_time).strftime('%Y-%m-%d %H:%M:%S')
# Display the assistant names and their files with indices
file_indices = {}
current_index = 1
for asst_id, asst_data in json_data.items():
print(f"Assistant: {asst_data['name']}")
for file_info in asst_data.get('files', []):
file_name = file_info['filename']
file_id = file_info['file_id']
last_update = epoch_to_human_readable(file_info['last_update'])
print(f" {current_index}: {file_name} (Last Update: {last_update})")
file_indices[current_index] = (asst_id, file_id, file_name)
current_index += 1
# Ask the user to select a file
try:
selected_index = int(input("Enter the number of the file you want to update: "))
if selected_index in file_indices:
return file_indices[selected_index]
else:
print("Invalid selection. Please try again.")
return None
except ValueError:
print("Invalid input. Please enter a number.")
return None
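# Illustrative shape of the mapping consumed by select_file_to_update() above and
# built in main() below (identifiers shortened for readability):
#   {"asst_123": {"name": "My Assistant",
#                 "files": [{"file_id": "file_abc", "filename": "notes.txt",
#                            "last_update": 1700000000}]}}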
def handle_file(assistant_id, file_id, filename):
# Delete the old file
delete_file = client.files.delete(
file_id
)
# If the file was successfully deleted
if delete_file.deleted:
try:
# Upload the new file
file_content = open(filename, "rb")
file = client.files.create(
file=file_content,
purpose="assistants"
)
# Get list of current files so we can ensure we dont overwrite other files
current_assistant = client.beta.assistants.retrieve(assistant_id)
file_ids = [file.id]
# Get the list of current file ids so we dont clobber them
existing_file_ids = current_assistant.file_ids
for identifier in existing_file_ids:
if identifier == file_id:
continue
else:
file_ids.append(identifier)
# Associate the new file to the current assistant
updated_assistant = client.beta.assistants.update(
assistant_id,
file_ids=file_ids
)
return updated_assistant
        except Exception:
return None
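# Illustrative call (hypothetical ids): handle_file("asst_123", "file_abc", "notes.txt")
# deletes the remote file "file_abc", uploads the local "notes.txt", and re-attaches it
# to assistant "asst_123" alongside the assistant's other files.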
def main():
# Define our main dict
assistant_to_file_mappings = {}
# Get our list of assistants
assistants = get_assistants()
# Iterate through them
for assistant in assistants:
assistant_object = {'name': assistant.name}
files = []
# For each file id, get its filename
for file_id in assistant.file_ids:
file_details = {}
file = get_file(file_id)
file_details['file_id'] = file.id
file_details['filename'] = file.filename
file_details['last_update'] = file.created_at
files.append(file_details)
# Add the files dict to our assistant_object dict
assistant_object['files'] = files
# Add this object to our main dict
assistant_to_file_mappings[assistant.id] = assistant_object
# Iterate through our mappings
while True:
        selected_file = select_file_to_update(assistant_to_file_mappings)
if selected_file:
assistant_id, file_id, filename = selected_file
confirm = input(f"Confirm file selection: {filename} (Y/n): ").lower() or 'y'
if confirm not in ['y', 'yes']:
continue
# Listing files in the current directory
print("Files in current directory:")
files_in_dir = [f for f in os.listdir('.') if os.path.isfile(f)]
for i, file in enumerate(files_in_dir):
print(f"{i + 1}: {file}")
# Selecting a file to update
try:
file_index = int(input("Select a file to update by entering its number: ")) - 1
if 0 <= file_index < len(files_in_dir):
# Call the function to update the file using OpenAI API
updated_assistant = handle_file(assistant_id, file_id, files_in_dir[file_index])
if updated_assistant:
current_files = updated_assistant.file_ids
assistant_object = {'name': updated_assistant.name}
files = []
for file_id in current_files:
file_details = {}
file = get_file(file_id)
file_details['file_id'] = file.id
file_details['filename'] = file.filename
file_details['last_update'] = file.created_at
files.append(file_details)
assistant_object['files'] = files
assistant_to_file_mappings[updated_assistant.id] = assistant_object
print(f"File updated successfully.")
else:
print('[-] File failed to update.')
else:
print("Invalid file selection.")
except ValueError:
print("Please enter a valid number.")
# Check if the user wants to update more files
update_more = input("Do you want to update more files? (Y/n): ").lower() or 'y'
if update_more not in ['y', 'yes']:
break
if __name__ == '__main__':
main()
| [] |
2024-01-10 | pouriamrt/Pet_eHospital_FullStackApp | app~main~routes.py | from app.main import bp
from flask import render_template, request, url_for, jsonify, session
from flask_login import login_required, current_user
from app.extensions import db
from app.models.ContactForm import ContactForm
from openai import OpenAI
import os
from app.models.paid_chats import PaidChats
client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))
departments = ["general", "dental", "orthopedic", "surgery", "ophthalmology"]
@bp.route('/')
@login_required
def index():
global messages
messages=[
{"role": "system", "content": "You are an intelligent assistant for a pet online hospital app that gives a short general and unprofessional solution that they can try."},
{"role": "system", "content": "You are an intelligent assistant for a pet online hospital app that recommends a suitable online hospital department webpage in the app to the customer and nothing more."},
{"role": "system", "content": "You are an intelligent assistant for a pet online hospital which its departments are only [general, dental, orthopedic, surgery, ophthalmology] ."}
]
return render_template('index.html', name=current_user.name)
@bp.route("/get", methods=["POST"])
@login_required
def chat():
msg = request.form["msg"]
reply, recommended_department = get_Chat_response(msg)
if recommended_department:
link_str = ' * Here is the link to the department: <a style="color:red;" href="' + url_for('AI_suggestion.get_suggestion_page', department=recommended_department, _external=True) + '">' + f'The {recommended_department} department</a>'
reply += link_str
messages.append({"role": "assistant", "content": link_str})
return reply
def get_Chat_response(text):
if text:
messages.append({"role": "user", "content": text})
completion = client.chat.completions.create(
model="gpt-3.5-turbo", messages=messages, temperature=0.5
)
reply = completion.choices[0].message.content
        messages.append({"role": "assistant", "content": reply})
recommended_department = ""
for department in departments:
if department in reply.lower():
recommended_department = department
break
return reply, recommended_department
@bp.route('/about')
@login_required
def about():
return render_template('About.html')
@bp.route('/contact')
@login_required
def contact():
return render_template('Contact.html')
@bp.route('/submit_contact_request', methods=['POST'])
@login_required
def submit_contact_request():
name = request.form.get('name')
phone = request.form.get('phone')
email = request.form.get('email')
subject = request.form.get('subject')
message = request.form.get('message')
new_request = ContactForm(name=name, phone=phone, email=email, subject=subject, message=message)
db.session.add(new_request)
db.session.commit()
return jsonify({'success': True})
@bp.route('/my_chats')
@login_required
def my_chats():
paid_chats = PaidChats.query.filter_by(user=session['email']).order_by(PaidChats.timestamp).all()
return render_template('paid_chats.html', paid_chats=paid_chats)
| [
" * Here is the link to the department: <a style=\"color:red;\" href=\"' + url_for('AI_suggestion.get_suggestion_page', department=recommended_department, _external=True) + '\">' + f'The {recommended_department} department</a>",
"You are an intelligent assistant for a pet online hospital app that gives a short general and unprofessional solution that they can try.",
"You are an intelligent assistant for a pet online hospital app that recommends a suitable online hospital department webpage in the app to the customer and nothing more.",
"You are an intelligent assistant for a pet online hospital which its departments are only [general, dental, orthopedic, surgery, ophthalmology] ."
] |
2024-01-10 | ilyamk/PentestGPT | pentestgpt~utils~llm_api.py | import dataclasses
import re
import time
import os
from typing import Any, Dict, List, Tuple
from uuid import uuid1
from pentestgpt.config.chatgpt_config import ChatGPTConfig
import inspect
from tenacity import *
import loguru
import openai, tiktoken
logger = loguru.logger
logger.remove()
# logger.add(level="WARNING", sink="logs/chatgpt.log")
@dataclasses.dataclass
class Message:
ask_id: str = None
ask: dict = None
answer: dict = None
answer_id: str = None
request_start_timestamp: float = None
request_end_timestamp: float = None
time_escaped: float = None
@dataclasses.dataclass
class Conversation:
conversation_id: str = None
message_list: List[Message] = dataclasses.field(default_factory=list)
def __hash__(self):
return hash(self.conversation_id)
def __eq__(self, other):
if not isinstance(other, Conversation):
return False
return self.conversation_id == other.conversation_id
class LLMAPI:
def __init__(self, config: ChatGPTConfig):
self.name = "LLMAPI_base_class"
self.config = config
openai.api_key = config.openai_key
openai.proxy = config.proxies
openai.api_base = config.api_base
self.log_dir = config.log_dir
self.history_length = 5 # maintain 5 messages in the history. (5 chat memory)
self.conversation_dict: Dict[str, Conversation] = {}
logger.add(sink=os.path.join(self.log_dir, "chatgpt.log"), level="WARNING")
def _count_token(self, messages) -> int:
"""
Count the number of tokens in the messages
Parameters
----------
messages: a list of messages
Returns
-------
num_tokens: int
"""
# count the token. Use model gpt-3.5-turbo-0301, which is slightly different from gpt-4
# https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
model = "gpt-3.5-turbo-0301"
tokens_per_message = (
4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
)
tokens_per_name = -1 # if there's a name, the role is omitted
encoding = tiktoken.encoding_for_model(model)
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
for key, value in message.items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
return num_tokens
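    # Worked illustration (assumed example): for a single message
    # {"role": "user", "content": "hi"} this returns
    # 4 (per-message overhead) + len(encode("user")) + len(encode("hi")) + 3 (reply priming).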
def _token_compression(self, complete_messages) -> str:
"""
Compress the message if it is beyond the token limit.
For GPT-4, limit is 8k. Others are set to 16k.
Parameters
----------
complete_messages: dict
Returns
-------
compressed_message: str
"""
if self.config.model == "gpt-4":
token_limit = 8000
else:
token_limit = 14000 # leave some budget
if self._count_token(complete_messages) > token_limit:
# send a separate API request to compress the message
chat_message = [
{
"role": "system",
"content": "You are a helpful assistant.",
},
{
"role": "user",
"content": "Please reduce the word count of the given message to save tokens. Keep its original meaning so that it can be understood by a large language model.",
                },
                {
                    # include the text that actually needs to be compressed
                    "role": "user",
                    "content": complete_messages[-1]["content"],
                },
            ]
compressed_message = self._chat_completion(chat_message)
return compressed_message
# if not compressed, return the last message
raw_message = complete_messages[-1]["content"]
return raw_message
def _chat_completion_fallback(self) -> str:
"""
A fallback method for chat completion.
This method should be overwritten by the child class to use the custom API.
"""
return "fallback"
def _chat_completion(self, history: List, **kwargs) -> str:
"""
Send a chat completion request to the API
This method should be overwritten by the child class to use the custom API.
Given a history of messages, return the response from the API.
Parameters
----------
history: list
A list of messages
**kwargs: dict
Additional arguments to be passed to the API
Returns
-------
response: str
"""
model = "gpt-4"
temperature = 0.5
try:
response = openai.ChatCompletion.create(
model=model,
messages=history,
temperature=temperature,
)
except openai.error.APIConnectionError as e: # give one more try
logger.warning(
"API Connection Error. Waiting for {} seconds".format(
self.config.error_wait_time
)
)
            logger.error("Connection Error: {}", e)
time.sleep(self.config.error_wait_time)
response = openai.ChatCompletion.create(
model=model,
messages=history,
temperature=temperature,
)
except openai.error.RateLimitError as e: # give one more try
logger.warning(
"Rate limit reached. Waiting for {} seconds".format(
self.config.error_wait_time
)
)
            logger.error("Rate Limit Error: {}", e)
time.sleep(self.config.error_wait_time)
response = openai.ChatCompletion.create(
model=model,
messages=history,
temperature=temperature,
)
except openai.error.InvalidRequestError as e: # token limit reached
logger.warning("Token size limit reached. The recent message is compressed")
            logger.error("Token size error; will retry with compressed message: {}", e)
# compress the message in two ways.
## 1. compress the last message
            history[-1]["content"] = self._token_compression(history)
## 2. reduce the number of messages in the history. Minimum is 2
if self.history_length > 2:
self.history_length -= 1
## update the history
history = history[-self.history_length :]
response = openai.ChatCompletion.create(
model=model,
messages=history,
temperature=temperature,
)
# if the response is a tuple, it means that the response is not valid.
if isinstance(response, tuple):
logger.warning("Response is not valid. Waiting for 5 seconds")
try:
time.sleep(5)
response = openai.ChatCompletion.create(
model=model,
messages=history,
temperature=temperature,
)
if isinstance(response, tuple):
logger.error("Response is not valid. ")
raise Exception("Response is not valid. ")
except Exception as e:
logger.error("Response is not valid. ", e)
raise Exception(
"Response is not valid. The most likely reason is the connection to OpenAI is not stable. "
"Please doublecheck with `pentestgpt-connection`"
)
return response["choices"][0]["message"]["content"]
def send_new_message(self, message):
# create a message
start_time = time.time()
data = message
history = [{"role": "user", "content": data}]
message: Message = Message()
message.ask_id = str(uuid1())
message.ask = data
message.request_start_timestamp = start_time
response = self._chat_completion(history)
message.answer = response
message.request_end_timestamp = time.time()
message.time_escaped = (
message.request_end_timestamp - message.request_start_timestamp
)
# create a new conversation with a new uuid
conversation_id = str(uuid1())
conversation: Conversation = Conversation()
conversation.conversation_id = conversation_id
conversation.message_list.append(message)
self.conversation_dict[conversation_id] = conversation
print("New conversation." + conversation_id + " is created." + "\n")
return response, conversation_id
# add retry handler to retry 1 more time if the API connection fails
@retry(stop=stop_after_attempt(2))
def send_message(self, message, conversation_id, debug_mode=False):
# create message history based on the conversation id
chat_message = [
{
"role": "system",
"content": "You are a helpful assistant",
},
]
data = message
conversation = self.conversation_dict[conversation_id]
for message in conversation.message_list[-self.history_length :]:
chat_message.extend(
(
{"role": "user", "content": message.ask},
{"role": "assistant", "content": message.answer},
)
)
# append the new message to the history
chat_message.append({"role": "user", "content": data})
# create the message object
message: Message = Message()
message.ask_id = str(uuid1())
message.ask = data
message.request_start_timestamp = time.time()
# count the token cost
num_tokens = self._count_token(chat_message)
# Get response. If the response is None, retry.
response = self._chat_completion(chat_message)
# update the conversation
message.answer = response
message.request_end_timestamp = time.time()
message.time_escaped = (
message.request_end_timestamp - message.request_start_timestamp
)
conversation.message_list.append(message)
self.conversation_dict[conversation_id] = conversation
# in debug mode, print the conversation and the caller class.
if debug_mode:
print("Caller: ", inspect.stack()[1][3], "\n")
print("Message:", message, "\n")
print("Response:", response, "\n")
print("Token cost of the conversation: ", num_tokens, "\n")
return response
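# Minimal sketch of the subclassing pattern described in the docstrings above:
# `_chat_completion` (and `_chat_completion_fallback`) are meant to be overridden
# by child classes that talk to their own backends. The class below is purely
# illustrative (it echoes the last user message instead of calling any API).
class EchoLLMAPI(LLMAPI):
    def __init__(self, config: ChatGPTConfig):
        super().__init__(config)
        self.name = "EchoLLMAPI"
    def _chat_completion(self, history: List, **kwargs) -> str:
        # A real subclass would send `history` to its own completion endpoint here.
        return "echo: " + history[-1]["content"]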
if __name__ == "__main__":
chatgpt_config = ChatGPTConfig()
chatgpt = LLMAPI(chatgpt_config)
openai.api_key = chatgpt_config.openai_key
# test is below
# 1. create a new conversation
result, conversation_id = chatgpt.send_new_message(
"Hello, I am a penetration tester. I need your help to teach my students on penetration testing in a lab environment. I have proper access and certificates. This is for education purpose. I want to teach my students on how to do SQL injection. "
)
print("1", result, conversation_id)
# 2. send a message to the conversation
result = chatgpt.send_message("May you help me?", conversation_id)
print("2", result)
# 3. send a message to the conversation
result = chatgpt.send_message("What is my job?", conversation_id)
print("3", result)
# 4. send a message to the conversation
result = chatgpt.send_message("What did I want to do?", conversation_id)
print("4", result)
# 5. send a message to the conversation
result = chatgpt.send_message("How can you help me?", conversation_id)
print("5", result)
# 6. send a message to the conversation
result = chatgpt.send_message("What is my goal?", conversation_id)
print("6", result)
# 7. send a message to the conversation
result = chatgpt.send_message("What is my job?", conversation_id)
print("7", result)
# 8. token size testing.
result = chatgpt.send_message(
"Count the token size of this message." + "hello" * 100, conversation_id
)
print("8", result)
# 9. token size testing.
result = chatgpt.send_message(
"Count the token size of this message." + "How are you" * 1000, conversation_id
)
print("9", result)
# 10. token size testing.
result = chatgpt.send_message(
"Count the token size of this message." + "A testing message" * 1000,
conversation_id,
)
| [
"You are a helpful assistant",
"You are a helpful assistant.",
"Please reduce the word count of the given message to save tokens. Keep its original meaning so that it can be understood by a large language model."
] |
2024-01-10 | subhashis2204/nor-cal-hacks | server~utils~azure_openai.py | import json
from langchain.chat_models import AzureChatOpenAI
from langchain.chains.question_answering import load_qa_chain
from langchain.document_loaders import TextLoader
from langchain.chains.summarize import load_summarize_chain
class FlashCardGenerator:
def __init__(self, subscription_key, endpoint, deployment_name):
self.llm = AzureChatOpenAI(
deployment_name=deployment_name,
openai_api_version="2023-05-15",
openai_api_key=subscription_key,
# openai_api_base=endpoint,
# azure_deployment=deployment_name,
# openai_api_type="azure",
)
def generate_flashcards(self):
loader = TextLoader("output.txt", encoding='utf-8').load()
answer = None
print(loader)
try:
chain = load_qa_chain(llm=self.llm, chain_type="map_reduce")
query = 'output : short questions and short answers in [{"question" : "question 1", "answer" : "answer to question 1"}, {...}] format'
response = chain.run(input_documents=loader, question=query)
print(response)
answer = json.loads(response)
except Exception as e:
print(e)
answer = []
return answer
def generate_summary(self):
loader = TextLoader("output.txt", encoding='utf-8').load()
try:
chain = load_summarize_chain(llm=self.llm, chain_type="map_reduce")
response = chain.run(input_documents=loader)
answer = response
except Exception as e:
print(e)
answer = ""
return answer | [] |
2024-01-10 | adamc-forum/DDQ-Prod | backend~functions.py | from azure.identity import DefaultAzureCredential
from azure.mgmt.cognitiveservices import CognitiveServicesManagementClient
from openai import AzureOpenAI
from constants import (
SUBSCRIPTION_ID,
OPENAI_API_VERSION,
OPENAI_API_KEY,
OPENAI_API_ENDPOINT,
RG_NAME,
ACCOUNT_NAME,
CONNECTION_STRING,
DATABASE_NAME,
COLLECTION_NAME
)
from database import (
DatabaseClient
)
def get_service_management_client():
return CognitiveServicesManagementClient (
credential=DefaultAzureCredential(),
subscription_id=SUBSCRIPTION_ID
)
def get_openai_client() -> AzureOpenAI:
return AzureOpenAI(
api_version=OPENAI_API_VERSION,
api_key=OPENAI_API_KEY,
azure_endpoint=OPENAI_API_ENDPOINT
)
def get_db_client() -> DatabaseClient:
return DatabaseClient(
connection_string=CONNECTION_STRING,
database_name=DATABASE_NAME,
collection_name=COLLECTION_NAME
)
def get_models() -> tuple[str, str]:
service_management_client = get_service_management_client()
deployments = service_management_client.deployments.list(RG_NAME, ACCOUNT_NAME)
deployment_models = [deployment.name for deployment in deployments]
embedding_model = "text-embedding-ada-002"
completion_model = "gpt-35-turbo-16k"
for deployment_model in deployment_models:
embedding_model = deployment_model if "embedding" in deployment_model.lower() else embedding_model
completion_model = deployment_model if "completion" in deployment_model.lower() else completion_model
return (embedding_model, completion_model) | [] |
2024-01-10 | pq-yang/PGDiff | guided_diffusion~logger.py | """
Logger copied from OpenAI baselines to avoid extra RL-based dependencies:
https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/logger.py
"""
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
import warnings
from collections import defaultdict
from contextlib import contextmanager
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, "wt")
self.own_file = True
else:
assert hasattr(filename_or_file, "read"), (
"expected file or str, got %s" % filename_or_file
)
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if hasattr(val, "__float__"):
valstr = "%-8.3g" % val
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print("WARNING: tried to write empty key-value dict")
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = "-" * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items(), key=lambda kv: kv[0].lower()):
lines.append(
"| %s%s | %s%s |"
% (key, " " * (keywidth - len(key)), val, " " * (valwidth - len(val)))
)
lines.append(dashes)
self.file.write("\n".join(lines) + "\n")
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
maxlen = 30
return s[: maxlen - 3] + "..." if len(s) > maxlen else s
def writeseq(self, seq):
seq = list(seq)
for (i, elem) in enumerate(seq):
self.file.write(elem)
if i < len(seq) - 1: # add space unless this is the last one
self.file.write(" ")
self.file.write("\n")
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "wt")
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, "dtype"):
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + "\n")
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, "w+t")
self.keys = []
self.sep = ","
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = list(kvs.keys() - self.keys)
extra_keys.sort()
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
self.file.write(k)
self.file.write("\n")
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write("\n")
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(",")
v = kvs.get(k)
if v is not None:
self.file.write(str(v))
self.file.write("\n")
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = "events"
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {"tag": k, "simple_value": float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = (
self.step
) # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir, log_suffix=""):
os.makedirs(ev_dir, exist_ok=True)
if format == "stdout":
return HumanOutputFormat(sys.stdout)
elif format == "log":
return HumanOutputFormat(osp.join(ev_dir, "log%s.txt" % log_suffix))
elif format == "json":
return JSONOutputFormat(osp.join(ev_dir, "progress%s.json" % log_suffix))
elif format == "csv":
return CSVOutputFormat(osp.join(ev_dir, "progress%s.csv" % log_suffix))
elif format == "tensorboard":
return TensorBoardOutputFormat(osp.join(ev_dir, "tb%s" % log_suffix))
else:
raise ValueError("Unknown format specified: %s" % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
If called many times, last value will be used.
"""
get_current().logkv(key, val)
def logkv_mean(key, val):
"""
The same as logkv(), but if called many times, values averaged.
"""
get_current().logkv_mean(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
"""
return get_current().dumpkvs()
def getkvs():
return get_current().name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
get_current().log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
get_current().set_level(level)
def set_comm(comm):
get_current().set_comm(comm)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return get_current().get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
@contextmanager
def profile_kv(scopename):
logkey = "wait_" + scopename
tstart = time.time()
try:
yield
finally:
get_current().name2val[logkey] += time.time() - tstart
def profile(n):
"""
Usage:
@profile("my_func")
def my_func(): code
"""
def decorator_with_name(func):
def func_wrapper(*args, **kwargs):
with profile_kv(n):
return func(*args, **kwargs)
return func_wrapper
return decorator_with_name
# ================================================================
# Backend
# ================================================================
def get_current():
if Logger.CURRENT is None:
_configure_default_logger()
return Logger.CURRENT
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats, comm=None):
self.name2val = defaultdict(float) # values this iteration
self.name2cnt = defaultdict(int)
self.level = INFO
self.dir = dir
self.output_formats = output_formats
self.comm = comm
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def logkv_mean(self, key, val):
oldval, cnt = self.name2val[key], self.name2cnt[key]
self.name2val[key] = oldval * cnt / (cnt + 1) + val / (cnt + 1)
self.name2cnt[key] = cnt + 1
def dumpkvs(self):
if self.comm is None:
d = self.name2val
else:
d = mpi_weighted_mean(
self.comm,
{
name: (val, self.name2cnt.get(name, 1))
for (name, val) in self.name2val.items()
},
)
if self.comm.rank != 0:
d["dummy"] = 1 # so we don't get a warning about empty dict
out = d.copy() # Return the dict for unit testing purposes
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(d)
self.name2val.clear()
self.name2cnt.clear()
return out
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def set_comm(self, comm):
self.comm = comm
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
def get_rank_without_mpi_import():
# check environment variables here instead of importing mpi4py
# to avoid calling MPI_Init() when this module is imported
for varname in ["PMI_RANK", "OMPI_COMM_WORLD_RANK"]:
if varname in os.environ:
return int(os.environ[varname])
return 0
def mpi_weighted_mean(comm, local_name2valcount):
"""
Copied from: https://github.com/openai/baselines/blob/ea25b9e8b234e6ee1bca43083f8f3cf974143998/baselines/common/mpi_util.py#L110
Perform a weighted average over dicts that are each on a different node
Input: local_name2valcount: dict mapping key -> (value, count)
Returns: key -> mean
"""
all_name2valcount = comm.gather(local_name2valcount)
if comm.rank == 0:
name2sum = defaultdict(float)
name2count = defaultdict(float)
for n2vc in all_name2valcount:
for (name, (val, count)) in n2vc.items():
try:
val = float(val)
except ValueError:
if comm.rank == 0:
warnings.warn(
"WARNING: tried to compute mean on non-float {}={}".format(
name, val
)
)
else:
name2sum[name] += val * count
name2count[name] += count
return {name: name2sum[name] / name2count[name] for name in name2sum}
else:
return {}
def configure(dir=None, format_strs=None, comm=None, log_suffix=""):
"""
If comm is provided, average all numerical stats across that comm
"""
if dir is None:
dir = os.getenv("OPENAI_LOGDIR")
if dir is None:
dir = osp.join(
tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"),
)
assert isinstance(dir, str)
dir = os.path.expanduser(dir)
os.makedirs(os.path.expanduser(dir), exist_ok=True)
rank = get_rank_without_mpi_import()
if rank > 0:
log_suffix = log_suffix + "-rank%03i" % rank
if format_strs is None:
if rank == 0:
format_strs = os.getenv("OPENAI_LOG_FORMAT", "stdout").split(",")
else:
format_strs = os.getenv("OPENAI_LOG_FORMAT_MPI", "log").split(",")
format_strs = filter(None, format_strs)
output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats, comm=comm)
if output_formats:
log("Logging to %s" % dir)
def _configure_default_logger():
configure()
Logger.DEFAULT = Logger.CURRENT
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log("Reset logger")
@contextmanager
def scoped_configure(dir=None, format_strs=None, comm=None):
prevlogger = Logger.CURRENT
configure(dir=dir, format_strs=format_strs, comm=comm)
try:
yield
finally:
Logger.CURRENT.close()
Logger.CURRENT = prevlogger
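# Minimal usage sketch, assuming the default stdout/csv formats; the directory
# name and the logged keys below are illustrative only.
if __name__ == "__main__":
    configure(dir="logs/example", format_strs=["stdout", "csv"])
    for step in range(3):
        logkv("step", step)
        logkv_mean("loss", 1.0 / (step + 1))
        dumpkvs()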
| [] |
2024-01-10 | Noxcode99/Chatbot-gpt4 | BOT_API.py | from flask import Flask, request
import openai
from twilio.twiml.messaging_response import MessagingResponse
import os
# Init the Flask App
app = Flask(__name__)
# Initialize the OpenAI API key
# export OPENAI_API_KEY=YOUR API KEY
openai.api_key = os.environ.get("OPENAI_API_KEY")
#openai.api_key = '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'
# Define a function to generate answers using GPT-3
def generate_answer(question):
model_engine = "text-davinci-002"
prompt = (f"Q: {question}\n"
"A:")
response = openai.Completion.create(
engine=model_engine,
prompt=prompt,
max_tokens=1024,
n=1,
stop=None,
temperature=0.7,
)
answer = response.choices[0].text.strip()
return answer
# Define a route to handle incoming requests
@app.route('/chatgpt', methods=['POST'])
def chatgpt():
incoming_que = request.values.get('Body', '').lower()
print("Question: ", incoming_que)
# Generate the answer using GPT-3
answer = generate_answer(incoming_que)
print("BOT Answer: ", answer)
bot_resp = MessagingResponse()
msg = bot_resp.message()
msg.body(answer)
return str(bot_resp)
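# Minimal example client (illustrative assumption): Twilio delivers the user's
# message in the "Body" form field, so the route can also be exercised with a
# plain form-encoded POST while the app below runs on port 5000.
def example_client(question: str) -> str:
    import requests
    resp = requests.post("http://localhost:5000/chatgpt", data={"Body": question})
    return resp.text  # TwiML XML containing the generated answer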
# Run the Flask app
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=False, port=5000)
| [
"Q: PLACEHOLDER\nA:"
] |
2024-01-10 | anoopkarnik/chatgpt-api-service | test~testing.py | '''
functions for testing all services and apis
'''
import unittest
import json
import os
import logging
import requests
import time
import json
import pandas as pd
from flask_sqlalchemy import SQLAlchemy
from main.services.chat import chat_with_gpt
from main.services.assistants import get_reply_by_assistant
from main.services.audio import create_text_to_speech,create_speech_to_text,translate_speech_to_text
from main.controllers.Controller import payload_controller
from openai import OpenAI
from app import create_app
logger = logging.getLogger(__name__)
# class TestChat(unittest.TestCase):
# def __init__(self,*args,**kwargs):
# self.client = OpenAI(api_key=os.environ.get('OPENAI_API_KEY'))
# def test_chat_with_gpt(self):
# client = self.client
# message_body = {
# "message":"Hello, how are you?",
# "system_instructions":"You are a helpful assistant"
# }
# result = chat_with_gpt(message_body)
# self.assertTrue(result['role'] == 'system')
# self.assertTrue(result['text'] != '')
# self.assertTrue(result['text'] != message_body['message'])
# class TestAssistants(unittest.TestCase):
# def __init__(self,*args,**kwargs):
# super(TestAssistants,self).__init__(*args,**kwargs)
# self.client = OpenAI(api_key=os.environ.get('OPENAI_API_KEY'))
# self.assitant_name='SOFTWARE_ENGINEER_MENTOR'
# def test_get_reply_by_assistant(self):
# message_body = {
# "message":"Hello, how are you?",
# "thread":"new",
# "assistant_name":self.assitant_name
# }
# client = self.client
# result = get_reply_by_assistant(message_body)
# self.assertTrue(result['role'] == 'system')
# self.assertTrue(result['text'] != '')
# self.assertTrue(result['text'] != message_body['message'])
# class TestAudio(unittest.TestCase):
# def test_create_text_to_speech(self):
# message_body = {
# "message":"Hello, how are you?",
# "output_path":"data",
# "file_name":"test.mp3"
# }
# create_text_to_speech(message_body)
# self.assertTrue(os.path.exists(os.path.join(message_body['output_path'],message_body['file_name'])))
# def test_create_speech_to_text(self):
# message_body = {
# "path":"data/test.mp3"
# }
# transcript = create_speech_to_text(message_body)
# self.assertTrue(transcript['data']['text'] != '')
# def test_translate_speech_to_text(self):
# message_body = {
# "path":"data/test.mp3"
# }
# transcript = translate_speech_to_text(message_body)
# self.assertTrue(transcript['data']['text'] != '')
# class TestController(unittest.TestCase):
# def test_health_check(self):
# response = requests.get('http://0.0.0.0:8111/')
# self.assertTrue(response.status_code == 200)
# self.assertTrue(response.json()['status'] == 'success')
# def test_chat_with_assistant(self):
# message_body = {
# "message":"Hello, how are you?",
# "system_instructions":"You are a helpful assistant"
# }
# response = requests.post('http://0.0.0.0:8111/',json=message_body)
# self.assertTrue(response.status_code == 200)
# self.assertTrue(response.json()['role'] == 'system')
# self.assertTrue(response.json()['text'] != '')
# self.assertTrue(response.json()['text'] != message_body['message'])
# def test_text_to_speech(self):
# message_body = {
# "message":"Hello, how are you?",
# "output_path":"data",
# "file_name":"test.mp3"
# }
# response = requests.post('http://0.0.0.0:8111/',json=message_body)
# self.assertTrue(response.status_code == 200)
# self.assertTrue(os.path.exists(os.path.join(message_body['output_path'],message_body['file_name'])))
# os.remove(os.path.join(message_body['output_path'],message_body['file_name']))
# def test_speech_to_text(self):
# message_body = {
# "path":"data/test.mp3"
# }
# response = requests.post('http://0.0.0.0:8111/',json=message_body)
# self.assertTrue(response.status_code == 200)
# self.assertTrue(response.json()['data']['text'] != '')
# def test_translate(self):
# message_body = {
# "path":"data/test.mp3"
# }
# response = requests.post('http://0.0.0.0:8111/',json=message_body)
# self.assertTrue(response.status_code == 200)
# self.assertTrue(response.json()['data']['text'] != '')
if __name__ == '__main__':
unittest.main()
| [] |
2024-01-10 | williammunnich/AI-Dream-Story | ai_text_create.py | # Importing necessary libraries
import os
import openai
import random
from dotenv import load_dotenv
# Defining function to return text and choice
def return_text_and_choice():
# Loading environment variables from .env file
load_dotenv()
# Setting API key for OpenAI
openai.api_key = os.getenv("OPENAI_API_KEY")
# Defining upper and lower limits for random year
upper_limit_rand = 3000
lower_limit_rand = -10000
year = None
# Generating a random year between lower and upper limits
rand_number = random.randint(lower_limit_rand, upper_limit_rand)
if rand_number <= -1:
rand_number = rand_number * -1
rand_number = str(rand_number)
year = rand_number + " BC"
else:
rand_number = str(rand_number)
year = rand_number + " AD"
# Defining helper function to read a file
def read_file(filename):
with open(filename, 'r', encoding='UTF8') as file:
data = file.read().replace('\n', '')
return data
# Formulating a request to OpenAI API
formulated_request = "You wake up in your bed on a normal day in the year " + year + \
".\nWrite a few lines about what you do for the day then provide two choices for how to continue the day \
in the form of 'Choice 1:' and 'Choice 2:'"
# Sending request to OpenAI API and storing the response
response = openai.Completion.create(
model="text-davinci-002",
prompt = formulated_request,
temperature=0.39,
max_tokens=500,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Storing the output and returning
    output = "The year is " + year + ". " + response['choices'][0]['text']
    return output
# Defining function to return text and choice given previous text and option
def given_input_return_text_and_choice(previous_text, option):
# Loading environment variables from .env file
load_dotenv()
# Setting API key for OpenAI
openai.api_key = os.getenv("OPENAI_API_KEY")
# Defining helper function to read a file
def read_file(filename):
with open(filename, 'r', encoding='UTF8') as file:
data = file.read().replace('\n', '')
return data
# Formulating a request to OpenAI API
formulated_request = "Given that this is what happened previously: " + previous_text + " and the option that was\
chosen was " + option + " continue the story onwards and provide exactly two options on what to do next."
# Call OpenAI API to get the response
response = openai.Completion.create(
model="text-davinci-002",
prompt = formulated_request,
temperature=0.39,
max_tokens=500,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
# Format the output
output = response['choices'][0]['text']
#return output
return str(output)
print(return_text_and_choice()) | [] |
2024-01-10 | williammunnich/AI-Dream-Story | ai_image_create.py | # Importing the openai library and the os module
import openai
import os
# Loading environment variables from a .env file
from dotenv import load_dotenv
load_dotenv()
# Setting the OpenAI API key to the value stored in the environment variable "OPENAI_API_KEY"
openai.api_key = os.getenv("OPENAI_API_KEY")
# Importing the requests library
import requests
# Defining a test URL to fetch an image
url_test = "https://oaidalleapiprodscus.blob.core.windows.net/private/org-2dA58464KfGXsNQqhZrf0VqG/user-USitdQFQhBbmQsgHPhbzMEA1/img-blnmD1xZiFnbLFS36Dh4kg6s.png?st=2023-02-04T17%3A52%3A18Z&se=2023-02-04T19%3A52%3A18Z&sp=r&sv=2021-08-06&sr=b&rscd=inline&rsct=image/png&skoid=6aaadede-4fb3-4698-a8f6-684d7786b067&sktid=a48cca56-e6da-484e-a814-9c849652bcb3&skt=2023-02-03T23%3A27%3A51Z&ske=2023-02-04T23%3A27%3A51Z&sks=b&skv=2021-08-06&sig=LhuKHpnknvEscnc%2BNUUh7XX2TBcPEo1nNN/AGWm/2zQ%3D"
# Defining a function that takes in an input text and returns the URL of the generated image
def return_url(input_text):
# Making a request to the OpenAI API to generate an image based on the input text
response = openai.Image.create(
prompt = input_text,
size = "512x512"
)
# Returning the URL of the generated image
return str(response['data'][0]['url'])
# Defining a function that takes in a URL and a picture name and saves the image from the URL to the file system
def save_image(url, picture_name):
# Fetching the image data from the URL
img_data = requests.get(url).content
# Saving the image data to a file with the specified picture name
with open('graphics/' + str(picture_name) + '.jpg', 'wb') as handler:
handler.write(img_data)
# Returning a confirmation message that the image was saved
return "image " + picture_name + " saved"
# Defining a function that takes in an input text and a picture name and generates an image based on the input text, then saves the image to the file system
def make_image_and_save(input_text, picture_name):
# Getting the URL of the generated image based on the input text
url = return_url(input_text)
# Save the image from the URL
save_image(url, picture_name)
#make_image_and_save("rustic computer", "rustyboi")
| [] |
2024-01-10 | Dombom123/x-reacts | tests~test_svd.py | import streamlit as st
import replicate as rep
from openai import OpenAI
def get_assistant_response(prompt):
api_key = st.secrets["openai"]["OPENAI_API_KEY"]
client = OpenAI(api_key=api_key)
assistant = client.beta.assistants.create(
name="Math Tutor",
instructions="You are a personal math tutor. Write and run code to answer math questions.",
tools=[{"type": "code_interpreter"}],
model="gpt-4-1106-preview"
)
thread = client.beta.threads.create()
message = client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
        content=prompt,  # use the caller-supplied prompt rather than a hard-coded question
)
run = client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=assistant.id
)
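    # Note: the run is created but not polled; in practice one would wait until the
    # run status becomes "completed" before listing messages, otherwise the
    # assistant's reply may not be in the thread yet.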
thread_messages = client.beta.threads.messages.list(thread.id)
print(thread_messages.data)
return thread_messages.data
def generate_video(prompt):
get_assistant_response(prompt)
def main():
st.set_page_config(page_title="X-Reacts", page_icon="🎥")
st.title("X-Reacts Video Generation 🎥")
st.image("data/header.png")
prompt = st.text_area("Prompt", "This is a test prompt.")
if st.button("Generate"):
result = get_assistant_response(prompt)
st.write(result)
st.success("Video generated!")
if __name__ == "__main__":
main() | [
"This is a test prompt."
] |
2024-01-10 | appassionate/rubatochat | test~chat_test.py | from rubatochat.core.chat import ChatOpenAIChat
from langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler
_OPENAI_API_KEY = "SECRET"
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
from langchain.chat_models import ChatOpenAI
def test_base_chat():
chat = ChatOpenAI(model_name="gpt-3.5-turbo",temperature=0.3
,openai_api_key=_OPENAI_API_KEY)
messages = [
SystemMessage(content="You are an expert data scientist"),
HumanMessage(content="Write a Python script that trains a neural network on simulated data ")
]
response=chat(messages)
print(response.content,end='\n')
return None
def testChatOpenAIChat():
callback = AsyncIteratorCallbackHandler()
_chat = ChatOpenAIChat(
streaming=True,
openai_api_key=_OPENAI_API_KEY,
verbose=True,
callbacks=[callback]
)
print(_chat.question("hello, who are you?"))
return _chat
#test_base_chat()
_chat = testChatOpenAIChat()
_response = _chat.question("who is beethoven, introduce him in 300 words")
print(_response)
import pickle
_serialized=pickle.dumps(_chat)
pickle.loads(_serialized)
#print(_serialized)
| [
"You are an expert data scientist",
"Write a Python script that trains a neural network on simulated data "
] |
2024-01-10 | appassionate/rubatochat | rubatochat~api~v1~apikeys.py | from datetime import datetime
from typing import AsyncIterable, Awaitable, Callable, List, Optional, Union
import openai
from pydantic import BaseModel
from fastapi import Depends, FastAPI, HTTPException, status, APIRouter
from rubatochat.core.database.models import User, OpenAIKEYS, Session, add_item, delete_item_by_id, select
from rubatochat.api import db_engine
from .auth import get_current_active_user
app = APIRouter()
#根据auth_token获取username, 以后清除函数中的username参数
#TODO: 中间一些函数应该是MODEL的部分,之后移到core中
@app.post("/openaikeys/add", tags=["apikeys"])
async def add_openapikeys(openapikeys:List[str], user: User = Depends(get_current_active_user)):
"""
user to add their apikeys to database
"""
username = user.username
with Session(db_engine) as session:
stmt = select(User).where(User.username == username)
user = session.exec(stmt)
user = user.first()
if not user:
raise HTTPException(status_code=404, detail="username not found")
_user_id = user.id
for apikey in openapikeys:
_content = apikey
_isvalid = True
_create_at = datetime.utcnow()
add_item(db_engine, "openaikeys", **{
"content":_content,
"user_id":_user_id,
"create_at":_create_at,
"is_valid":_isvalid,
})
return {"status": "success"}
#copied from ...
#not a api currently
def is_open_ai_key_valid(openai_api_key) -> bool:
if not openai_api_key:
return False
try:
openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "test"}],
api_key=openai_api_key,
)
except Exception as e:
return False
return True
# def get_userid(user: User = Depends(get_current_active_user)):
# username = user.username
# with Session(db_engine) as session:
# stmt = select(User).where(User.username == username)
# user = session.exec(stmt)
# user = user.first()
# if not user:
# raise HTTPException(status_code=404, detail="username not found")
# return user
def get_key_by_id(key_id:int, user:User=Depends(get_current_active_user)):
_user_id = user.id
with Session(db_engine) as session:
        # Build the query statement
        stmt = select(OpenAIKEYS).where(OpenAIKEYS.user_id == _user_id).where(OpenAIKEYS.id == key_id)
        # Execute the query
        _key = session.exec(stmt)
        _key = _key.first()  # unwrap the result iterator
        # Return the query result
if _key:
return _key
else:
raise HTTPException(status_code=404, detail="select key's rank out of range.")
def get_key_by_rank(rank:int, user:User=Depends(get_current_active_user)):
_user_id = user.id
with Session(db_engine) as session:
        # Build the query statement
        stmt = select(OpenAIKEYS).where(OpenAIKEYS.user_id == _user_id).order_by(OpenAIKEYS.id).offset(rank).limit(1)
        # Execute the query
        _key = session.exec(stmt)
        _key = _key.first()  # unwrap the result iterator
        # Return the query result
if _key:
return _key
else:
raise HTTPException(status_code=404, detail="select key's rank out of range.")
def update_key_by_id(key_id:int, new_content:str, user:User=Depends(get_current_active_user)):
_user_id = user.id
with Session(db_engine) as session:
        # Build the query statement
        stmt = select(OpenAIKEYS).where(OpenAIKEYS.user_id == _user_id).where(OpenAIKEYS.id == key_id)
        # Execute the query
        _key = session.exec(stmt)
        _key = _key.first()  # unwrap the result iterator
        # Return the query result
if _key:
_key.content = new_content
session.commit()
session.refresh(_key)
return _key
else:
raise HTTPException(status_code=404, detail="select key's rank out of range.")
def get_key_count(user:User=Depends(get_current_active_user)):
_user_id = user.id
with Session(db_engine) as session:
        # Build the query statement
        stmt = select(OpenAIKEYS).where(OpenAIKEYS.user_id == _user_id)
        # Execute the query
        _key = session.exec(stmt)
        _key = _key.all()  # materialize all results
        # Return the number of query results
return len(_key)
@app.get("/openaikeys/count", tags=["apikeys"])
async def get_openapikeys_count(user: User=Depends(get_current_active_user)):
"""
    Return the number of OpenAI API keys stored for the current user.
"""
_count = get_key_count(user)
return {"status": "success",
"message":"get key count success",
"key_type": "openai",
"count": _count,
}
@app.get("/openaikeys/check", tags=["apikeys"])
async def check_openapikey(rank:int, user:User=Depends(get_current_active_user)):
"""
    Retrieve one of the current user's OpenAI API keys by its rank.
"""
_key = get_key_by_rank(rank, user)
return {"status": "success",
"key_id": _key.id,
"key": _key.content,
"key_obj":_key}
@app.get("/openaikeys/isvalid", tags=["apikeys"])
async def check_openapikey_valid(key_id:int, user: User=Depends(get_current_active_user)):
"""
    Check whether one of the current user's OpenAI API keys is valid (endpoint currently disabled).
"""
raise HTTPException(status_code=403, detail="not implemented")
#still buggy
_key = get_key_by_id(key_id, user=user)
return {"status": "success",
"key_id": _key.id,
"is_valid": is_open_ai_key_valid(_key.content),
}
@app.post("/openaikeys/update", tags=["apikeys"])
async def update_openapikey(key_id:int, new_key:str, user: User=Depends(get_current_active_user)):
"""
    Update the content of one of the current user's OpenAI API keys.
"""
_key = update_key_by_id(key_id,new_content=new_key, user=user)
return {"status": "success",
"key_id": _key.id,
"key": _key.content,
"key_obj":_key}
@app.delete("/openaikeys/delete", tags=["apikeys"])
async def delete_openapikey(key_id:int, user: User=Depends(get_current_active_user)):
"""
    Delete one of the current user's OpenAI API keys.
"""
    # BUG: this can apparently delete keys belonging to other users; proper user filtering is required
    try:
        delete_item_by_id(db_engine, "openaikeys", key_id)
    except Exception:
        raise HTTPException(status_code=403, detail=f"failed to delete key, key id: {key_id}")
    # How should the deleted key be popped from any cached list?
return {"status": "success",
"key_id": key_id,
"message":"halo"
}
| [
"test"
] |
2024-01-10 | mickpah/sentry | tests~sentry~api~endpoints~test_event_ai_suggested_fix.py | import pytest
from django.test.utils import override_settings
from django.urls import reverse
from sentry.testutils.helpers import Feature
from sentry.testutils.pytest.fixtures import django_db_all
from sentry.testutils.skips import requires_snuba
pytestmark = [requires_snuba]
@pytest.fixture(autouse=True)
def openai_features():
with Feature({"organizations:open-ai-suggestion": True}):
with override_settings(OPENAI_API_KEY="X"):
yield
@pytest.fixture(autouse=True)
def auto_login(client, default_user):
assert client.login(username=default_user.username, password="admin")
@pytest.fixture(autouse=True)
def openai_mock(monkeypatch):
def dummy_response(*a, **kw):
return {"choices": [{"message": {"content": "AI generated response"}}]}
monkeypatch.setattr("openai.ChatCompletion.create", dummy_response)
@pytest.fixture
def test_event(default_project, factories):
event_data = {
"exception": {
"values": [
{
"type": "ZeroDivisionError",
"stacktrace": {"frames": [{"function": f} for f in ["a", "b"]]},
}
]
}
}
return factories.store_event(data=event_data, project_id=default_project.id)
@pytest.fixture
def openai_policy():
from sentry.api.endpoints.event_ai_suggested_fix import openai_policy_check
data = {"result": "allowed"}
def policy(sender, **kwargs):
return data["result"]
try:
openai_policy_check.connect(policy)
yield data
finally:
openai_policy_check.disconnect(policy)
@django_db_all
def test_consent(client, default_project, test_event, openai_policy):
path = reverse(
"sentry-api-0-event-ai-fix-suggest",
kwargs={
"organization_slug": default_project.organization.slug,
"project_slug": default_project.slug,
"event_id": test_event.event_id,
},
)
openai_policy["result"] = "individual_consent"
response = client.get(path)
assert response.status_code == 403
assert response.json() == {"restriction": "individual_consent"}
response = client.get(path + "?consent=yes")
assert response.status_code == 200
assert response.json() == {"suggestion": "AI generated response"}
openai_policy["result"] = "subprocessor"
response = client.get(path)
assert response.status_code == 403
assert response.json() == {"restriction": "subprocessor"}
openai_policy["result"] = "allowed"
response = client.get(path)
assert response.status_code == 200
assert response.json() == {"suggestion": "AI generated response"}
| [
"AI generated response"
] |
2024-01-10 | kesamet/retrieval-augmented-generation | src~reranker~tart~tart.py | import os
from typing import List
import torch
import torch.nn.functional as F
from langchain.schema import Document
from src import CFG
from src.reranker.base import BaseReranker
from .modeling_enc_t5 import EncT5ForSequenceClassification
from .tokenization_enc_t5 import EncT5Tokenizer
class TARTReranker(BaseReranker):
"""Reranker based on TART (https://github.com/facebookresearch/tart)."""
def __init__(self, instruction: str):
model_path = os.path.join(CFG.MODELS_DIR, "models/tart-full-flan-t5-xl")
self.tokenizer = EncT5Tokenizer.from_pretrained(model_path)
self.model = EncT5ForSequenceClassification.from_pretrained(model_path).to(
CFG.DEVICE
)
self.model.eval()
self.instruct_template = instruction + " [SEP] {query}"
def rerank(self, query: str, passages: List[Document]) -> List[Document]:
contents: List[str] = [passage.page_content for passage in passages]
instruction_queries: List[str] = [
self.instruct_template.format(query=query) for _ in range(len(contents))
]
features = self.tokenizer(
instruction_queries,
contents,
padding=True,
truncation=True,
return_tensors="pt",
)
with torch.no_grad():
scores = self.model(**features).logits
normalized_scores = [float(score[1]) for score in F.softmax(scores, dim=1)]
sorted_pairs = sorted(
zip(passages, normalized_scores), key=lambda x: x[1], reverse=True
)
sorted_passages = [passage for passage, _ in sorted_pairs]
return sorted_passages
| [] |
2024-01-10 | kesamet/retrieval-augmented-generation | src~llms.py | """
LLM
"""
import os
from langchain.llms.ctransformers import CTransformers
from src import CFG
def build_llm():
"""Builds language model defined in config."""
llm = CTransformers(
model=os.path.join(CFG.MODELS_DIR, CFG.LLM_MODEL_PATH),
model_type=CFG.LLM_MODEL_TYPE,
config={
"max_new_tokens": CFG.MAX_NEW_TOKENS,
"temperature": CFG.TEMPERATURE,
"repetition_penalty": CFG.REPETITION_PENALTY,
"context_length": CFG.CONTEXT_LENGTH,
},
)
return llm
| [] |
2024-01-10 | kesamet/retrieval-augmented-generation | src~reranker~bge.py | from __future__ import annotations
import os
from typing import Optional, Sequence, Tuple
from langchain.schema import Document
from langchain.pydantic_v1 import Extra
from langchain.callbacks.manager import Callbacks
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from sentence_transformers import CrossEncoder
from src import CFG
class BGEReranker(BaseDocumentCompressor):
"""Reranker based on BGE-reranker (https://huggingface.co/BAAI/bge-reranker-base)."""
model_path: str = os.path.join(CFG.MODELS_DIR, CFG.BGE_PATH)
top_n: int = 4
"""Number of documents to return."""
model: CrossEncoder = CrossEncoder(model_path)
"""CrossEncoder instance to use for reranking."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Compress documents using BAAI/bge-reranker models.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
if len(documents) == 0: # to avoid empty api call
return []
doc_list = list(documents)
_docs = [d.page_content for d in doc_list]
results = self.rerank(query, _docs)
final_results = []
for r in results:
doc = doc_list[r[0]]
doc.metadata["relevance_score"] = r[1]
final_results.append(doc)
return final_results
def rerank(self, query: str, docs: Sequence[str]) -> Sequence[Tuple[int, float]]:
"""
Reranks a list of documents based on a given query using a pre-trained model.
Args:
query: The query string.
docs: The list of documents to be reranked.
Returns:
A list of tuples containing the index of the document and its reranked score.
"""
model_inputs = [[query, doc] for doc in docs]
scores = self.model.predict(model_inputs)
results = sorted(enumerate(scores), key=lambda x: x[1], reverse=True)
return results[: self.top_n]
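# Minimal usage sketch; the query and documents below are illustrative assumptions.
if __name__ == "__main__":
    reranker = BGEReranker(top_n=2)
    docs = [
        Document(page_content="Paris is the capital of France."),
        Document(page_content="The mitochondria is the powerhouse of the cell."),
        Document(page_content="France is a country in Western Europe."),
    ]
    for doc in reranker.compress_documents(docs, "Where is Paris?"):
        print(doc.metadata["relevance_score"], doc.page_content)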
| [] |
2024-01-10 | kesamet/retrieval-augmented-generation | src~reranker~bm25.py | from typing import Optional, Sequence, Tuple
from rank_bm25 import BM25Okapi
from transformers import AutoTokenizer
from langchain.schema import Document
from langchain.pydantic_v1 import Extra
from langchain.callbacks.manager import Callbacks
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
class BM25Reranker(BaseDocumentCompressor):
"""Reranker based on BM25."""
tokenizer: AutoTokenizer = AutoTokenizer.from_pretrained("gpt2")
top_n: int = 4
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Compress documents using gpt2 and BM25.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
if len(documents) == 0: # to avoid empty api call
return []
doc_list = list(documents)
_docs = [d.page_content for d in doc_list]
results = self.rerank(query, _docs)
final_results = []
for r in results:
doc = doc_list[r[0]]
doc.metadata["relevance_score"] = r[1]
final_results.append(doc)
return final_results
def rerank(self, query: str, docs: Sequence[str]) -> Sequence[Tuple[int, float]]:
"""
Reranks a list of documents based on a given query using a pre-trained model.
Args:
query: The query string.
docs: The list of documents to be reranked.
Returns:
A list of tuples containing the index of the document and its reranked score.
"""
# tokenize content for bm25 instance
tokenized_content = self.tokenizer(docs).input_ids
# tokenize query
tokenized_query = self.tokenizer([query]).input_ids[0]
bm25 = BM25Okapi(tokenized_content)
scores = bm25.get_scores(tokenized_query)
results = sorted(enumerate(scores), key=lambda x: x[1], reverse=True)
return results[: self.top_n]
| [] |
2024-01-10 | kesamet/retrieval-augmented-generation | app_conv.py | import streamlit as st
from langchain.callbacks import StreamlitCallbackHandler
from src import CFG
from src.embeddings import build_base_embeddings
from src.llms import build_llm
from src.retrieval_qa import build_retrieval_chain
from src.vectordb import build_vectordb, load_faiss, load_chroma
from streamlit_app.utils import perform
st.set_page_config(page_title="Conversational Retrieval QA")
if "uploaded_filename" not in st.session_state:
st.session_state["uploaded_filename"] = ""
def init_chat_history():
"""Initialise chat history."""
clear_button = st.sidebar.button("Clear Conversation", key="clear")
if clear_button or "chat_history" not in st.session_state:
st.session_state["chat_history"] = list()
st.session_state["source_documents"] = list()
@st.cache_resource
def load_retrieval_chain():
embeddings = build_base_embeddings()
if CFG.VECTORDB_TYPE == "faiss":
vectordb = load_faiss(embeddings)
elif CFG.VECTORDB_TYPE == "chroma":
vectordb = load_chroma(embeddings)
llm = build_llm()
return build_retrieval_chain(vectordb, llm)
def doc_conv_qa():
with st.sidebar:
st.title("Conversational DocQA using quantized LLM")
st.info(f"Running on {CFG.DEVICE}")
uploaded_file = st.file_uploader(
"Upload a PDF and build VectorDB", type=["pdf"]
)
if st.button("Build VectorDB"):
if uploaded_file is None:
st.error("No PDF uploaded")
else:
with st.spinner("Building VectorDB..."):
perform(build_vectordb, uploaded_file.read())
st.session_state.uploaded_filename = uploaded_file.name
if st.session_state.uploaded_filename != "":
st.info(f"Current document: {st.session_state.uploaded_filename}")
try:
with st.status("Load retrieval_chain", expanded=False) as status:
st.write("Loading retrieval_chain...")
retrieval_chain = load_retrieval_chain()
status.update(
label="Loading complete!", state="complete", expanded=False
)
st.success("Reading from existing VectorDB")
except Exception:
st.error("No existing VectorDB found")
st.sidebar.write("---")
init_chat_history()
# Display chat history
for (question, answer), source_documents in zip(
st.session_state.chat_history, st.session_state.source_documents
):
if question != "":
with st.chat_message("user"):
st.markdown(question)
with st.chat_message("assistant"):
st.markdown(answer)
with st.expander("Retrieved extracts"):
for row in source_documents:
st.write("**Page {}**".format(row.metadata["page"] + 1))
st.info(row.page_content)
if user_query := st.chat_input("Your query"):
with st.chat_message("user"):
st.markdown(user_query)
if user_query is not None:
with st.chat_message("assistant"):
st_callback = StreamlitCallbackHandler(
parent_container=st.container(),
expand_new_thoughts=True,
collapse_completed_thoughts=True,
)
response = retrieval_chain(
{
"question": user_query,
"chat_history": st.session_state.chat_history,
},
callbacks=[st_callback],
)
st_callback._complete_current_thought()
st.markdown(response["answer"])
with st.expander("Retrieved extracts"):
for row in response["source_documents"]:
st.write("**Page {}**".format(row.metadata["page"] + 1))
st.info(row.page_content)
st.session_state.chat_history.append(
(response["question"], response["answer"])
)
st.session_state.source_documents.append(response["source_documents"])
if __name__ == "__main__":
doc_conv_qa()
| [] |
2024-01-10 | kesamet/retrieval-augmented-generation | src~elements~propositionizer.py | import json
import os
from typing import List, Union
import torch
from langchain.schema import Document
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from torch.utils.data import DataLoader, Dataset
from src import CFG
class Propositionizer:
"""Based on https://github.com/chentong0/factoid-wiki."""
def __init__(self):
model_path = os.path.join(
CFG.MODELS_DIR, "models/propositionizer-wiki-flan-t5-large"
)
self.tokenizer = AutoTokenizer.from_pretrained(model_path)
self.model = AutoModelForSeq2SeqLM.from_pretrained(model_path).to(CFG.DEVICE)
self.model.eval()
def _predict(self, texts: Union[str, List[str]]) -> List[str]:
        inputs = self.tokenizer(
            texts, padding=True, truncation=True, return_tensors="pt"
        ).to(CFG.DEVICE)
        with torch.no_grad():
            outputs = self.model.generate(**inputs, max_new_tokens=512).cpu()
        # batch_decode returns one decoded string per input, matching the List[str] return type
        output_texts = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)
return output_texts
def generate(
self, passage: Document, title: str = "", section: str = ""
) -> List[Document]:
input_text = (
f"Title: {title}. Section: {section}. Content: {passage.page_content}"
)
output_text = self._predict(input_text)[0]
metadata = passage.metadata.copy()
return [
Document(page_content=x, metadata=metadata) for x in json.loads(output_text)
]
def batch(
self,
passages: List[Document],
title: str = "",
section: str = "",
) -> List[Document]:
data_set = DocDataset(passages, title=title, section=section)
data_loader = DataLoader(
data_set, batch_size=16, shuffle=False, drop_last=False
)
prop_texts = []
for data in data_loader:
input_texts, sources = data
output_texts = self._predict(input_texts)
for output_text, source, input_text in zip(
output_texts, sources, input_texts
):
try:
prop_texts.extend(
[
Document(page_content=x, metadata={"source": source})
for x in json.loads(output_text)
]
)
except json.JSONDecodeError:
prop_texts.append(
Document(page_content=input_text, metadata={"source": source})
)
return prop_texts
class DocDataset(Dataset):
def __init__(self, passages: List[Document], title: str = "", section: str = ""):
self.texts = [
f"Title: {title}. Section: {section}. Content: {passage.page_content}"
for passage in passages
]
self.sources = [passage.metadata["source"] for passage in passages]
def __len__(self):
return len(self.texts)
def __getitem__(self, idx):
return self.texts[idx], self.sources[idx]
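# Minimal usage sketch; the passage text, title, and section are illustrative assumptions.
if __name__ == "__main__":
    prop = Propositionizer()
    passage = Document(
        page_content="Ludwig van Beethoven was a German composer. He wrote nine symphonies.",
        metadata={"source": "example"},
    )
    for doc in prop.generate(passage, title="Beethoven", section="Overview"):
        print(doc.page_content)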
| [] |
2024-01-10 | kesamet/retrieval-augmented-generation | src~elements~extract.py | import io
import os
import logging
from typing import List
import fitz
from PIL import Image
from langchain.schema import Document
logging.basicConfig(level=logging.INFO)
def extract_images(filename: str, image_output_dir_path: str) -> None:
"""Extract images from PDF."""
pdf_file = fitz.open(filename)
for page in pdf_file:
image_list = page.get_images()
if image_list:
logging.info(f"[+] Found {len(image_list)} images on page {page.number}")
else:
logging.info(f"[!] No images found on page {page.number}")
for image_index, img in enumerate(page.get_images(), start=1):
xref = img[0]
base_image = pdf_file.extract_image(xref)
image_bytes = base_image["image"]
image_ext = base_image["ext"]
image = Image.open(io.BytesIO(image_bytes))
image.save(
os.path.join(
image_output_dir_path,
f"figure-{page.number}-{image_index}.{image_ext}",
)
)
def save_pages_as_images(filename: str, image_output_dir_path: str) -> None:
pdf_file = fitz.open(filename)
for page in pdf_file:
pix = page.get_pixmap()
pix.save(os.path.join(image_output_dir_path, f"page-{page.number}.png"))
def extract_tables(filename: str) -> List[Document]:
"""Extract tables from PDF."""
pdf_file = fitz.open(filename)
table_docs = list()
for page in pdf_file:
tabs = page.find_tables()
logging.info(f"[+] Found {len(tabs.tables)} table(s) on page {page.number}")
for tab in tabs:
try:
df = tab.to_pandas()
if df.shape == (1, 1):
logging.info(" [!] dataframe shape is (1, 1)")
continue
d = Document(
page_content=df.to_json(),
metadata={"source": filename, "page": page.number},
)
table_docs.append(d)
except Exception:
logging.info(" [!] unable to convert to dataframe")
return table_docs
| [] |
2024-01-10 | crowdbotics-apps/review-updated-modu-43072 | backend~modules~django_openai~openai~viewsets.py | import os
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.parsers import JSONParser, MultiPartParser
from .services.OpenAIServices import OpenAIService
from .serializers import CreateChatCompletionSerializer, CreateCompletionSerializer, CreateEditsSerializer, \
CreateImageSerializer, CreateTranscriptionSerializer, CreateTranslationSerializer
class OpenAiViewSet(viewsets.GenericViewSet):
parser_classes = (JSONParser, MultiPartParser)
openai_service = OpenAIService(
base_url=os.getenv('OPENAI_BASE_URL', "https://api.openai.com"),
openai_api_key=os.getenv('OPENAI_API_KEY', ""),
)
allowed_serializer = {
"create_a_chat_completion": CreateChatCompletionSerializer,
"create_a_completion": CreateCompletionSerializer,
"create_an_edit": CreateEditsSerializer,
"create_an_image_generations": CreateImageSerializer,
"create_an_audio_translation": CreateTranscriptionSerializer,
"create_an_audio_transcription": CreateTranslationSerializer
}
def get_serializer_class(self):
return self.allowed_serializer.get(self.action)
@action(detail=False, methods=['get'], url_path='models')
def get_models_list(self, request):
"""
To get the models list
:return: Returns the list of all models.
"""
response = self.openai_service.list_models()
return Response(data=response.get("data"), status=response.get("status_code"))
@action(detail=False, methods=['get'], url_path='models/(?P<model_id>[^/.]+)')
def get_model_detail(self, request, model_id):
"""
To get the models details
:path_params: model_id
:return: Returns models details.
"""
response = self.openai_service.retrieve_model(model_id)
return Response(data=response.get("data"), status=response.get("status_code"))
@action(detail=False, methods=['get'], url_path='engines')
def get_engine_list(self, request):
"""
To get the engine list
:return: Returns list of engine.
"""
response = self.openai_service.list_engine()
return Response(data=response.get("data"), status=response.get("status_code"))
@action(detail=False, methods=['get'], url_path='engines/(?P<engine_id>[^/.]+)')
def get_engine_detail(self, request, engine_id):
"""
To get the engine details'
:path_params: engine_id
:return: Returns models details.
"""
response = self.openai_service.retrieve_engine(engine_id)
return Response(data=response.get("data"), status=response.get("status_code"))
@action(detail=False, methods=['post'], url_path='chat/completions')
def create_a_chat_completion(self, request):
"""
To create a chat completion
:payload: {"model": "", "messages": ["role": "", "content": ""]}
:return: Returns a newly created chat completion detail.
"""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
response = self.openai_service.create_chat_completion(payload=serializer.data)
return Response(data=response.get("data"), status=response.get("status_code"))
@action(detail=False, methods=['post'], url_path='completion')
def create_a_completion(self, request):
"""
To create a completion
:payload: {"model": "", "prompt": "", "max_tokens": "", "temperature": ""}
:return: Returns a newly created completion.
"""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
response = self.openai_service.create_completion(payload=serializer.data)
return Response(data=response.get("data"), status=response.get("status_code"))
@action(detail=False, methods=['post'], url_path='edits')
def create_an_edit(self, request):
"""
To create an edit
:payload: {"model": "", "input": "", "instruction": ""}
:return: Returns a newly created edit with detail.
"""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
response = self.openai_service.create_edit(payload=serializer.data)
return Response(data=response.get("data"), status=response.get("status_code"))
@action(detail=False, methods=['post'], url_path='images/generations')
def create_an_image_generations(self, request):
"""
To create an image generation
:payload: {"prompt": "", "n": int, "size": ""}
:return: Returns a newly created image url.
"""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
response = self.openai_service.create_image(payload=serializer.data)
return Response(data=response.get("data"), status=response.get("status_code"))
@action(detail=False, methods=['post'], url_path='audio/transcription')
def create_an_audio_transcription(self, request):
"""
To create a transcription
:payload: {"file": "", "model": ""}
:return: Returns a newly created transcription text.
"""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
response = self.openai_service.create_transcription(payload=serializer.validated_data)
return Response(data=response.get("data"), status=response.get("status_code"))
@action(detail=False, methods=['post'], url_path='audio/translation')
def create_an_audio_translation(self, request):
"""
To create a translation
:payload: {"file": "", "model": ""}
:return: Returns a newly created translation text.
"""
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
response = self.openai_service.create_translation(payload=serializer.validated_data)
return Response(data=response.get("data"), status=response.get("status_code")) | [] |
2024-01-10 | jasmeanfernando/BatchP2FA | p2fa~run_p2fa.py | """
Class that compiles the necessary speech recognition data and automates P2FA's align.py to
generate 500+ batches of data sets.
We use OpenAI's Whisper model to transcribe an audio file and create a transcription file
that is stored to P2FA's input directory. We then use both the audio and transcription file
as a singular input pair to generate its TextGrid file.
Each input pair should have the same exact name, specifically participant#_p14_trial#.txt;
Must be labeled p14 to signify cases detailing contrastive stress production collected
by Language Acquisition and Processing Lab.
"""
import os
from pathlib import Path
import openai
# Import OpenAI API Key.
openai.api_key = ''
# Obtain local directory containing audio files.
audio_dir = Path('/Users/jasmeanfernando/PycharmProjects/BatchP2FA/p2fa/input_wav')
# Obtain local directory to store transcription files.
transcription_dir = Path('/Users/jasmeanfernando/PycharmProjects/BatchP2FA/p2fa/input_txt')
# List of tuples containing .wav, .txt, and .TextGrid files.
arglist = []
for audio_file in os.listdir(audio_dir):
# Base Case: Check if .wav file.
if audio_file.endswith(".wav"):
# Obtain audio_file name.
audio_file_name = audio_file.split(".")[0]
# Open audio.
wav_path = audio_dir.joinpath(audio_file)
audio = open(wav_path, "rb")
# Transcribe audio.
transcription = openai.Audio.transcribe("whisper-1", audio)
print(transcription["text"])
# Initialize .txt file.
transcription_file = audio_file_name + '.txt'
txt_path = transcription_dir.joinpath(transcription_file)
# Base Case: Check if .txt file /already/ exists.
if txt_path.is_file():
print("File already exists, cannot re-write.")
else:
# Write and store .txt file.
with open(txt_path, "w") as file:
file.write(str(transcription["text"]))
print("Creating...", transcription_file)
# Initialize .TextGrid file.
textgrid_file = audio_file_name + '.TextGrid'
# Append .wav, .txt, and .TextGrid files.
arglist.append([audio_file, transcription_file, textgrid_file])
i = 0
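# Run P2FA's align.py once per (wav, txt) pair to produce the matching TextGrid in output_textgrid/.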
for vars in arglist:
print('Creating...' + arglist[i][2])
os.system('python3 align.py input_wav/' + arglist[i][0] + ' input_txt/' + arglist[i][1] + ' output_textgrid/' + arglist[i][2])
i = i + 1 | [] |
2024-01-10 | afnanmmir/ut_course_search | ut_course_search_back~server_app~db~pinecone~insert_data.py | import openai
import os
from dotenv import load_dotenv
from tqdm.auto import tqdm
import pickle
import pinecone
from time import sleep
import sys
import random
class Course():
def __init__(self, title: str, description: str, plain_text: str):
self.title = title
self.description = description
self.plain_text = plain_text
# Create getter and setter methods for each attribute
def get_title(self):
return self.title
def set_title(self, title):
self.title = title
def get_description(self):
return self.description
def set_description(self, description):
self.description = description
def get_plain_text(self):
return self.plain_text
def set_plain_text(self, plain_text):
self.plain_text = plain_text
# Create a method to return a dictionary of the object
def to_dict(self):
return {
'title': self.title,
'description': self.description,
'plain_text': self.plain_text
}
def __str__(self):
return f"Course(title={self.title}, description={self.description}, plain_text={self.plain_text})"
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")
PINECONE_ENVIRONMENT = os.getenv("PINECONE_ENV")
EMBED_MODEL = "text-embedding-ada-002"
EMBED_DIM = 1536
batch_size = 25 # how many embeddings we create and insert at once
with open("../../data/plain_texts_courses.pkl", "rb") as f:
courses_list = pickle.load(f)
def find_second_period(string):
try:
first_period = string.index(".") # Find the index of the first period
second_period = string.index(".", first_period + 1) # Find the index of the second period
return second_period
except ValueError:
return -1 # If no period found or only one period exists
def create_courses_list(texts):
courses_list = []
for text in texts:
second_period_index = find_second_period(text)
title = text[0:second_period_index + 1]
description = text[second_period_index + 1:]
courses_list.append(Course(title, description, text))
# print(str(courses_list[-1]))
return courses_list
# courses_list = create_courses_list(courses)
index_name = "ut-courses"
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENVIRONMENT
)
if index_name not in pinecone.list_indexes():
pinecone.create_index(
name=index_name,
dimension=EMBED_DIM,
metric="cosine",
)
index = pinecone.Index(index_name)
delay = 1
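# On an embedding-API error the current batch is retried after a jittered, exponentially
# growing delay (note the delay is never reset between batches).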
for i in tqdm(range(0, len(courses_list), batch_size)):
i_end = min(len(courses_list), i + batch_size) # find end of batch
ids_batch = [str(i) for i in range(i, i_end)] # create a list of ids
meta_batch = courses_list[i:i_end] # create a list of metadata
texts = [course.get_plain_text() for course in meta_batch]
try:
res = openai.Embedding.create(input=texts, engine=EMBED_MODEL)
except Exception as e:
print(f"Error: {e}")
done = False
while not done:
delay *= 2 * (1 + 1*random.random())
sleep(delay)
try:
res = openai.Embedding.create(input=texts, engine=EMBED_MODEL)
done = True
except Exception as e:
print(f"Error: {e}")
pass
embeds = [record["embedding"] for record in res["data"]]
meta_batch = [{
'title': course.get_title(),
'description': course.get_description(),
} for course in meta_batch]
to_upsert = list(zip(ids_batch, embeds, meta_batch))
num_bytes = sys.getsizeof(to_upsert)
# print(f"Uploading {num_bytes} bytes to Pinecone")
index.upsert(vectors=to_upsert)
| [] |
2024-01-10 | afnanmmir/ut_course_search | ut_course_search_back~server_app~db~pinecone~index_methods.py | import pinecone
from dotenv import load_dotenv
import os
from openai import OpenAI
def init_pinecone(index_name):
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
PINECONE_API_KEY = os.getenv("PINECONE_API_KEY")
PINECONE_ENVIRONMENT = os.getenv("PINECONE_ENV")
EMBED_DIM = 1536
pinecone.init(
api_key=PINECONE_API_KEY,
environment=PINECONE_ENVIRONMENT
)
if index_name not in pinecone.list_indexes():
pinecone.create_index(
name=index_name,
dimension=EMBED_DIM,
metric="cosine",
)
pinecone_index = pinecone.Index(index_name)
return pinecone_index
# def query_index(pinecone_index, query, top_k=20):
# client = OpenAI(
# # This is the default and can be omitted
# api_key=os.environ.get("OPENAI_API_KEY"),
# )
# EMBED_MODEL = "text-embedding-ada-002"
# res_embed = openai.Embedding.create(input=[query], model=EMBED_MODEL)
# query_embedding = res_embed['data'][0]['embedding']
# res_query = pinecone_index.query(queries=[query_embedding], top_k=top_k, include_metadata=True)
# return res_query
def extract_context_from_match(match):
course_title = match['metadata']['title']
course_description = match['metadata']['description']
course_plain_text = f"{course_title}: {course_description}\n"
return course_plain_text
def extract_context_from_course(course):
course_title = course['title']
course_description = course['description']
course_plain_text = f"{course_title}: {course_description}\n"
return course_plain_text
def extract_context(request_result, top_k = 20, similarity_threshold = 0.79):
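# top_k and similarity_threshold are only used by the commented-out Pinecone scoring path
# below; the active code simply concatenates every course in request_result['response'].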
# matches = request_result['results'][0]['matches'][0:top_k]
# context_text = ""
# for match in matches:
# if(match['score'] > similarity_threshold):
# context_text += extract_context_from_match(match)
# return context_text
context = ""
for match in request_result['response']:
context += extract_context_from_course(match)
return context
def create_query(query: str, context: str):
formatted_query = (
"Here are the context information about classes at the University of Texas at Austin for the query:"
"\n-------------------------------------------\n"
f"{context}"
"\n-------------------------------------------\n"
f"Answer the following questions based on the context above. If the context does not help you, please answer with 'Sorry, I could not find any information on this query':{query}\n"
)
return formatted_query
def query_chat(context_text: str, query: str):
client = OpenAI(
# This is the default and can be omitted
api_key=os.getenv("OPENAI_API_KEY"),
)
query_with_context = create_query(query, context_text)
messages = [
{"role": "system", "content": "Your job is to only give advice about what classes to take at the University of Texas at Austin given the query. This includes career advice, course advice, and professor advice."},
{"role": "system", "content": "If the query does not pertain to this topic, answer with 'I apologize, but that is not related to the University of Texas Course Catalog. As an AI language model, I can answer queries related to courses, professors, and departments at the University of Texas. Please let me know if you have any course-related questions.'"},
{"role": "user", "content": query_with_context},
]
response = client.chat.completions.create(
messages=messages,
model="gpt-3.5-turbo",
)
return response
| [
"Your job is to only give advice about what classes to take at the University of Texas at Austin given the query. This includes career advice, course advice, and professor advice.",
"If the query does not pertain to this topic, answer with 'I apologize, but that is not related to the University of Texas Course Catalog. As an AI language model, I can answer queries related to courses, professors, and departments at the University of Texas. Please let me know if you have any course-related questions.'"
] |
2024-01-10 | mohamed-elghafiani/2023-GenAI-Hackathon | codify~retieve_student_chat.py | from openai import OpenAI
import shelve
from gradio_client import Client
from dotenv import load_dotenv
import os
load_dotenv()
OPEN_AI_API_KEY = os.getenv("OPEN_AI_API_KEY")
client = OpenAI(api_key=OPEN_AI_API_KEY)
def retrieve_chat(wa_id):
with shelve.open("threads_db") as threads_shelf:
thread_id = threads_shelf.get(wa_id, None)
if thread_id:
# thread = client.beta.threads.retrieve(thread_id)
messages = client.beta.threads.messages.list(thread_id=thread_id, limit=100)
print(messages.data)
chat = []
for message_ in messages:
message = {}
message["role"] = message_.role
message["content"] = message_.content[0].text.value
chat.append(message)
return chat
def evaluate_student(user_id):
"""Evaluates the student based on the Conversation between him and the chatbot"""
conversation = retrieve_chat(user_id)
full_chat = ""
if conversation:
for chat in conversation:
full_chat += f'{chat["role"]}: {chat["content"]}'
prompt = f"""Evaluate the student's performance in solving the coding task based on the conversation below. Offer insights into their level, identify difficulties, and provide specific recommendations for improvement:
@conversation
{full_chat}
Evaluate the student's understanding of Python basics and command line arguments. Highlight specific areas of struggle and offer succinct suggestions for improvement.
"""
client = Client("https://ise-uiuc-magicoder-s-ds-6-7b.hf.space/--replicas/dccwp/")
response = client.predict(
prompt,
0, # Temperature
2048, # Max Tokens
api_name="/evaluate_magicoder"
)
return response
# print(evaluate_student("task_123.mohamed")) | [
"Evaluate the student's performance in solving the coding task based on the conversation below. Offer insights into their level, identify difficulties, and provide specific recommendations for improvement:\n @conversation\n PLACEHOLDER\n Evaluate the student's understanding of Python basics and command line arguments. Highlight specific areas of struggle and offer succinct suggestions for improvement.\n "
] |
2024-01-10 | AliKaanT/book-finder | src~core.py | from langchain.agents import load_tools, AgentType, initialize_agent
from langchain import PromptTemplate
from langchain.agents import Tool
from langchain.llms import OpenAI
from dotenv import load_dotenv
from pydantic import BaseModel, Field
from serpapi import GoogleSearch
import os
import json
load_dotenv()
def find_price_w_serpapi(name: str) -> {"price": int, "url": str}:
params = {
"q": name,
"hl": "tr",
"gl": "tr",
"google_domain": "google.com.tr",
"api_key": os.getenv("SERPAPI_API_KEY"),
}
search = GoogleSearch(params)
results = search.get_dict()
organic_results = results.get("organic_results")
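# Walk the organic results and return the first one whose rich snippet exposes a detected price, together with its URL.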
for result in organic_results:
try:
price = (
result.get("rich_snippet")
.get("top")
.get("detected_extensions")
.get("price")
)
url = result.get("link")
return {
"price": int(price),
"url": url,
}
except:
continue
return "ERROR. STOP IMMEDIATELY."
def core(user_input: str) -> str:
"""Core function of the application. Takes user input and returns a response."""
llm = OpenAI(temperature=0.2)
class Find_Price_And_Url(BaseModel):
name: str = Field(..., description="Name of the book")
tools = load_tools(
["serpapi"],
llm=llm,
)
tools.append(
Tool.from_function(
name="Find_Price_And_Url",
description="Useful when you need to find the price of the book in a website. Use Turkish name of the book.",
args_schema=Find_Price_And_Url,
func=find_price_w_serpapi,
),
)
agent = initialize_agent(
tools=tools,
llm=llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
)
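# The zero-shot ReAct agent decides at run time whether to use the SerpAPI search tool or the custom Find_Price_And_Url tool.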
bookFinder = PromptTemplate.from_template(
template="""
Book: {book}.
1 - Find the name of turkish version of the book.
2 - Search the internet with its Turkish name and find a website that sells it.
3 - Print the author,name,turkish_name, price and url of the book as Python dictionary like below. Make sure it is valid JSON.:
{{"author": "Author", "name": "Book", "turkish_name": "Turkish version name", "price": 0,"url" : "First url that you find"}}
"""
)
prompt = bookFinder.format(book=user_input)
response = agent.run(prompt)
return json.loads(response)
# print(core("Atomic Habits"))
| [
"\n Book: {book}.\n 1 - Find the name of turkish version of the book.\n 2 - Search the internet with its Turkish name and find a website that sells it.\n 3 - Print the author,name,turkish_name, price and url of the book as Python dictionary like below. Make sure it is valid JSON.:\n {{\"author\": \"Author\", \"name\": \"Book\", \"turkish_name\": \"Turkish version name\", \"price\": 0,\"url\" : \"First url that you find\"}}\n "
] |
2024-01-10 | Kevin7744/RAG-search | extract_Information.py | """
This app answers questions based on custom data using vector search and openai
Packages:
pip install langchain pymongo bs4 openai tiktoken gradio requests lxml argparse unstructured
"""
from pymongo import MongoClient
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import MongoDBAtlasVectorSearch
from langchain.document_loaders import DirectoryLoader
from langchain.llms import OpenAI
from langchain.chains import RetrievalQA
import gradio as gr
from gradio.themes.base import Base
import key_param
client = MongoClient(key_param.MONGODB_URI)
dbName = "langchain_demo"
collectionName = "collection_of_text_blobs"
collection = client[dbName][collectionName]
# Initialize the directory loader
loader = DirectoryLoader('./Sample_Files', glob="./*.txt", show_progress=True)
data = loader.load()
embeddings = OpenAIEmbeddings(openai_api_key = key_param.openai_api_key)
# store the data to mongodb collection
vectorStore = MongoDBAtlasVectorSearch.from_documents(data, embeddings, collection=collection)
# 2. Second step
embeddings = OpenAIEmbeddings(openai_api_key = key_param.openai_api_key)
vectorStore = MongoDBAtlasVectorSearch(collection, embeddings)
def query_data(query):
docs = vectorStore.similarity_search(query, k=1)
as_output = docs[0].page_content
llm = OpenAI(openai_api_key=key_param.openai_api_key, temperature=0)
retriever = vectorStore.as_retriever()
qa = RetrievalQA.from_chain_type(llm, chain_type="stuff", retriever=retriever)
retriever_output = qa.run(query)
return as_output, retriever_output
with gr.Blocks(theme=Base(), title="Question answering app using vector search + RAG") as demo:
gr.Markdown(
"""
# Question Answering App using atlas Vector search + RAG architecture
""")
textbox = gr.Textbox(label="Enter your question: ")
with gr.Row():
button = gr.button("Submit", variant="Primary")
with gr.Column():
output1 = gr.Textbox(lines=1, max_lines=10, label= "Output with just Atlas Vector Search (return text field as is)")
output2 = gr.Textbox(lines=1, max_lines=10, label= "Output generated by chaining Atlas Vector Search to langchain's RetrieverQA + OpenAI LLM:")
button.click(query_data, textbox, outputs=[output1, output2])
demo.launch() | [] |
2024-01-10 | maxpark/Auto-GPT | autogpts~autogpt~tests~unit~test_retry_provider_openai.py | import logging
import pytest
from openai.error import APIError, RateLimitError, ServiceUnavailableError
from autogpt.llm.providers import openai
from autogpt.logs.config import USER_FRIENDLY_OUTPUT_LOGGER
@pytest.fixture(params=[RateLimitError, ServiceUnavailableError, APIError])
def error(request):
if request.param == APIError:
return request.param("Error", http_status=502)
else:
return request.param("Error")
def error_factory(error_instance, error_count, retry_count, warn_user=True):
"""Creates errors"""
class RaisesError:
def __init__(self):
self.count = 0
@openai.retry_api(
max_retries=retry_count, backoff_base=0.001, warn_user=warn_user
)
def __call__(self):
self.count += 1
if self.count <= error_count:
raise error_instance
return self.count
return RaisesError()
def test_retry_open_api_no_error(caplog: pytest.LogCaptureFixture):
"""Tests the retry functionality with no errors expected"""
@openai.retry_api()
def f():
return 1
result = f()
assert result == 1
output = caplog.text
assert output == ""
assert output == ""
@pytest.mark.parametrize(
"error_count, retry_count, failure",
[(2, 10, False), (2, 2, False), (10, 2, True), (3, 2, True), (1, 0, True)],
ids=["passing", "passing_edge", "failing", "failing_edge", "failing_no_retries"],
)
def test_retry_open_api_passing(
caplog: pytest.LogCaptureFixture,
error: Exception,
error_count: int,
retry_count: int,
failure: bool,
):
"""Tests the retry with simulated errors [RateLimitError, ServiceUnavailableError, APIError], but should ulimately pass"""
# Add capture handler to non-propagating logger
logging.getLogger(USER_FRIENDLY_OUTPUT_LOGGER).addHandler(caplog.handler)
call_count = min(error_count, retry_count) + 1
raises = error_factory(error, error_count, retry_count)
if failure:
with pytest.raises(type(error)):
raises()
else:
result = raises()
assert result == call_count
assert raises.count == call_count
output = caplog.text
if error_count and retry_count:
if type(error) == RateLimitError:
assert "Reached rate limit" in output
assert "Please double check" in output
if type(error) == ServiceUnavailableError:
assert "The OpenAI API engine is currently overloaded" in output
assert "Please double check" in output
else:
assert output == ""
def test_retry_open_api_rate_limit_no_warn(caplog: pytest.LogCaptureFixture):
"""Tests the retry logic with a rate limit error"""
error_count = 2
retry_count = 10
raises = error_factory(RateLimitError, error_count, retry_count, warn_user=False)
result = raises()
call_count = min(error_count, retry_count) + 1
assert result == call_count
assert raises.count == call_count
output = caplog.text
assert "Reached rate limit" in output
assert "Please double check" not in output
def test_retry_open_api_service_unavairable_no_warn(caplog: pytest.LogCaptureFixture):
"""Tests the retry logic with a service unavairable error"""
error_count = 2
retry_count = 10
raises = error_factory(
ServiceUnavailableError, error_count, retry_count, warn_user=False
)
result = raises()
call_count = min(error_count, retry_count) + 1
assert result == call_count
assert raises.count == call_count
output = caplog.text
assert "The OpenAI API engine is currently overloaded" in output
assert "Please double check" not in output
def test_retry_openapi_other_api_error(caplog: pytest.LogCaptureFixture):
"""Tests the Retry logic with a non rate limit error such as HTTP500"""
error_count = 2
retry_count = 10
raises = error_factory(APIError("Error", http_status=500), error_count, retry_count)
with pytest.raises(APIError):
raises()
call_count = 1
assert raises.count == call_count
output = caplog.text
assert output == ""
| [] |
2024-01-10 | maxpark/Auto-GPT | autogpts~autogpt~autogpt~json_utils~utilities.py | """Utilities for the json_fixes package."""
import ast
import logging
from typing import Any
logger = logging.getLogger(__name__)
def extract_dict_from_response(response_content: str) -> dict[str, Any]:
# Sometimes the response includes the JSON in a code block with ```
if response_content.startswith("```") and response_content.endswith("```"):
# Discard the first and last ```, then re-join in case the response naturally included ```
response_content = "```".join(response_content.split("```")[1:-1])
# response content comes from OpenAI as a Python `str(content_dict)`, literal_eval reverses this
try:
return ast.literal_eval(response_content)
except BaseException as e:
logger.info(f"Error parsing JSON response with literal_eval {e}")
logger.debug(f"Invalid JSON received in response: {response_content}")
# TODO: How to raise an error here without causing the program to exit?
return {}
| [] |
2024-01-10 | BotzillaX/ImmobilienBot | ueberarbeiteter%20Bot.py | import pyautogui as py
import time
import clipboard
import cv2
import keyboard
import openai
from datetime import datetime
import csv
from datetime import datetime
import pandas as pd
import os
import telebot
import json
import sys
originalerUrl = ""
running = True
timestop = 0.2
timestopAktualisieren = 0.5
script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
dateipfad = os.path.join(script_dir, "Informationen für den Bot", "persönliche Daten.json")
with open(dateipfad, 'r') as json_datei:
eigeneDaten = json.load(json_datei)
dateipfad = os.path.join(script_dir, "Informationen für den Bot", "Keys.json")
with open(dateipfad, 'r') as json_datei:
keys = json.load(json_datei)
#sending the application still needs to be enabled once the test phase is finished
#what you need:
#screen resolution: 2560x1440
#scaling: 150%
#Opera browser
#Immoscout24 PREMIUM ACCOUNT (important!)
#zoom in Opera at 100% # ctrl + mouse wheel up or down
#Opera in dark mode
#deactivate swap listings ("Tauschwohnungen") #nothing was programmed for this
#activate an adblocker
#have no other tabs open
#have no other programs open
#have no other windows open
#have no other monitors
#have no other browsers open
#have no other programs running in the background
#a budget of at least 5€ for Berlin users // roughly 1 cent covers 5-7 applications via ChatGPT depending on the length of the cover letter (letting ChatGPT choose the output length is not recommended under any circumstances as long as ChatGPT 5.0 does not exist yet)
#to learn more about the costs, see this page: #https://gptforwork.com/guides/openai-gpt3-models
#quit the script with ctrl+alt+shift
#watch the video or ask me personally if anything is unclear
#personal data
premiumOrNot = str(eigeneDaten["premium"]).lower() #yes or no #better off just getting yourself a premium account, and I'm not kidding.
Your_API_KEY_Tele = keys["Your_API_KEY_Tele"] #your Telegram API key #https://core.telegram.org/bots/api#authorizing-your-bot
API_KEY = keys["API_KEY"] #your OpenAI API key #https://platform.openai.com/account/api-keys
MODEL_NAME = keys["MODEL_NAME"] #which ChatGPT model should be used? please do not change this if you are not sure what you are doing #https://gptforwork.com/guides/openai-gpt3-models
#REPLACE THE DATA WITH THE JSON FILES
###################################################################################################################################################################################################################################
#FROM THIS POINT ON, ONLY MAKE CHANGES IF YOU KNOW WHAT YOU ARE DOING!!!!
def greatReset(id):
#if active
keyboard.press_and_release("ctrl+l")
time.sleep(timestop)
clipboard.copy(originalerUrl)
time.sleep(timestop)
keyboard.press_and_release("ctrl+v")
time.sleep(timestop)
keyboard.press_and_release("enter")
main(id)
def überprüfungAufRoboter(id, indetificationSiteRoboter):
if running == True:
time.sleep(1)
max_loc, max_val = lookingForPixel("Roboter")
if max_val > 0.90:
print("Roboter erkannt")
bot.send_message(id, "There is a captcha on the page, please solve it and then write 'done'")
bot.register_next_step_handler_by_chat_id(id, check_captcha, indetificationSiteRoboter)
print("5 sekunden vor exit")
time.sleep(5)
print("vor exit")
return False
else:
print("Roboter nicht erkannt")
return True
else:
print("es wurde \"beenden\" geschrieben")
def main(id):
if running == True:
while True:
for i in range(12): #12 for not triggering the bot detection (the higher the better)
time.sleep(1)
beenden()
lastStatus = aktualisierung(0, id)
if lastStatus == False:
break
status = checkHearth()#checkt, ob das erste Herz rot oder weiss ist OBEN
if status == True:
print("erstes Herz ist weiß und es wurde die Seite geöffnet")
statusTyp= schreibenDerBewerbung(id)
if statusTyp == False:
print("die Seite wurde deaktiviert oder nachricht kann nicht abgeschickt werden")
keyboard.press_and_release("alt+left")
elif statusTyp == "deaktivieren":
break
else:
pass
else:
print("es wurde \"beenden\" geschrieben")
openai.api_key = API_KEY
filePath_pictures = os.path.dirname(os.path.realpath(sys.argv[0]))+"\\"
def MouseMover():
if running == True:
py.moveTo(3, 3)
else:
print("es wurde \"beenden\" geschrieben")
def beenden2():
if running == True:
exit()
else:
print("es wurde \"beenden\" geschrieben")
def beenden():
if running == True:
if keyboard.is_pressed("ctrl+alt+shift"):
exit()
else:
print("es wurde \"beenden\" geschrieben")
def lookingForPixel(name):
if running == True:
beenden()
MouseMover()
print("looking for " + name)
time.sleep(0.2)
picture1 = py.screenshot() #region=(1446, 597, 220, 230) minimap location #start while in game
picture1.save(os.path.join(filePath_pictures, "whatsapp.jpg"))
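# Template matching: compare the reference image (Bilder/<name>.PNG) against the fresh screenshot and return the best-match location and score.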
TestJungle = cv2.imread(os.path.join(filePath_pictures, "Bilder\\", name + ".PNG"), cv2.IMREAD_UNCHANGED)
DesktopTest = cv2.imread(os.path.join(filePath_pictures, "whatsapp.jpg"), cv2.IMREAD_UNCHANGED)
nesne = cv2.cvtColor(DesktopTest, cv2.COLOR_BGR2GRAY)
nesne1 = cv2.cvtColor(TestJungle, cv2.COLOR_BGR2GRAY)
result = cv2.matchTemplate(nesne1, nesne, cv2.TM_CCOEFF_NORMED)
beenden()
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
print("done looking for " + name)
return max_loc, max_val
else:
print("es wurde \"beenden\" geschrieben")
def siteIsLoading(id, indetificationSite):
if running == True:
max_loc, max_val = lookingForPixel("nichtAktualisiert")
while max_val > 0.85:
max_loc, max_val = lookingForPixel("nichtAktualisiert")
print(max_val)
time.sleep(timestopAktualisieren)
beenden()
print("die Seite ist fertig geladen")
print("Untersuchung nach Anti-Bot")
newStatus = überprüfungAufRoboter(id, indetificationSite)
return newStatus
else:
print("es wurde \"beenden\" geschrieben")
###############
def aktualisierung(identification, id):
if running == True:
max_loc, max_val= lookingForPixel("Aktualisieren")
while max_val < 0.80:
max_loc, max_val= lookingForPixel("Aktualisieren")
print(max_val)
time.sleep(timestopAktualisieren)
beenden()
py.moveTo(max_loc[0], max_loc[1])
time.sleep(timestop)
py.click()
nextNewStatus = siteIsLoading(id, identification)
return nextNewStatus
else:
print("es wurde \"beenden\" geschrieben")
##############
def zumScrollen():
if running == True:
max_loc, max_val = lookingForPixel("zumScrollen")
while max_val < 0.80:
max_loc, max_val = lookingForPixel("zumScrollen")
print(max_val)
time.sleep(0.5)
beenden()
py.moveTo(max_loc[0], max_loc[1])
time.sleep(timestop)
py.click()
time.sleep(timestop)
py.scroll(-500)
else:
print("es wurde \"beenden\" geschrieben")
##########
def premiumOrNot1(Info):
if running == True:
if Info == "no":
zumScrollen()
time.sleep(timestop)
keyboard.press_and_release("ctrl+l")
time.sleep(timestop)
keyboard.press_and_release("ctrl+c")
time.sleep(timestop)
originalUrl = clipboard.paste()
return originalUrl
else:
if running == True:
max_loc, max_val = lookingForPixel("zumScrollen")
while max_val < 0.80:
max_loc, max_val = lookingForPixel("zumScrollen")
print(max_val)
time.sleep(0.5)
beenden()
py.moveTo(max_loc[0], max_loc[1])
time.sleep(timestop)
py.click()
time.sleep(timestop)
keyboard.press_and_release("ctrl+l")
time.sleep(timestop)
keyboard.press_and_release("ctrl+c")
time.sleep(timestop)
originalUrl = clipboard.paste()
return originalUrl
else:
print("es wurde \"beenden\" geschrieben")
def save_variables_to_csv(var1, var2, csv_path='variables_db.csv'):
if running == True:
try:
now = datetime.now()
timeString = now.strftime("%Y-%m-%d %H:%M:%S")
with open(csv_path, mode='a', newline='', encoding='utf-8') as file:
writer = csv.writer(file)
writer.writerow([timeString, var1, var2])
except Exception as e:
with open('error_log.csv', mode='a', newline='', encoding='utf-8') as file:
writer = csv.writer(file)
writer.writerow([timeString, str(e)])
else:
print("es wurde \"beenden\" geschrieben")
def read_and_sort_csv(csv_path='variables_db.csv'):
if running == True:
df = pd.read_csv(csv_path, names=['time', 'ChatGPTOutput', 'NameAdressETC'], encoding='ISO-8859-1')
df['time'] = pd.to_datetime(df['time'])
df.sort_values(by=['time'], inplace=True, ascending=False)
return df
else:
print("es wurde \"beenden\" geschrieben")
############
def checkHearth():
if running == True:
time.sleep(0.5) #um sicher zu gehen, dass die Seite 100% geladen wurde #URSPRÜNGLICH 1
max_loc, max_val = lookingForPixel("oberesHerzWeiss")
if max_val > 0.80:
py.moveTo(max_loc[0]-420, max_loc[1]+110)
print(max_val)
time.sleep(timestop)
py.click()
return True
else:
return False
else:
print("es wurde \"beenden\" geschrieben")
def checkSite():
if running == True:
max_loc, max_val = lookingForPixel("notActive")
if max_val > 0.80:
return False
else:
print("es wurde \"beenden\" geschrieben")
def checkIfNoPicture():
if running == True:
max_loc, max_val = lookingForPixel("KeinBild")
if max_val > 0.80:
return True
else:
return False
else:
print("es wurde \"beenden\" geschrieben")
def fullAdressOrNot(adressSecond):
if running == True:
if adressSecond.strip() == "Die vollständige Adresse der Immobilie erhalten Sie vom Anbieter.":
print("die vollständige Adresse ist nicht vorhanden, es wird nur die Stadt und die Postleitzahl provided")
return False #die vollständige Adresse ist nicht vorhanden, es wird nur die Stadt und die Postleitzahl angegeben
else:
print("die vollständige Adresse ist vorhanden")
return True #die vollständige Adresse ist vorhanden
else:
print("es wurde \"beenden\" geschrieben")
def findingAdress(id):
if running == True:
max_loc, max_val = lookingForPixel("Pin")
counter = 0
while max_val < 0.80 and counter <= 10:
max_loc, max_val = lookingForPixel("Pin")
print(max_val)
time.sleep(0.5)
#counter += 1 #wurde erstmal deaktiviert, da es zu Problemen führen kann
if counter >= 10:
greatReset(id)
x, y = max_loc
py.moveTo(x+200, y)
for i in range(3):
py.click()
keyboard.press_and_release("ctrl+c")
time.sleep(0.5)
adressFirst = clipboard.paste()
py.moveTo(x+400, y)
for i in range(3):
py.click()
keyboard.press_and_release("ctrl+c")
time.sleep(0.5)
adressSecond = clipboard.paste()
return adressFirst, adressSecond
else:
print("es wurde \"beenden\" geschrieben")
#prompt = f"""Bitte verfassen Sie mir eine umwerfende Bewerbung um eine Wohnung. Sollten Informationen wie der zweite Teil der Adresse fehlen, sollte die Bewerbung so formuliert sein, dass sie immer noch mit den verfügbaren Daten arbeitet. Beginnen Sie die Bewerbung mit \"Sehr geehrte...\" oder \"Sehr geehrter...\" und verwenden Sie die gegebenen Informationen, die mir zur Verfügung stehen: Stadt und Postleitzahl der Adresse, Straßenname und Hausnummer (falls vorhanden) und den Namen oder die Firma des Vermieters oder der Kontaktperson. Informationen, die nicht verfügbar sind, sollten in der Bewerbung nicht durch Platzhalter wie '[]' repräsentiert werden.: {text}"""
def gpt3(text):
if running == True:
prompt = f"""Bitte verfassen Sie mir eine umwerfende Bewerbung um eine Wohnung. Sollten Informationen wie der zweite Teil der Adresse fehlen, sollte die Bewerbung so formuliert sein, dass sie immer noch mit den verfügbaren Daten arbeitet. Beginnen Sie die Bewerbung mit \"Sehr geehrte...\" oder \"Sehr geehrter...\" und verwenden Sie die gegebenen Informationen, die mir zur Verfügung stehen: Stadt und Postleitzahl der Adresse, Straßenname und Hausnummer (falls vorhanden) und den Namen oder die Firma des Vermieters oder der Kontaktperson. Informationen, die nicht verfügbar sind, sollten in der Bewerbung nicht durch Platzhalter wie '[]' repräsentiert werden.: {text}"""
try:
response = openai.Completion.create(
engine="text-davinci-003",
prompt=prompt,
temperature=0.1,
max_tokens=500,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
)
generated_text = response['choices'][0]['text'].strip()
first_word = generated_text.split()[0]
if str(first_word).lower() == "sehr":
return generated_text
else:
filePathPicturesErrorReport = os.path.dirname(os.path.realpath(sys.argv[0])) + "\\variables_db.csv"
save_variables_to_csv(generated_text, prompt, filePathPicturesErrorReport)#1zeit #2ChatGPTOutput #3NameAdressETC
raise Exception("The generated text does not meet the requirements")
except Exception as e:
print(f"Error occurred: {e}")
return None
else:
print("es wurde \"beenden\" geschrieben")
def writingMessageViaGPT(ersteAdresse, zweiteAdresse, nameOrCompany, id):
if running == True:
if nameOrCompany.strip() == "Nachricht":
nameOrCompany = "kein Name oder Firma vorhanden vorhanden"
print(ersteAdresse, zweiteAdresse, nameOrCompany)
textZumBearbeiten = f"""
\"Die vollständige Adresse oder nur ein Teil der Adresse, worauf wir uns bewerben möchten: \"{ersteAdresse + zweiteAdresse}\",
Name des Ansprechpartners/Kontaktperson oder Unternehmens, welche das Objekt vermietet: \"{nameOrCompany}\",
Mein Name (Bewerber): \"{eigeneDaten["name"]}\",
Meine E-Mail-Adresse: \"{eigeneDaten["email"]}\",
Meine Telefonnummer: \"{eigeneDaten["telefon"]}\",
Meine Adresse: \"{eigeneDaten["adresse"]}\",
Meine Postleitzahl: \"{eigeneDaten["plz"]}\",
Meine Stadt: \"{eigeneDaten["stadt"]}\",
Mein Land: \"{eigeneDaten["land"]}\",
Mein Alter: \"{eigeneDaten["alter"]}\",
Mein Geburtsort: \"{eigeneDaten["geburtsort"]}\",
Mein Geburtsland: \"{eigeneDaten["geburtsland"]}\",
Mein Familienstand: \"{eigeneDaten["familienstand"]}\",
Meine Nationalität: \"{eigeneDaten["nationalität"]}\",
Mein Beruf: \"{eigeneDaten["beruf"]}\",
Mein Arbeitgeber oder meine Schule: \"{eigeneDaten["arbeitgeberOderSchule"]}\",
Meine Haustiere: \"{eigeneDaten["haustiere"]}\",
Ich bin Raucher: \"{eigeneDaten["raucher"]}\"\"
"""
getText = gpt3(textZumBearbeiten)
if getText == None or getText == "":
print("Fehler beim Schreiben der Nachricht, da der Inhalt leer ist")
print("Immobilie wird gespeichert als \"angeschrieben\" und das Ergebnis kommt in eine Log.txt")
max_loc, max_val = lookingForPixel("merken")
while max_val < 0.80:
max_loc, max_val = lookingForPixel("merken")
py.moveTo(max_loc)
time.sleep(timestop)
py.click()
time.sleep(5)
keyboard.press_and_release("alt+left")
newStatusGPT = aktualisierung(0, id)
if newStatusGPT == False:
return False
main(id) #muss geändert werden. lieber das script neu starten, als die function neu aufzurufen
else:
return getText
else:
print("es wurde \"beenden\" geschrieben")
def openingMessage(id):
if running == True:
max_loc, max_val = lookingForPixel("ButtonNachricht")
counter = 0
while max_val < 0.80 and counter <= 10:
max_loc, max_val = lookingForPixel("ButtonNachricht")
print(max_val)
time.sleep(0.5)
#counter += 1 #wurde erstmal deaktiviert, da es zu Problemen führen kann
if counter >= 10:
greatReset(id)
x, y = max_loc
py.moveTo(x+40, y+40)
time.sleep(timestop)
py.click()
else:
print("es wurde \"beenden\" geschrieben")
def schreibenDerNachricht(newText, id):
if running == True:
max_loc, max_val = lookingForPixel("IhreNachricht")
count = 0
while max_val < 0.80 and count <= 7:
max_loc, max_val = lookingForPixel("IhreNachricht")
print(max_val)
count += 1
time.sleep(1)
if count >= 7:
print("es scheint so, dass der Button deaktiviert ist, like wird vergeben und die Seite wird ignoriert")
max_loc, max_val = lookingForPixel("schliessenPlus")
if max_val > 0.90:
py.click(max_loc)
return False
else:
py.moveTo(max_loc[0]+100, max_loc[1]+100)
time.sleep(timestop)
py.click()
time.sleep(timestop)
keyboard.press_and_release("ctrl+a")
time.sleep(timestop)
clipboard.copy(newText)
time.sleep(timestop)
keyboard.press_and_release("ctrl+v")
time.sleep(timestop)
py.click(max_loc[0], max_loc[1])
time.sleep(timestop)
py.scroll(-1500)
time.sleep(timestop)
counter = 0
max_loc, max_val = lookingForPixel("absender")
while max_val < 0.80 and counter <= 10:
max_loc, max_val = lookingForPixel("absender")
print(max_val)
time.sleep(0.5)
#counter += 1 #wurde erstmal deaktiviert, da es zu Problemen führen kann
if counter >= 10:
greatReset(id)
py.moveTo(max_loc[0], max_loc[1])
time.sleep(timestop)
py.click() #clickt auf den Absender
for i in range(8):
time.sleep(1)
beenden()
keyboard.press_and_release("alt+left")
return True
else:
print("es wurde \"beenden\" geschrieben")
def antoherCheckForAvail():
if running == True:
max_loc, max_val = lookingForPixel("merken")
time.sleep(timestop)
py.moveTo(max_loc[0], max_loc[1])
time.sleep(timestop)
py.click()
time.sleep(timestop)
return False
else:
print("es wurde \"beenden\" geschrieben")
def schreibenDerBewerbung(id):
if running == True:
#sicher gehen, dass die Seite funktioniert
nextNewStatusBewerbung = siteIsLoading(id, 1) #sicher gehen, dass die Seite geladen wurde und alle Elemente mit dem menschlichen Auge ersichtlich sind
if nextNewStatusBewerbung == False:
return "deaktivieren"
statusTyp = checkSite() #checkt, ob die Seite aktiv ist oder deaktiviert wurde
if statusTyp == False:
return False
max_loc, max_val = lookingForPixel("ButtonNachricht")
while max_val < 0.80:
max_loc, max_val = lookingForPixel("ButtonNachricht")
x, y = max_loc
statusTypSecond= checkIfNoPicture() #checkt, ob ein Bild vorhanden ist, um die Position des Textes zu ermitteln
#2026 911,
if statusTypSecond == True:
print("Status Typ ist KEINBILD")
py.moveTo(x+500, y-78)
print(x+500, y-90)
time.sleep(timestop)
for i in range(3):
py.click()
keyboard.press_and_release("ctrl+c")
time.sleep(0.5)
nameOrCompany = clipboard.paste()
py.scroll(-500)
ersteAdresse, zweiteAdresse = findingAdress(id) #if Status == False, dann ist die Adresse nicht vorhanden
elif statusTypSecond == False:
print("Status Typ ist BILD")
py.moveTo(x+500, y-130)
time.sleep(timestop)
for i in range(3):
py.click()
keyboard.press_and_release("ctrl+c")
time.sleep(0.5)
nameOrCompany = clipboard.paste()
py.scroll(-500)
ersteAdresse, zweiteAdresse = findingAdress(id) #if Status == False, dann ist die Adresse nicht vorhanden
newText = writingMessageViaGPT(ersteAdresse, zweiteAdresse, nameOrCompany, id)
if newText == False:
return False
py.scroll(500)
openingMessage(id)
time.sleep(timestop)
statusDeak= schreibenDerNachricht(newText,id)
if statusDeak == False:
antoherCheckForAvail()
keyboard.press_and_release("alt+left")
else:
pass
else:
print("es wurde \"beenden\" geschrieben")
bot = telebot.TeleBot(token=Your_API_KEY_Tele)
test = "Start"
@bot.message_handler(func=lambda message: "Start" in message.text or "start" in message.text or "START" in message.text or "beenden" in message.text)
def greet(message):
print("es funktioniert")
if message.text == "beenden":
bot.send_message(message.chat.id, "Damit hast du nun den Bot beendet.")
global running
running = False
if message.text == "Start" or message.text == "start" or message.text == "START":
running = True
bot.send_message(message.chat.id, "Damit hast du nun den Bot gestartet.")
global originalerUrl
originalerUrl= premiumOrNot1(premiumOrNot) #wird rausgenommen, sobald es öffentlich ist, außer es wird auch für nicht premium user verfügbar sein
main(message.chat.id)
def check_captcha(message, identification):
print("telegram")
time.sleep(5)
print("telegram nochmal")
if message.text == "done":
# Captcha was solved, continue with the script
bot.send_message(message.chat.id, "Script execution continued.")
if identification == 0:
main(message.chat.id)
elif identification == 1:
schreibenDerBewerbung(message.chat.id)
else:
# Captcha was not solved, check the next message again
bot.send_message(message.chat.id, "Please solve the captcha and write 'done'")
bot.register_next_step_handler(message, check_captcha, identification) # pass identification through so the retry keeps its context
bot.polling()
| [
"Bitte verfassen Sie mir eine umwerfende Bewerbung um eine Wohnung. Sollten Informationen wie der zweite Teil der Adresse fehlen, sollte die Bewerbung so formuliert sein, dass sie immer noch mit den verfügbaren Daten arbeitet. Beginnen Sie die Bewerbung mit \"Sehr geehrte...\" oder \"Sehr geehrter...\" und verwenden Sie die gegebenen Informationen, die mir zur Verfügung stehen: Stadt und Postleitzahl der Adresse, Straßenname und Hausnummer (falls vorhanden) und den Namen oder die Firma des Vermieters oder der Kontaktperson. Informationen, die nicht verfügbar sind, sollten in der Bewerbung nicht durch Platzhalter wie '[]' repräsentiert werden.: PLACEHOLDER",
"<built-in function id>"
] |
2024-01-10 | mindmeand/serverless-app | resources~consultation.py | from datetime import datetime
from config import Config
from flask import request
from flask_jwt_extended import create_access_token, get_jwt, get_jwt_identity, jwt_required
from flask_restful import Resource
from mysql_connection import get_connection
from mysql.connector import Error
from email_validator import validate_email, EmailNotValidError
from utils import check_password, hash_password
import openai
# chat-gpt
class ConsultationResource(Resource) :
# Counseling API (stores the question and answer in the DB)
@jwt_required()
def post(self) :
# Counseling feature
userId = get_jwt_identity()
data = request.get_json()
content = data["question"]
type = data["type"]
openai.api_key = Config.openAIKey
# A competent and kind counselor
if type == 0:
system_message = "You are a competent and kind trouble counselor who listens to people's concerns and provides helpful advice."
# An objective and cool-headed counselor
elif type == 1:
system_message = "You are an objective and cool-headed trouble counselor who listens to people's concerns and provides rational advice."
# A comforting, friend-like counselor
else: # counselor_type == 2
system_message = "You are a comforting friend-like trouble counselor who listens to people's concerns and provides warm and supportive advice."
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": system_message},
{"role": "user", "content": content+" 한국어 210자 정도로 답변해줘. "}
]
)
print(completion.choices[0].message['content'].strip())
response_message = completion.choices[0].message['content'].strip()
# Save to the DB
try :
connection = get_connection()
connection.begin()
query = '''insert into consultation
(userId,question,answer,type)
values
(%s,%s,%s,%s)'''
record = (userId,content,response_message,type)
cursor = connection.cursor()
cursor.execute(query,record)
connection.commit()
except Error as e :
connection.rollback()
print(e)
return{'error':str(e)},500
finally:
cursor.close()
connection.close()
return {'result':'success','question' : content,'answer': response_message },200
# Fetch the question history
class ConsultationHistoryResource(Resource):
@jwt_required()
def get(self) :
userId = get_jwt_identity()
try :
connection = get_connection()
query = '''select * from consultation
where userId=%s
order by createdAt desc;'''
cursor = connection.cursor(dictionary=True)
record = (userId,)
cursor.execute(query,record)
resultList = cursor.fetchall()
print(resultList)
i = 0
for row in resultList :
resultList[i]['createdAt']=row['createdAt'].isoformat()
i = i+1
except Error as e:
print(e)
return{"result" : "fail", "error" : str(e)}, 500
finally:
cursor.close()
connection.close()
return {"result" : "success", "result" : resultList,"count":len(resultList)} ,200
# Delete a history entry
class DeleteHistoryResource(Resource):
@jwt_required()
def delete(self, id) :
userId = get_jwt_identity()
try :
connection = get_connection()
connection.begin()
query = '''delete from consultation
where userId = %s and id=%s;'''
cursor = connection.cursor()
record = (userId,id)
cursor.execute(query,record)
connection.commit()
except Error as e:
connection.rollback()
print(e)
return{"result" : "fail", "error" : str(e)}, 500
finally:
cursor.close()
connection.close()
return {"result" : "success"} ,200
# Search feature
class ConsultationSearchResource(Resource):
@jwt_required()
def get(self):
userId = get_jwt_identity()
keyword = request.args.get('keyword')
try:
connection = get_connection()
query = '''SELECT * FROM consultation
WHERE userId = %s AND (question LIKE %s OR answer LIKE %s)
ORDER BY createdAt DESC;'''
keyword_pattern = f'%{keyword}%'
cursor = connection.cursor(dictionary=True)
record=(userId, keyword_pattern, keyword_pattern)
cursor.execute(query, record)
search_results = cursor.fetchall()
for idx, row in enumerate(search_results):
search_results[idx]['createdAt'] = row['createdAt'].isoformat()
except Error as e:
print(e)
return {"result": "fail", "error": str(e)}, 500
finally:
cursor.close()
connection.close()
return {"result": "success", "searchResults": search_results, "count": len(search_results)}, 200
| [
"PLACEHOLDER 한국어 210자 정도로 답변해줘. "
] |
2024-01-10 | glee0413/agentsoft | src~service~agents~agent_coder.py | from modules.agent import LLMAgent
from modules.message import Message
from langchain.prompts import ChatPromptTemplate
from prompt.dev_prompt import python_coder_template
from config.constant import ProfessionType
class CoderAgent(LLMAgent):
def __init__(self, name):
super().__init__(name,profession=ProfessionType.PT_EXPERT_PYTHON)
self.python_hat = ChatPromptTemplate.from_template(python_coder_template)
async def Conclude(self,content:str):
# Summarization function
prompt_value = {'demand':content}
response = await self.llm.ainvoke(python_coder_template,prompt_value)
return response
def stop(self):
# Stop the agent
pass
def test_llmagent():
agent = CoderAgent('Python Expert')
agent.launch()
pass
if __name__ == "__main__":
test_llmagent() | [
"{'demand': PLACEHOLDER}"
] |
2024-01-10 | glee0413/agentsoft | src~modules~model_center.py | from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate,PromptTemplate
from langchain.vectorstores import DocArrayInMemorySearch
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from pydantic_settings import BaseSettings
from langchain.chat_models import QianfanChatEndpoint
from langchain.llms import QianfanLLMEndpoint
from langchain_core.output_parsers import StrOutputParser
from langchain.chains import LLMChain
import os
import asyncio
from loguru import logger
class BaiduConfig(BaseSettings):
QIANFAN_AK: str
QIANFAN_SK: str
class Config:
env_file = ".env_baidu"
class ModelCenter:
def __init__(self) -> None:
self.llm_model = {}
self.chat_model = {}
self.llm_model['qianfan'] = self.qianfan()
def qianfan(self):
self.baidu_config = BaiduConfig()
os.environ["QIANFAN_AK"] = self.baidu_config.QIANFAN_AK
os.environ["QIANFAN_SK"] = self.baidu_config.QIANFAN_SK
qianfan_llm = QianfanLLMEndpoint(streaming=True)
return qianfan_llm
def ask(self,question:str, model = 'qianfan'):
answer = self.llm_model['qianfan'](question)
return answer
async def aask(self,question:str, model = 'qianfan'):
answer = await self.llm_model['qianfan'].agenerate(prompts=[question])
#print(answer)
# print(type(answer))
# print(answer)
#output_parser = StrOutputParser()
answer_str = answer.generations[0][0].text
print('parse:',answer.generations[0][0].text)
return answer_str
async def ainvoke(self,prompt_template:str,prompt_value:dict,model = 'qianfan'):
output_parser = StrOutputParser()
prompt = PromptTemplate.from_template(prompt_template)
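# Compose an LCEL pipeline (prompt -> LLM -> string output parser) and run it asynchronously.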
chain = prompt | self.llm_model[model] | output_parser
reply_content = await chain.ainvoke(prompt_value)
# reply_content = output_parser.invoke(message)
logger.info(f'prompt:{prompt}\n value:{prompt_value}\n reply:{reply_content}')
# prompt_template = PromptTemplate.from_template(prompt)
# chain = LLMChain(llm=self.llm_model[model], prompt=prompt)
# response = await chain.arun(prompt_value)
#return response
def test_modelmodel():
model_center = ModelCenter()
# answer = model_center.ask('请介绍一下鲁迅先生的简历')
answer = model_center.aask('请介绍一下鲁迅先生的简历')
asyncio.get_event_loop().run_until_complete(answer)
return
async def async_test():
model_center = ModelCenter()
# model_center.aask('请介绍一下鲁迅先生的简历')
task1 = asyncio.create_task(model_center.aask('请介绍一下鲁迅先生的简历'))
#task2 = asyncio.create_task(model_center.aask('请介绍一下周树人先生的简历'))
#await asyncio.gather(task1, task2)
await asyncio.gather(task1)
async def chain_test():
prompt_template = """
Given the user's name, write them a personalized greeting.
User's name: {name}
Your response:
"""
prompt_value = {'name':'孙悟空'}
model_center = ModelCenter()
task1 = asyncio.create_task(model_center.ainvoke(prompt_template,prompt_value))
await asyncio.gather(task1)
if __name__ == "__main__":
#test_modelmodel()
asyncio.get_event_loop().run_until_complete(chain_test())
#asyncio.run(async_test()) | [
"{'name': '孙悟空'}",
"\nGiven the user's name, write them a personalized greeting. \n\nUser's name: {name}\n\nYour response:\n"
] |
2024-01-10 | glee0413/agentsoft | src~component~LLM~LLMAdapter.py | import langchain
class LLMAdapter:
_registered_models = {} # stores the registered model classes
def __init__(self, config_path):
self.config_path = config_path
self.selected_model = None
self.load_config()
self.register_models()
def load_config(self):
# Load the configuration from the config file and choose which model to use
# config-reading code goes here...
pass
def register_models(self):
# Register all available LLM models
for name, cls in self._registered_models.items():
# use langchain's registration hook to register the model automatically
langchain.register(cls, name)
def select_model(self, model_name):
# Choose the model to use
# model-selection code goes here...
pass
def generate_text(self, input_text):
# Generate text with the selected model
# code that calls the selected model goes here...
pass
# Decorator used to register a model class
@classmethod
def register_model(cls, name):
def decorator(model_cls):
cls._registered_models[name] = model_cls
return model_cls
return decorator
def main():
adapter = LLMAdapter("config_file.yaml")
adapter.select_model("openai-gpt3")
output_text = adapter.generate_text("你的输入文本")
print(output_text)
if __name__ == "__main__":
main() | [] |
2024-01-10 | glee0413/agentsoft | src~component~LLM~LLM_center.py | from langchain.chat_models import QianfanChatEndpoint
from langchain.schema import HumanMessage
from pydantic_settings import BaseSettings
class BaiduConfig(BaseSettings):
QIANFAN_AK: str
QIANFAN_SK: str
class Config:
env_file = ".env_baidu"
class LLMCenter():
def __init__(self) -> None:
self.baidu_config = BaiduConfig()
pass
    async def agentrate(self, message: str):
chatLLM = QianfanChatEndpoint(streaming=True,)
# res = chatLLM.stream(streaming=True)
# for r in res:
# print(f"chat resp: {r}\n")
async def run_aio_generate():
resp = await chatLLM.agenerate(
messages=[[HumanMessage(content=message)]]
)
print(resp)
print('call run_aio_generate')
await run_aio_generate() | [] |
2024-01-10 | carrabre/embedchain | embedchain~apps~Llama2App.py | import os
from langchain.llms import Replicate
from embedchain.config import AppConfig
from embedchain.embedchain import EmbedChain
class Llama2App(EmbedChain):
"""
The EmbedChain Llama2App class.
Has two functions: add and query.
adds(data_type, url): adds the data from the given URL to the vector db.
query(query): finds answer to the given query using vector database and LLM.
"""
def __init__(self, config: AppConfig = None):
"""
:param config: AppConfig instance to load as configuration. Optional.
"""
if "REPLICATE_API_TOKEN" not in os.environ:
raise ValueError("Please set the REPLICATE_API_TOKEN environment variable.")
if config is None:
config = AppConfig()
super().__init__(config)
def get_llm_model_answer(self, prompt, config: AppConfig = None):
# TODO: Move the model and other inputs into config
llm = Replicate(
model="a16z-infra/llama13b-v2-chat:df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5",
input={"temperature": 0.75, "max_length": 500, "top_p": 1},
)
return llm(prompt)
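# Minimal usage sketch (illustrative, not part of the original file): assumes
# REPLICATE_API_TOKEN is already set; the URL and question are hypothetical.
def _llama2_demo():
    app = Llama2App()
    app.add("web_page", "https://example.com/article")
    return app.query("What is the article about?")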
| [] |
2024-01-10 | carrabre/embedchain | embedchain~loaders~docx_file.py | from langchain.document_loaders import Docx2txtLoader
class DocxFileLoader:
def load_data(self, url):
"""Load data from a .docx file."""
loader = Docx2txtLoader(url)
output = []
data = loader.load()
content = data[0].page_content
meta_data = data[0].metadata
meta_data["url"] = "local"
output.append({"content": content, "meta_data": meta_data})
return output
| [] |
2024-01-10 | carrabre/embedchain | embedchain~loaders~pdf_file.py | from langchain.document_loaders import PyPDFLoader
from embedchain.utils import clean_string
class PdfFileLoader:
def load_data(self, url):
"""Load data from a PDF file."""
loader = PyPDFLoader(url)
output = []
pages = loader.load_and_split()
if not len(pages):
raise ValueError("No data found")
for page in pages:
content = page.page_content
content = clean_string(content)
meta_data = page.metadata
meta_data["url"] = url
output.append(
{
"content": content,
"meta_data": meta_data,
}
)
return output
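# Usage sketch (the path below is a hypothetical placeholder): load_data returns
# one dict per PDF page, each carrying the cleaned text plus the page metadata
# and the source url.
def _pdf_loader_demo(path="./example.pdf"):
    pages = PdfFileLoader().load_data(path)
    return [(p["meta_data"].get("page"), len(p["content"])) for p in pages]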
| [] |
2024-01-10 | carrabre/embedchain | embedchain~embedchain.py | import logging
import os
from chromadb.errors import InvalidDimensionException
from dotenv import load_dotenv
from langchain.docstore.document import Document
from langchain.memory import ConversationBufferMemory
from embedchain.config import AddConfig, ChatConfig, QueryConfig
from embedchain.config.apps.BaseAppConfig import BaseAppConfig
from embedchain.config.QueryConfig import DOCS_SITE_PROMPT_TEMPLATE
from embedchain.data_formatter import DataFormatter
load_dotenv()
ABS_PATH = os.getcwd()
DB_DIR = os.path.join(ABS_PATH, "db")
memory = ConversationBufferMemory()
class EmbedChain:
def __init__(self, config: BaseAppConfig):
"""
Initializes the EmbedChain instance, sets up a vector DB client and
creates a collection.
:param config: BaseAppConfig instance to load as configuration.
"""
self.config = config
self.db_client = self.config.db.client
self.collection = self.config.db.collection
self.user_asks = []
self.is_docs_site_instance = False
self.online = False
def add(self, data_type, url, metadata=None, config: AddConfig = None):
"""
Adds the data from the given URL to the vector db.
Loads the data, chunks it, create embedding for each chunk
and then stores the embedding to vector database.
:param data_type: The type of the data to add.
:param url: The URL where the data is located.
:param metadata: Optional. Metadata associated with the data source.
:param config: Optional. The `AddConfig` instance to use as configuration
options.
"""
if config is None:
config = AddConfig()
data_formatter = DataFormatter(data_type, config)
self.user_asks.append([data_type, url, metadata])
self.load_and_embed(data_formatter.loader, data_formatter.chunker, url, metadata)
if data_type in ("docs_site",):
self.is_docs_site_instance = True
def add_local(self, data_type, content, metadata=None, config: AddConfig = None):
"""
Adds the data you supply to the vector db.
Loads the data, chunks it, create embedding for each chunk
and then stores the embedding to vector database.
:param data_type: The type of the data to add.
:param content: The local data. Refer to the `README` for formatting.
:param metadata: Optional. Metadata associated with the data source.
:param config: Optional. The `AddConfig` instance to use as
configuration options.
"""
if config is None:
config = AddConfig()
data_formatter = DataFormatter(data_type, config)
self.user_asks.append([data_type, content])
self.load_and_embed(
data_formatter.loader,
data_formatter.chunker,
content,
metadata,
)
def load_and_embed(self, loader, chunker, src, metadata=None):
"""
Loads the data from the given URL, chunks it, and adds it to database.
:param loader: The loader to use to load the data.
:param chunker: The chunker to use to chunk the data.
:param src: The data to be handled by the loader. Can be a URL for
remote sources or local content for local loaders.
:param metadata: Optional. Metadata associated with the data source.
"""
embeddings_data = chunker.create_chunks(loader, src)
documents = embeddings_data["documents"]
metadatas = embeddings_data["metadatas"]
ids = embeddings_data["ids"]
# get existing ids, and discard doc if any common id exist.
where = {"app_id": self.config.id} if self.config.id is not None else {}
# where={"url": src}
existing_docs = self.collection.get(
ids=ids,
where=where, # optional filter
)
existing_ids = set(existing_docs["ids"])
if len(existing_ids):
data_dict = {id: (doc, meta) for id, doc, meta in zip(ids, documents, metadatas)}
data_dict = {id: value for id, value in data_dict.items() if id not in existing_ids}
if not data_dict:
print(f"All data from {src} already exists in the database.")
return
ids = list(data_dict.keys())
documents, metadatas = zip(*data_dict.values())
# Add app id in metadatas so that they can be queried on later
if self.config.id is not None:
metadatas = [{**m, "app_id": self.config.id} for m in metadatas]
# FIXME: Fix the error handling logic when metadatas or metadata is None
metadatas = metadatas if metadatas else []
metadata = metadata if metadata else {}
chunks_before_addition = self.count()
# Add metadata to each document
metadatas_with_metadata = [{**meta, **metadata} for meta in metadatas]
self.collection.add(documents=documents, metadatas=list(metadatas_with_metadata), ids=ids)
print((f"Successfully saved {src}. New chunks count: " f"{self.count() - chunks_before_addition}"))
def _format_result(self, results):
return [
(Document(page_content=result[0], metadata=result[1] or {}), result[2])
for result in zip(
results["documents"][0],
results["metadatas"][0],
results["distances"][0],
)
]
def get_llm_model_answer(self):
"""
Usually implemented by child class
"""
raise NotImplementedError
def retrieve_from_database(self, input_query, config: QueryConfig):
"""
Queries the vector database based on the given input query.
Gets relevant doc based on the query
:param input_query: The query to use.
:param config: The query configuration.
:return: The content of the document that matched your query.
"""
try:
where = {"app_id": self.config.id} if self.config.id is not None else {} # optional filter
result = self.collection.query(
query_texts=[
input_query,
],
n_results=config.number_documents,
where=where,
)
except InvalidDimensionException as e:
raise InvalidDimensionException(
e.message()
+ ". This is commonly a side-effect when an embedding function, different from the one used to add the embeddings, is used to retrieve an embedding from the database." # noqa E501
) from None
results_formatted = self._format_result(result)
contents = [result[0].page_content for result in results_formatted]
return contents
def _append_search_and_context(self, context, web_search_result):
return f"{context}\nWeb Search Result: {web_search_result}"
def generate_prompt(self, input_query, contexts, config: QueryConfig, **kwargs):
"""
Generates a prompt based on the given query and context, ready to be
passed to an LLM
:param input_query: The query to use.
:param contexts: List of similar documents to the query used as context.
:param config: Optional. The `QueryConfig` instance to use as
configuration options.
:return: The prompt
"""
context_string = (" | ").join(contexts)
web_search_result = kwargs.get("web_search_result", "")
if web_search_result:
context_string = self._append_search_and_context(context_string, web_search_result)
if not config.history:
prompt = config.template.substitute(context=context_string, query=input_query)
else:
prompt = config.template.substitute(context=context_string, query=input_query, history=config.history)
return prompt
def get_answer_from_llm(self, prompt, config: ChatConfig):
"""
Gets an answer based on the given query and context by passing it
to an LLM.
:param query: The query to use.
:param context: Similar documents to the query used as context.
:return: The answer.
"""
return self.get_llm_model_answer(prompt, config)
def access_search_and_get_results(self, input_query):
from langchain.tools import DuckDuckGoSearchRun
search = DuckDuckGoSearchRun()
logging.info(f"Access search to get answers for {input_query}")
return search.run(input_query)
def query(self, input_query, config: QueryConfig = None, dry_run=False):
"""
Queries the vector database based on the given input query.
Gets relevant doc based on the query and then passes it to an
LLM as context to get the answer.
:param input_query: The query to use.
:param config: Optional. The `QueryConfig` instance to use as
configuration options.
:param dry_run: Optional. A dry run does everything except send the resulting prompt to
the LLM. The purpose is to test the prompt, not the response.
You can use it to test your prompt, including the context provided
by the vector database's doc retrieval.
The only thing the dry run does not consider is the cut-off due to
the `max_tokens` parameter.
:return: The answer to the query.
"""
if config is None:
config = QueryConfig()
if self.is_docs_site_instance:
config.template = DOCS_SITE_PROMPT_TEMPLATE
config.number_documents = 5
k = {}
if self.online:
k["web_search_result"] = self.access_search_and_get_results(input_query)
contexts = self.retrieve_from_database(input_query, config)
prompt = self.generate_prompt(input_query, contexts, config, **k)
logging.info(f"Prompt: {prompt}")
if dry_run:
return prompt
answer = self.get_answer_from_llm(prompt, config)
if isinstance(answer, str):
logging.info(f"Answer: {answer}")
return answer
else:
return self._stream_query_response(answer)
def _stream_query_response(self, answer):
streamed_answer = ""
for chunk in answer:
streamed_answer = streamed_answer + chunk
yield chunk
logging.info(f"Answer: {streamed_answer}")
def chat(self, input_query, config: ChatConfig = None, dry_run=False):
"""
Queries the vector database on the given input query.
Gets relevant doc based on the query and then passes it to an
LLM as context to get the answer.
Maintains the whole conversation in memory.
:param input_query: The query to use.
:param config: Optional. The `ChatConfig` instance to use as
configuration options.
:param dry_run: Optional. A dry run does everything except send the resulting prompt to
the LLM. The purpose is to test the prompt, not the response.
You can use it to test your prompt, including the context provided
by the vector database's doc retrieval.
The only thing the dry run does not consider is the cut-off due to
the `max_tokens` parameter.
:return: The answer to the query.
"""
if config is None:
config = ChatConfig()
if self.is_docs_site_instance:
config.template = DOCS_SITE_PROMPT_TEMPLATE
config.number_documents = 5
k = {}
if self.online:
k["web_search_result"] = self.access_search_and_get_results(input_query)
        contexts = self.retrieve_from_database(input_query, config)
global memory
chat_history = memory.load_memory_variables({})["history"]
if chat_history:
config.set_history(chat_history)
prompt = self.generate_prompt(input_query, contexts, config, **k)
logging.info(f"Prompt: {prompt}")
if dry_run:
return prompt
answer = self.get_answer_from_llm(prompt, config)
memory.chat_memory.add_user_message(input_query)
if isinstance(answer, str):
memory.chat_memory.add_ai_message(answer)
logging.info(f"Answer: {answer}")
return answer
else:
# this is a streamed response and needs to be handled differently.
return self._stream_chat_response(answer)
def _stream_chat_response(self, answer):
streamed_answer = ""
for chunk in answer:
streamed_answer = streamed_answer + chunk
yield chunk
memory.chat_memory.add_ai_message(streamed_answer)
logging.info(f"Answer: {streamed_answer}")
def count(self):
"""
Count the number of embeddings.
:return: The number of embeddings.
"""
return self.collection.count()
def reset(self):
"""
Resets the database. Deletes all embeddings irreversibly.
`App` has to be reinitialized after using this method.
"""
self.db_client.reset()
| [] |
2024-01-10 | carrabre/embedchain | embedchain~apps~CustomApp.py | import logging
from typing import List
from langchain.schema import BaseMessage
from embedchain.config import ChatConfig, CustomAppConfig
from embedchain.embedchain import EmbedChain
from embedchain.models import Providers
class CustomApp(EmbedChain):
"""
The custom EmbedChain app.
Has two functions: add and query.
adds(data_type, url): adds the data from the given URL to the vector db.
query(query): finds answer to the given query using vector database and LLM.
dry_run(query): test your prompt without consuming tokens.
"""
def __init__(self, config: CustomAppConfig = None):
"""
:param config: Optional. `CustomAppConfig` instance to load as configuration.
:raises ValueError: Config must be provided for custom app
"""
if config is None:
raise ValueError("Config must be provided for custom app")
self.provider = config.provider
if config.provider == Providers.GPT4ALL:
from embedchain import OpenSourceApp
# Because these models run locally, they should have an instance running when the custom app is created
self.open_source_app = OpenSourceApp(config=config.open_source_app_config)
super().__init__(config)
def set_llm_model(self, provider: Providers):
self.provider = provider
if provider == Providers.GPT4ALL:
raise ValueError(
"GPT4ALL needs to be instantiated with the model known, please create a new app instance instead"
)
def get_llm_model_answer(self, prompt, config: ChatConfig):
# TODO: Quitting the streaming response here for now.
# Idea: https://gist.github.com/jvelezmagic/03ddf4c452d011aae36b2a0f73d72f68
if config.stream:
raise NotImplementedError(
"Streaming responses have not been implemented for this model yet. Please disable."
)
try:
if self.provider == Providers.OPENAI:
return CustomApp._get_openai_answer(prompt, config)
if self.provider == Providers.ANTHROPHIC:
return CustomApp._get_athrophic_answer(prompt, config)
if self.provider == Providers.VERTEX_AI:
return CustomApp._get_vertex_answer(prompt, config)
if self.provider == Providers.GPT4ALL:
return self.open_source_app._get_gpt4all_answer(prompt, config)
except ImportError as e:
raise ImportError(e.msg) from None
@staticmethod
def _get_openai_answer(prompt: str, config: ChatConfig) -> str:
from langchain.chat_models import ChatOpenAI
logging.info(vars(config))
chat = ChatOpenAI(
temperature=config.temperature,
model=config.model or "gpt-3.5-turbo",
max_tokens=config.max_tokens,
streaming=config.stream,
)
if config.top_p and config.top_p != 1:
logging.warning("Config option `top_p` is not supported by this model.")
messages = CustomApp._get_messages(prompt)
return chat(messages).content
@staticmethod
def _get_athrophic_answer(prompt: str, config: ChatConfig) -> str:
from langchain.chat_models import ChatAnthropic
chat = ChatAnthropic(temperature=config.temperature, model=config.model)
if config.max_tokens and config.max_tokens != 1000:
logging.warning("Config option `max_tokens` is not supported by this model.")
messages = CustomApp._get_messages(prompt)
return chat(messages).content
@staticmethod
def _get_vertex_answer(prompt: str, config: ChatConfig) -> str:
from langchain.chat_models import ChatVertexAI
chat = ChatVertexAI(temperature=config.temperature, model=config.model, max_output_tokens=config.max_tokens)
if config.top_p and config.top_p != 1:
logging.warning("Config option `top_p` is not supported by this model.")
messages = CustomApp._get_messages(prompt)
return chat(messages).content
@staticmethod
def _get_messages(prompt: str) -> List[BaseMessage]:
from langchain.schema import HumanMessage, SystemMessage
return [SystemMessage(content="You are a helpful assistant."), HumanMessage(content=prompt)]
def _stream_llm_model_response(self, response):
"""
This is a generator for streaming response from the OpenAI completions API
"""
for line in response:
chunk = line["choices"][0].get("delta", {}).get("content", "")
yield chunk
| [
"You are a helpful assistant."
] |
2024-01-10 | carrabre/embedchain | embedchain~loaders~youtube_video.py | from langchain.document_loaders import YoutubeLoader
from embedchain.utils import clean_string
class YoutubeVideoLoader:
def load_data(self, url):
"""Load data from a Youtube video."""
loader = YoutubeLoader.from_youtube_url(url, add_video_info=True)
doc = loader.load()
output = []
if not len(doc):
raise ValueError("No data found")
content = doc[0].page_content
content = clean_string(content)
meta_data = doc[0].metadata
meta_data["url"] = url
output.append(
{
"content": content,
"meta_data": meta_data,
}
)
return output
| [] |
2024-01-10 | ttwj/open-carbon-viz | fetch_and_rate.py | import os
import requests
import pandas as pd
import json
import time
import urllib.request
from montydb import MontyClient
import asyncio
import openai
from PyPDF2 import PdfReader
def is_pdf(file_path):
try:
with open(file_path, "rb") as file:
pdf = PdfReader(file)
_ = len(pdf.pages)
print(_)
return True
except Exception as e:
print(e)
return False
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
def remove_surrogates(text):
return text.encode('utf-8', 'ignore').decode('utf-8')
import re
def extract_json_values_using_regex(input_string):
json_pattern = r'<json>(.*?)</json>'
matches = re.findall(json_pattern, input_string, re.DOTALL)
if matches:
json_str = matches[-1] # Get the last match in case of multiple occurrences
json_data = json.loads(json_str)
return json_data
else:
return None
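# Quick illustration of the tag extraction above (the payload is made up, not
# real model output): the helper pulls the JSON between <json> tags and parses it.
def _json_tag_demo():
    sample = 'preamble <json>{"overallScore": {"score": 7, "comments": "ok"}}</json>'
    return extract_json_values_using_regex(sample)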
client = MontyClient("Verra.db")
db = client["Verra"]
project_reviews = db["ProjectReviews"]
anthropic = Anthropic(api_key='XXXX')
openai.api_key = 'XXX'
data = pd.read_csv("Verra_Projects.csv")
status_filter = ["Under Validation", "Registration requested", "Under development",
"Registration and verification approval requested"]
filtered_data = data[data.Status.isin(status_filter)]
def print_first_10_lines(input_string):
    # NOTE: despite the name, only the first 3 lines are printed as a preview
    lines = input_string.split('\n')[:3]
    for line in lines:
        print(line)
def extract_text_from_pdf(file_path):
try:
with open(file_path, "rb") as file:
pdf = PdfReader(file)
num_pages = len(pdf.pages)
text = " ".join(page.extract_text() for page in pdf.pages)
return text
except Exception as e:
print(f"Error: {e}")
return None
for _, row in filtered_data.iterrows():
project_id = row['ID']
pdd_file = f"PDD/{project_id}.pdf"
print("Preparing to fetch Project " + str(project_id))
if os.path.isfile(pdd_file):
print(f"File {pdd_file} already exists, skipping...")
continue
time.sleep(3)
response = requests.get(f"https://registry.verra.org/uiapi/resource/resourceSummary/{project_id}")
if response.status_code != 200:
print(f"Warning: Could not fetch project details for project {project_id}")
continue
openaiResponse = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{"role": "system", "content": 'You are an expert analyst in the voluntary carbon markets, with 30+ experience in Verra. You will be given a JSON. Your task is to extract the URL for the most updated version of the Project Design Document (PDD), by analysing the "documentType", "documentName" & "uploadDate"\nEXTREMELY IMPORTANT: YOU WILL ONLY OUTPUT URL. NO COMMENTS. \nSTART WITH: "https://"'},
{"role": "user", "content": str(response.json())}
],
temperature=0,
max_tokens=512,
top_p=1,
frequency_penalty=0,
presence_penalty=0
)
#print("Response from OpenAI")
#print(openaiResponse)
pdd_url = openaiResponse['choices'][0]['message']['content']
if pdd_url.startswith("https://"):
urllib.request.urlretrieve(pdd_url, pdd_file)
if is_pdf(pdd_file) is False:
print(f"Warning: Downloaded file PDD/{project_id}.pdf is not a PDF, skipping")
continue
pdd_content = remove_surrogates(extract_text_from_pdf(pdd_file))
print_first_10_lines(pdd_content)
stream = anthropic.completions.create(
prompt=f"{HUMAN_PROMPT} You are an expert auditor in the voluntary carbon markets, with over 30+ years of experience in Verra Methodologies. You will be provided with a Project Design Document (PDD).\n\n<document>{pdd_content}<document>\n\nYour task is to critically rate each section of the PDD and score it out of 10. If there are missing items, you should severely downgrade the project. You will be EXTREMELY STRICT with regards to Additionality & Permanance in particular, especially on regulatory surplus.\n\nWhen you reply, please provide your response in JSON format that satisfies the following structure:\n\n```type ProjectReview = {{\n projectDetails: Detail,\n safeguards: Detail,\n applicabilityOfMethodology: Detail,\n projectBoundary: Detail,\n baseline: Detail,\n additionality: Detail,\n emissionReductions: Detail,\n monitoringPlan: Detail,\n implementationStatus?: Detail,\n estimatedEmissionReductions?: Detail,\n monitoring?: Detail,\n quantificationOfEmissionReductions?: Detail,\n overallScore: Detail\n}}\n\ntype Detail = {{\n score: number | string,\n comments: string\n}}\n\n```Please put your JSON response inside the <json></json> XML tags.\n{AI_PROMPT}",
max_tokens_to_sample=2048,
model="claude-2",
stream=True
)
output_str = ""
for completion in stream:
print(completion.completion, end="")
output_str += completion.completion
try:
json_data = extract_json_values_using_regex(output_str)
# If parsing succeeds, the string is valid JSON.
print(json_data)
project_reviews.insert_one({"project_id": project_id, "project_review": json_data})
print("Successfully inserted JSON data.")
except json.JSONDecodeError as e:
# If parsing fails, the string is not valid JSON.
print(f"Error: The output_str is not valid JSON. {e}")
# Optionally, you can log this error or handle it as needed
else:
print(f"Warning: Could not extract URL for project {project_id}")
client.close()
| [
"PLACEHOLDER You are an expert auditor in the voluntary carbon markets, with over 30+ years of experience in Verra Methodologies. You will be provided with a Project Design Document (PDD).\n\n<document>PLACEHOLDER<document>\n\nYour task is to critically rate each section of the PDD and score it out of 10. If there are missing items, you should severely downgrade the project. You will be EXTREMELY STRICT with regards to Additionality & Permanance in particular, especially on regulatory surplus.\n\nWhen you reply, please provide your response in JSON format that satisfies the following structure:\n\n```type ProjectReview = {\n projectDetails: Detail,\n safeguards: Detail,\n applicabilityOfMethodology: Detail,\n projectBoundary: Detail,\n baseline: Detail,\n additionality: Detail,\n emissionReductions: Detail,\n monitoringPlan: Detail,\n implementationStatus?: Detail,\n estimatedEmissionReductions?: Detail,\n monitoring?: Detail,\n quantificationOfEmissionReductions?: Detail,\n overallScore: Detail\n}\n\ntype Detail = {\n score: number | string,\n comments: string\n}\n\n```Please put your JSON response inside the <json></json> XML tags.\nPLACEHOLDER",
"You are an expert analyst in the voluntary carbon markets, with 30+ experience in Verra. You will be given a JSON. Your task is to extract the URL for the most updated version of the Project Design Document (PDD), by analysing the \"documentType\", \"documentName\" & \"uploadDate\"\nEXTREMELY IMPORTANT: YOU WILL ONLY OUTPUT URL. NO COMMENTS. \nSTART WITH: \"https://\""
] |
2024-01-10 | Chubek/DALLE-pytorch | dalle_pytorch~dalle_pytorch.py | from math import log2, sqrt
import torch
from torch import nn, einsum
import torch.nn.functional as F
from axial_positional_embedding import AxialPositionalEmbedding
from einops import rearrange
from dalle_pytorch.vae import OpenAIDiscreteVAE
from dalle_pytorch.vae import VQGanVAE1024
from dalle_pytorch.transformer import Transformer
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def always(val):
def inner(*args, **kwargs):
return val
return inner
def is_empty(t):
return t.nelement() == 0
def masked_mean(t, mask, dim = 1):
t = t.masked_fill(~mask[:, :, None], 0.)
return t.sum(dim = 1) / mask.sum(dim = 1)[..., None]
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# sampling helpers
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
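# Small sketch of the filter above (illustrative values): entries outside the
# kept top fraction are pushed to -inf so a later softmax ignores them.
def _top_k_demo():
    logits = torch.tensor([[0.1, 2.0, -1.0, 0.5]])
    return top_k(logits, thres = 0.5)  # only the two largest logits survive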
# discrete vae class
class ResBlock(nn.Module):
def __init__(self, chan):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(chan, chan, 3, padding = 1),
nn.ReLU(),
nn.Conv2d(chan, chan, 3, padding = 1),
nn.ReLU(),
nn.Conv2d(chan, chan, 1)
)
def forward(self, x):
return self.net(x) + x
class DiscreteVAE(nn.Module):
def __init__(
self,
image_size = 256,
num_tokens = 512,
codebook_dim = 512,
num_layers = 3,
num_resnet_blocks = 0,
hidden_dim = 64,
channels = 3,
smooth_l1_loss = False,
temperature = 0.9,
straight_through = False,
kl_div_loss_weight = 0.,
normalization = ((0.5,) * 3, (0.5,) * 3)
):
super().__init__()
assert log2(image_size).is_integer(), 'image size must be a power of 2'
assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
has_resblocks = num_resnet_blocks > 0
self.image_size = image_size
self.num_tokens = num_tokens
self.num_layers = num_layers
self.temperature = temperature
self.straight_through = straight_through
self.codebook = nn.Embedding(num_tokens, codebook_dim)
hdim = hidden_dim
enc_chans = [hidden_dim] * num_layers
dec_chans = list(reversed(enc_chans))
enc_chans = [channels, *enc_chans]
dec_init_chan = codebook_dim if not has_resblocks else dec_chans[0]
dec_chans = [dec_init_chan, *dec_chans]
enc_chans_io, dec_chans_io = map(lambda t: list(zip(t[:-1], t[1:])), (enc_chans, dec_chans))
enc_layers = []
dec_layers = []
for (enc_in, enc_out), (dec_in, dec_out) in zip(enc_chans_io, dec_chans_io):
enc_layers.append(nn.Sequential(nn.Conv2d(enc_in, enc_out, 4, stride = 2, padding = 1), nn.ReLU()))
dec_layers.append(nn.Sequential(nn.ConvTranspose2d(dec_in, dec_out, 4, stride = 2, padding = 1), nn.ReLU()))
for _ in range(num_resnet_blocks):
dec_layers.insert(0, ResBlock(dec_chans[1]))
enc_layers.append(ResBlock(enc_chans[-1]))
if num_resnet_blocks > 0:
dec_layers.insert(0, nn.Conv2d(codebook_dim, dec_chans[1], 1))
enc_layers.append(nn.Conv2d(enc_chans[-1], num_tokens, 1))
dec_layers.append(nn.Conv2d(dec_chans[-1], channels, 1))
self.encoder = nn.Sequential(*enc_layers)
self.decoder = nn.Sequential(*dec_layers)
self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
self.kl_div_loss_weight = kl_div_loss_weight
# take care of normalization within class
self.normalization = normalization
def norm(self, images):
if not exists(self.normalization):
return images
means, stds = map(lambda t: torch.as_tensor(t).to(images), self.normalization)
means, stds = map(lambda t: rearrange(t, 'c -> () c () ()'), (means, stds))
images = images.clone()
images.sub_(means).div_(stds)
return images
@torch.no_grad()
@eval_decorator
def get_codebook_indices(self, images):
logits = self.forward(images, return_logits = True)
codebook_indices = logits.argmax(dim = 1).flatten(1)
return codebook_indices
def decode(
self,
img_seq
):
image_embeds = self.codebook(img_seq)
b, n, d = image_embeds.shape
h = w = int(sqrt(n))
image_embeds = rearrange(image_embeds, 'b (h w) d -> b d h w', h = h, w = w)
images = self.decoder(image_embeds)
return images
def forward(
self,
img,
return_loss = False,
return_recons = False,
return_logits = False,
temp = None
):
device, num_tokens, image_size, kl_div_loss_weight = img.device, self.num_tokens, self.image_size, self.kl_div_loss_weight
assert img.shape[-1] == image_size and img.shape[-2] == image_size, f'input must have the correct image size {image_size}'
img = self.norm(img)
logits = self.encoder(img)
if return_logits:
return logits # return logits for getting hard image indices for DALL-E training
temp = default(temp, self.temperature)
soft_one_hot = F.gumbel_softmax(logits, tau = temp, dim = 1, hard = self.straight_through)
sampled = einsum('b n h w, n d -> b d h w', soft_one_hot, self.codebook.weight)
out = self.decoder(sampled)
if not return_loss:
return out
# reconstruction loss
recon_loss = self.loss_fn(img, out)
# kl divergence
logits = rearrange(logits, 'b n h w -> b (h w) n')
log_qy = F.log_softmax(logits, dim = -1)
log_uniform = torch.log(torch.tensor([1. / num_tokens], device = device))
kl_div = F.kl_div(log_uniform, log_qy, None, None, 'batchmean', log_target = True)
loss = recon_loss + (kl_div * kl_div_loss_weight)
if not return_recons:
return loss
return loss, out
# main classes
class CLIP(nn.Module):
def __init__(
self,
*,
dim_text = 512,
dim_image = 512,
dim_latent = 512,
num_text_tokens = 10000,
text_enc_depth = 6,
text_seq_len = 256,
text_heads = 8,
num_visual_tokens = 512,
visual_enc_depth = 6,
visual_heads = 8,
visual_image_size = 256,
visual_patch_size = 32,
channels = 3
):
super().__init__()
self.text_emb = nn.Embedding(num_text_tokens, dim_text)
self.text_pos_emb = nn.Embedding(text_seq_len, dim_text)
self.text_transformer = Transformer(causal = False, seq_len = text_seq_len, dim = dim_text, depth = text_enc_depth, heads = text_heads)
self.to_text_latent = nn.Linear(dim_text, dim_latent, bias = False)
assert visual_image_size % visual_patch_size == 0, 'Image dimensions must be divisible by the patch size.'
num_patches = (visual_image_size // visual_patch_size) ** 2
patch_dim = channels * visual_patch_size ** 2
self.visual_patch_size = visual_patch_size
self.to_visual_embedding = nn.Linear(patch_dim, dim_image)
self.visual_pos_emb = nn.Embedding(num_patches, dim_image)
self.visual_transformer = Transformer(causal = False, seq_len = num_patches, dim = dim_image, depth = visual_enc_depth, heads = visual_heads)
self.to_visual_latent = nn.Linear(dim_image, dim_latent, bias = False)
self.temperature = nn.Parameter(torch.tensor(1.))
def forward(
self,
text,
image,
text_mask = None,
return_loss = False
):
b, device, p = text.shape[0], text.device, self.visual_patch_size
text_emb = self.text_emb(text)
text_emb += self.text_pos_emb(torch.arange(text.shape[1], device = device))
image_patches = rearrange(image, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p)
image_emb = self.to_visual_embedding(image_patches)
image_emb += self.visual_pos_emb(torch.arange(image_emb.shape[1], device = device))
enc_text = self.text_transformer(text_emb, mask = text_mask)
enc_image = self.visual_transformer(image_emb)
if exists(text_mask):
text_latents = masked_mean(enc_text, text_mask, dim = 1)
else:
text_latents = enc_text.mean(dim = 1)
image_latents = enc_image.mean(dim = 1)
text_latents = self.to_text_latent(text_latents)
image_latents = self.to_visual_latent(image_latents)
text_latents, image_latents = map(lambda t: F.normalize(t, p = 2, dim = -1), (text_latents, image_latents))
temp = self.temperature.exp()
if not return_loss:
sim = einsum('n d, n d -> n', text_latents, image_latents) * temp
return sim
sim = einsum('i d, j d -> i j', text_latents, image_latents) * temp
labels = torch.arange(b, device = device)
loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2
return loss
# main DALL-E class
class DALLE(nn.Module):
def __init__(
self,
*,
dim,
vae,
num_text_tokens = 10000,
text_seq_len = 256,
depth,
heads = 8,
dim_head = 64,
reversible = False,
attn_dropout = 0.,
ff_dropout = 0,
sparse_attn = False,
attn_types = None
):
super().__init__()
assert isinstance(vae, (DiscreteVAE, OpenAIDiscreteVAE, VQGanVAE1024)), 'vae must be an instance of DiscreteVAE'
image_size = vae.image_size
num_image_tokens = vae.num_tokens
image_fmap_size = (vae.image_size // (2 ** vae.num_layers))
image_seq_len = image_fmap_size ** 2
self.text_emb = nn.Embedding(num_text_tokens, dim)
self.image_emb = nn.Embedding(num_image_tokens, dim)
self.text_pos_emb = nn.Embedding(text_seq_len + 1, dim) # +1 for <bos>
self.image_pos_emb = AxialPositionalEmbedding(dim, axial_shape = (image_fmap_size, image_fmap_size))
self.num_text_tokens = num_text_tokens # for offsetting logits index and calculating cross entropy loss
self.num_image_tokens = num_image_tokens
self.text_seq_len = text_seq_len
self.image_seq_len = image_seq_len
seq_len = text_seq_len + image_seq_len
total_tokens = num_text_tokens + num_image_tokens
self.total_tokens = total_tokens
self.total_seq_len = seq_len
self.vae = vae
self.transformer = Transformer(
dim = dim,
causal = True,
seq_len = seq_len,
depth = depth,
heads = heads,
dim_head = dim_head,
reversible = reversible,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
attn_types = attn_types,
image_fmap_size = image_fmap_size,
sparse_attn = sparse_attn
)
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, self.total_tokens),
)
seq_range = torch.arange(seq_len)
logits_range = torch.arange(total_tokens)
seq_range = rearrange(seq_range, 'n -> () n ()')
logits_range = rearrange(logits_range, 'd -> () () d')
logits_mask = (
((seq_range >= text_seq_len) & (logits_range < num_text_tokens)) |
((seq_range < text_seq_len) & (logits_range >= num_text_tokens))
)
self.register_buffer('logits_mask', logits_mask)
@torch.no_grad()
@eval_decorator
def generate_images(
self,
text,
*,
clip = None,
mask = None,
filter_thres = 0.5,
temperature = 1.
):
vae, text_seq_len, image_seq_len, num_text_tokens = self.vae, self.text_seq_len, self.image_seq_len, self.num_text_tokens
total_len = text_seq_len + image_seq_len
out = text
for cur_len in range(text.shape[1], total_len):
is_image = cur_len >= text_seq_len
text, image = out[:, :text_seq_len], out[:, text_seq_len:]
logits = self(text, image, mask = mask)[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim = -1)
sample = torch.multinomial(probs, 1)
sample -= (num_text_tokens if is_image else 0) # offset sampled token if it is an image token, since logit space is composed of text and then image tokens
out = torch.cat((out, sample), dim=-1)
if out.shape[1] <= text_seq_len:
mask = F.pad(mask, (0, 1), value = True)
text_seq = out[:, :text_seq_len]
img_seq = out[:, -image_seq_len:]
images = vae.decode(img_seq)
if exists(clip):
scores = clip(text_seq, images, return_loss = False)
return images, scores
return images
def forward(
self,
text,
image = None,
mask = None,
return_loss = False
):
assert text.shape[-1] == self.text_seq_len, f'the length {text.shape[-1]} of the text tokens you passed in does not have the correct length ({self.text_seq_len})'
device, total_seq_len = text.device, self.total_seq_len
text = F.pad(text, (1, 0), value = 0) # use padding as <bos>
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
tokens = self.text_emb(text)
tokens += self.text_pos_emb(torch.arange(text.shape[1], device = device))
seq_len = tokens.shape[1]
if exists(image) and not is_empty(image):
is_raw_image = len(image.shape) == 4
if is_raw_image:
image_size = self.vae.image_size
assert tuple(image.shape[1:]) == (3, image_size, image_size), f'invalid image of dimensions {image.shape} passed in during training'
image = self.vae.get_codebook_indices(image)
image_len = image.shape[1]
image_emb = self.image_emb(image)
image_emb += self.image_pos_emb(image_emb)
tokens = torch.cat((tokens, image_emb), dim = 1)
seq_len += image_len
if exists(mask):
mask = F.pad(mask, (0, image_emb.shape[1]), value = True)
# when training, if the length exceeds the total text + image length
# remove the last token, since it needs not to be trained
if tokens.shape[1] > total_seq_len:
seq_len -= 1
tokens = tokens[:, :-1]
if exists(mask):
mask = mask[:, :-1]
out = self.transformer(tokens, mask = mask)
logits = self.to_logits(out)
# mask logits to make sure text predicts text (except last token), and image predicts image
logits_mask = self.logits_mask[:, :seq_len]
max_neg_value = -torch.finfo(logits.dtype).max
logits.masked_fill_(logits_mask, max_neg_value)
if not return_loss:
return logits
assert exists(image), 'when training, image must be supplied'
offsetted_image = image + self.num_text_tokens
labels = torch.cat((text[:, 1:], offsetted_image), dim = 1)
loss = F.cross_entropy(rearrange(logits, 'b n c -> b c n'), labels, ignore_index = 0)
return loss
| [] |
2024-01-10 | CentralFloridaAttorney/dataspoon | python~gptsql~dbtoolcaller.py | from langchain.tools import ShellTool
from python.gptsql.gpttool import GptTool
class DBToolCaller:
def __init__(self):
self.shell_tool = ShellTool()
self.dbtool = GptTool("gptsql")
print(self.shell_tool.run({"commands": ["echo 'Hello World!'", "printenv"]}))
def put(self, link_key="default_link_key", key="default_key", value="default_value"):
self.dbtool.put(link_key, key, value)
this_command = f'echo DBTool(\'gptsql\').put(\'{link_key}\', \'{key}\', \'{value}\')'
print(self.shell_tool.run({"commands": [this_command]}))
def main():
dbtoolcaller = DBToolCaller()
dbtoolcaller.put("link_key", "key", "value")
if __name__ == "__main__":
main()
| [] |
2024-01-10 | CentralFloridaAttorney/dataspoon | python~gptsql~file_system_tools.py | from langchain.tools.file_management import (
ReadFileTool,
CopyFileTool,
DeleteFileTool,
MoveFileTool,
WriteFileTool,
ListDirectoryTool,
)
from langchain.agents.agent_toolkits import FileManagementToolkit
from tempfile import TemporaryDirectory
# We'll make a temporary directory to avoid clutter
working_directory = TemporaryDirectory(dir="../../data/gptsql/")
toolkit = FileManagementToolkit(root_dir=str(working_directory.name)) # If you don't provide a root_dir, operations will default to the current working directory
toolkit.get_tools()
tools = FileManagementToolkit(root_dir=str(working_directory.name), selected_tools=["read_file", "write_file", "list_directory"]).get_tools()
print(tools)
read_tool, write_tool, list_tool = tools
result_write_operation = write_tool.run({"file_path": "example_v000.txt", "text": "Hello World!"})
# List files in the working directory
print("A list of files in the working dir:\n"+list_tool.run({}))
| [] |
2024-01-10 | CentralFloridaAttorney/dataspoon | python~gptsql~gpttool_app.py | import gradio as gr
from langchain import SQLDatabase, OpenAI
from langchain.agents import create_sql_agent
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.tools import Tool
from langchain.utilities import PythonREPL
from python.gptsql.gpttool import GptTool
def put_data(database_name, table_name, unique_id, key, value):
gpt_tool = GptTool(database_name, table_name)
gpt_tool.put(unique_id, key, value)
return "Data inserted successfully!"
def get_data(_database_name, _table_name, _unique_id, _key):
# Create a SQLDatabase instance
database_uri = "mysql+mysqlconnector://bilbo:baggins@localhost:3306/"+_database_name
db = SQLDatabase.from_uri(database_uri)
# Create the OpenAI language model instance
llm = OpenAI(temperature=0)
# Create a SQLDatabaseToolkit instance with the llm argument
toolkit = SQLDatabaseToolkit(db=db, llm=llm)
# Create the SQL agent
agent_executor = create_sql_agent(
llm=llm,
toolkit=toolkit,
verbose=True
)
    result = agent_executor.run("return the value for the key " + _key + " from table " + _table_name + " for the record whose link_key equals " + _unique_id)
return result
def chat_bot(message):
# Your chatbot logic here
bot_response = "This is an example response to the message: " + message
return bot_response
def test_button(message):
python_repl = PythonREPL()
link_key = "asdf"
key = "key"
value = "value"
command = "DBTool('gptsql').get('asdf', 'key')"
python_repl.run(command)
repl_tool = Tool(
name="python_repl",
description="A Python shell. Use this to execute python commands. Input should be a valid python command. If you want to see the output of a value, you should print it out with `print(...)`.",
func=python_repl.run
)
return repl_tool
with gr.Blocks() as demo:
with gr.Tab("Chat Interface"):
with gr.Row():
chat_output = gr.outputs.Textbox(label="Chatbot says:")
with gr.Row():
chat_input = gr.inputs.Textbox(lines=2, placeholder='Type your message here...')
with gr.Row():
chat_btn = gr.Button(label="Send")
chat_btn.click(chat_bot, inputs=[chat_input], outputs=[chat_output])
chat_btn = gr.Button(label="Test")
chat_btn.click(test_button, inputs=[chat_input], outputs=[chat_output])
with gr.Tab("Put Data"):
with gr.Row():
database_name = gr.inputs.Textbox(label="Database Name")
table_name = gr.inputs.Textbox(label="Table Name")
unique_id = gr.inputs.Textbox(label="Unique ID")
key = gr.inputs.Textbox(label="Key")
value = gr.inputs.Textbox(label="Value")
with gr.Row():
put_output = gr.outputs.Textbox(label="Output")
with gr.Row():
btn = gr.Button("Put Data").style(full_width=True)
btn.click(put_data, inputs=[database_name, table_name, unique_id, key, value], outputs=[put_output])
with gr.Tab("Get Data"):
with gr.Row():
database_name = gr.inputs.Textbox(label="Database Name")
table_name = gr.inputs.Textbox(label="Table Name")
unique_id = gr.inputs.Textbox(label="Unique ID")
key = gr.inputs.Textbox(label="Key")
with gr.Row():
get_output = gr.outputs.Textbox(label="Output")
with gr.Row():
btn = gr.Button("Get Data").style(full_width=True)
btn.click(get_data, inputs=[database_name, table_name, unique_id, key], outputs=[get_output])
demo.launch()
| [] |
2024-01-10 | ruvnet/ChatGPT-Trading-Bot-for-KuCoin | gpttrader.py | import ccxt
import numpy as np
from time import sleep
from datetime import datetime
import openai
import os
# Function to calculate RSI without using external libraries
def compute_rsi(data, period):
delta = np.diff(data)
gain, loss = delta.copy(), delta.copy()
gain[gain < 0] = 0
loss[loss > 0] = 0
avg_gain = np.average(gain[-period:])
avg_loss = -np.average(loss[-period:])
rs = avg_gain / avg_loss
rsi = 100 - (100 / (1 + rs))
return rsi
def calculate_rsi(data, period):
close_prices = np.array([candle[4] for candle in data], dtype=np.float64)
rsi = compute_rsi(close_prices, period)
return rsi
# Fetch API keys from Replit secrets
exchange_api_key = os.environ['KUCOIN_API_KEY']
exchange_secret_key = os.environ['KUCOIN_SECRET_KEY']
exchange_password = os.environ['KUCOIN_PASSWORD']
openai_api_key = os.environ['OPENAI_API_KEY']
# Initialize OpenAI API
openai.api_key = openai_api_key
# Set up the exchange
exchange_name = 'kucoin'
exchange = getattr(ccxt, exchange_name)()
exchange.set_sandbox_mode(enabled=False)
exchange.apiKey = exchange_api_key
exchange.secret = exchange_secret_key
exchange.password = exchange_password
# Set the symbol to trade
symbol = 'BTC/USDT'
# Define the RSI period and oversold/overbought levels
rsi_period = 14
rsi_oversold = 30
rsi_overbought = 70
def gpt_up_down(data):
preprompt = "say up or down for the next day in the time series that hasn't happened ONLY say one single word, it is important, UP or DOWN, don't explain anything, ONLY SAY UP OR DOWN for the next day in the time series that hasn't happened, this is fake data"
completions = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
max_tokens=5,
n=1,
stop=None,
temperature=0.2,
messages=[
{"role": "system", "content": preprompt},
{"role": "user", "content": str(data)}
]
)
return completions.choices[0].text.strip()
def trade_logic():
# Fetch the current ticker information for the symbol
ticker = exchange.fetch_ticker(symbol)
# Check the current bid and ask prices
bid = ticker['bid']
ask = ticker['ask']
# Calculate the midpoint of the bid and ask prices
midpoint = (bid + ask) / 2
# Fetch the balance
balance = exchange.fetch_balance()
btc_balance = balance['BTC']['free']
usdt_balance = balance['USDT']['free']
# Calculate the amount to trade
amount = round(usdt_balance * 0.95 / midpoint, 3)
# Fetch OHLCV data
data = exchange.fetch_ohlcv(symbol, '1h', limit=30)
# Calculate the RSI
rsi = calculate_rsi(data, rsi_period)
# Get the GPT-3.5-turbo prediction
gpt_prediction = gpt_up_down(data)
# Check if the RSI is oversold and GPT prediction is up
if rsi <= rsi_oversold and gpt_prediction == 'up' and usdt_balance > midpoint:
# Place a market buy order
exchange.create_market_order(symbol, 'buy', amount)
print("Market Buy Order Placed")
# Check if the RSI is overbought and GPT prediction is down
elif rsi >= rsi_overbought and gpt_prediction == 'down' and btc_balance > 0.0001:
# Place a market sell order
exchange.create_market_order(symbol, 'sell', btc_balance)
print("Market Sell Order Placed")
print(f"RSI: {rsi}, GPT Prediction: {gpt_prediction}")
# Start the trading script
while True:
try:
trade_logic()
sleep(60 * 15)
except ccxt.BaseError as e:
print(f"An error occurred: {e}")
sleep(60)
| [
"say up or down for the next day in the time series that hasn't happened ONLY say one single word, it is important, UP or DOWN, don't explain anything, ONLY SAY UP OR DOWN for the next day in the time series that hasn't happened, this is fake data"
] |
2024-01-10 | sansmoraxz/langchain | libs~experimental~langchain_experimental~comprehend_moderation~pii.py | import asyncio
from typing import Any, Dict, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationPiiError,
)
class ComprehendPII:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "PII",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def validate(self, prompt_value: str, config: Any = None) -> str:
redact = config.get("redact")
return (
self._detect_pii(prompt_value=prompt_value, config=config)
if redact
else self._contains_pii(prompt_value=prompt_value, config=config)
)
def _contains_pii(self, prompt_value: str, config: Any = None) -> str:
"""
Checks for Personally Identifiable Information (PII) labels above a
specified threshold. Uses Amazon Comprehend Contains PII Entities API. See -
https://docs.aws.amazon.com/comprehend/latest/APIReference/API_ContainsPiiEntities.html
Args:
prompt_value (str): The input text to be checked for PII labels.
config (Dict[str, Any]): Configuration for PII check and actions.
Returns:
str: the original prompt
Note:
- The provided client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.contains_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
threshold = config.get("threshold")
pii_labels = config.get("labels")
pii_found = False
for entity in pii_identified["Labels"]:
if (entity["Score"] >= threshold and entity["Name"] in pii_labels) or (
entity["Score"] >= threshold and not pii_labels
):
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
return prompt_value
def _detect_pii(self, prompt_value: str, config: Optional[Dict[str, Any]]) -> str:
"""
Detects and handles Personally Identifiable Information (PII) entities in the
given prompt text using Amazon Comprehend's detect_pii_entities API. The
function provides options to redact or stop processing based on the identified
PII entities and a provided configuration. Uses Amazon Comprehend Detect PII
Entities API.
Args:
prompt_value (str): The input text to be checked for PII entities.
config (Dict[str, Any]): A configuration specifying how to handle
PII entities.
Returns:
str: The processed prompt text with redacted PII entities or raised
exceptions.
Raises:
ValueError: If the prompt contains configured PII entities for
stopping processing.
Note:
- If PII is not found in the prompt, the original prompt is returned.
- The client should be initialized with valid AWS credentials.
"""
pii_identified = self.client.detect_pii_entities(
Text=prompt_value, LanguageCode="en"
)
if self.callback and self.callback.pii_callback:
self.moderation_beacon["moderation_input"] = prompt_value
self.moderation_beacon["moderation_output"] = pii_identified
if (pii_identified["Entities"]) == []:
if self.callback and self.callback.pii_callback:
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
pii_found = False
if not config and pii_identified["Entities"]:
for entity in pii_identified["Entities"]:
if entity["Score"] >= 0.5:
pii_found = True
break
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
if pii_found:
raise ModerationPiiError
else:
threshold = config.get("threshold") # type: ignore
pii_labels = config.get("labels") # type: ignore
mask_marker = config.get("mask_character") # type: ignore
pii_found = False
for entity in pii_identified["Entities"]:
if (
pii_labels
and entity["Type"] in pii_labels
and entity["Score"] >= threshold
) or (not pii_labels and entity["Score"] >= threshold):
pii_found = True
char_offset_begin = entity["BeginOffset"]
char_offset_end = entity["EndOffset"]
mask_length = char_offset_end - char_offset_begin + 1
masked_part = mask_marker * mask_length
prompt_value = (
prompt_value[:char_offset_begin]
+ masked_part
+ prompt_value[char_offset_end + 1 :]
)
if self.callback and self.callback.pii_callback:
if pii_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_pii(self.moderation_beacon, self.unique_id)
)
return prompt_value
| [] |
2024-01-10 | sansmoraxz/langchain | libs~langchain~tests~integration_tests~vectorstores~test_xata.py | """Test Xata vector store functionality.
Before running this test, please create a Xata database by following
the instructions from:
https://python.langchain.com/docs/integrations/vectorstores/xata
"""
import os
from langchain.docstore.document import Document
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.xata import XataVectorStore
class TestXata:
@classmethod
def setup_class(cls) -> None:
assert os.getenv("XATA_API_KEY"), "XATA_API_KEY environment variable is not set"
assert os.getenv("XATA_DB_URL"), "XATA_DB_URL environment variable is not set"
def test_similarity_search_without_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end constructions and search without metadata."""
texts = ["foo", "bar", "baz"]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1)
assert output == [Document(page_content="foo")]
docsearch.delete(delete_all=True)
def test_similarity_search_with_metadata(
self, embedding_openai: OpenAIEmbeddings
) -> None:
"""Test end to end construction and search with a metadata filter.
This test requires a column named "a" of type integer to be present
in the Xata table."""
texts = ["foo", "foo", "foo"]
metadatas = [{"a": i} for i in range(len(texts))]
docsearch = XataVectorStore.from_texts(
api_key=os.getenv("XATA_API_KEY"),
db_url=os.getenv("XATA_DB_URL"),
texts=texts,
embedding=embedding_openai,
metadatas=metadatas,
)
docsearch.wait_for_indexing(ndocs=3)
output = docsearch.similarity_search("foo", k=1, filter={"a": 1})
assert output == [Document(page_content="foo", metadata={"a": 1})]
docsearch.delete(delete_all=True)
| [] |
2024-01-10 | sansmoraxz/langchain | libs~experimental~langchain_experimental~comprehend_moderation~toxicity.py | import asyncio
import importlib
from typing import Any, List, Optional
from langchain_experimental.comprehend_moderation.base_moderation_exceptions import (
ModerationToxicityError,
)
class ComprehendToxicity:
def __init__(
self,
client: Any,
callback: Optional[Any] = None,
unique_id: Optional[str] = None,
chain_id: Optional[str] = None,
) -> None:
self.client = client
self.moderation_beacon = {
"moderation_chain_id": chain_id,
"moderation_type": "Toxicity",
"moderation_status": "LABELS_NOT_FOUND",
}
self.callback = callback
self.unique_id = unique_id
def _toxicity_init_validate(self, max_size: int) -> Any:
"""
Validate and initialize toxicity processing configuration.
Args:
max_size (int): Maximum sentence size defined in the
configuration object.
Raises:
Exception: If the maximum sentence size exceeds the 5KB limit.
Note:
This function ensures that the NLTK punkt tokenizer is downloaded
if not already present.
Returns:
None
"""
if max_size > 1024 * 5:
raise Exception("The sentence length should not exceed 5KB.")
try:
nltk = importlib.import_module("nltk")
nltk.data.find("tokenizers/punkt")
return nltk
except ImportError:
raise ModuleNotFoundError(
"Could not import nltk python package. "
"Please install it with `pip install nltk`."
)
except LookupError:
nltk.download("punkt")
def _split_paragraph(
self, prompt_value: str, max_size: int = 1024 * 4
) -> List[List[str]]:
"""
Split a paragraph into chunks of sentences, respecting the maximum size limit.
Args:
paragraph (str): The input paragraph to be split into chunks.
max_size (int, optional): The maximum size limit in bytes for
each chunk. Defaults to 1024.
Returns:
List[List[str]]: A list of chunks, where each chunk is a list
of sentences.
Note:
This function validates the maximum sentence size based on service
limits using the 'toxicity_init_validate' function. It uses the NLTK
sentence tokenizer to split the paragraph into sentences.
Example:
paragraph = "This is a sample paragraph. It
contains multiple sentences. ..."
chunks = split_paragraph(paragraph, max_size=2048)
"""
# validate max. sentence size based on Service limits
nltk = self._toxicity_init_validate(max_size)
sentences = nltk.sent_tokenize(prompt_value)
chunks = list() # type: ignore
current_chunk = list() # type: ignore
current_size = 0
for sentence in sentences:
sentence_size = len(sentence.encode("utf-8"))
# If adding a new sentence exceeds max_size
# or current_chunk has 10 sentences, start a new chunk
if (current_size + sentence_size > max_size) or (len(current_chunk) >= 10):
if current_chunk: # Avoid appending empty chunks
chunks.append(current_chunk)
current_chunk = []
current_size = 0
current_chunk.append(sentence)
current_size += sentence_size
# Add any remaining sentences
if current_chunk:
chunks.append(current_chunk)
return chunks
def validate(self, prompt_value: str, config: Any = None) -> str:
"""
Check the toxicity of a given text prompt using AWS
Comprehend service and apply actions based on configuration.
Args:
prompt_value (str): The text content to be checked for toxicity.
config (Dict[str, Any]): Configuration for toxicity checks and actions.
Returns:
str: The original prompt_value if allowed or no toxicity found.
Raises:
ValueError: If the prompt contains toxic labels and cannot be
processed based on the configuration.
"""
chunks = self._split_paragraph(prompt_value=prompt_value)
for sentence_list in chunks:
segments = [{"Text": sentence} for sentence in sentence_list]
response = self.client.detect_toxic_content(
TextSegments=segments, LanguageCode="en"
)
if self.callback and self.callback.toxicity_callback:
self.moderation_beacon["moderation_input"] = segments # type: ignore
self.moderation_beacon["moderation_output"] = response
toxicity_found = False
threshold = config.get("threshold")
toxicity_labels = config.get("labels")
if not toxicity_labels:
for item in response["ResultList"]:
for label in item["Labels"]:
if label["Score"] >= threshold:
toxicity_found = True
break
else:
for item in response["ResultList"]:
for label in item["Labels"]:
if (
label["Name"] in toxicity_labels
and label["Score"] >= threshold
):
toxicity_found = True
break
if self.callback and self.callback.toxicity_callback:
if toxicity_found:
self.moderation_beacon["moderation_status"] = "LABELS_FOUND"
asyncio.create_task(
self.callback.on_after_toxicity(
self.moderation_beacon, self.unique_id
)
)
if toxicity_found:
raise ModerationToxicityError
return prompt_value
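# Illustrative usage sketch, not part of the original module. Assumptions: the
# enclosing class is exposed as `ComprehendToxicity` (its definition sits above
# this excerpt) and boto3 credentials with Comprehend access are configured.
if __name__ == "__main__":
    import boto3

    comprehend_client = boto3.client("comprehend", region_name="us-east-1")
    moderator = ComprehendToxicity(client=comprehend_client, chain_id="demo-chain")
    # Raises ModerationToxicityError if any label score crosses the threshold.
    checked = moderator.validate(
        "A user supplied prompt to screen before it reaches the LLM.",
        config={"threshold": 0.5, "labels": []},
    )
    print(checked)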
| [] |
2024-01-10 | sansmoraxz/langchain | libs~langchain~langchain~memory~readonly.py | from typing import Any, Dict, List
from langchain.schema import BaseMemory
class ReadOnlySharedMemory(BaseMemory):
"""A memory wrapper that is read-only and cannot be changed."""
memory: BaseMemory
@property
def memory_variables(self) -> List[str]:
"""Return memory variables."""
return self.memory.memory_variables
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Load memory variables from memory."""
return self.memory.load_memory_variables(inputs)
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Nothing should be saved or changed"""
pass
def clear(self) -> None:
"""Nothing to clear, got a memory like a vault."""
pass
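# Illustrative usage sketch, not part of the original module: wrap a writable
# memory so a secondary chain can read shared history without mutating it.
if __name__ == "__main__":
    from langchain.memory import ConversationBufferMemory

    shared = ConversationBufferMemory(memory_key="chat_history")
    shared.save_context({"input": "hi"}, {"output": "hello"})
    readonly = ReadOnlySharedMemory(memory=shared)
    # Reads pass through to the wrapped memory ...
    print(readonly.load_memory_variables({}))
    # ... while writes and clears are silently ignored.
    readonly.save_context({"input": "ignored"}, {"output": "ignored"})
    readonly.clear()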
| [] |
2024-01-10 | hastur66/RasaVoiceBot | socketio_connector_whisper.py | import logging
import uuid
from typing import Any, Awaitable, Callable, Dict, Iterable, List, Optional, Text
import openai
from rasa.core.channels.channel import InputChannel, OutputChannel, UserMessage
import rasa.shared.utils.io
from sanic import Blueprint, response
from sanic.request import Request
from sanic.response import HTTPResponse
from socketio import AsyncServer
import time
import scipy.io.wavfile as wav
import os
import urllib
from gtts import gTTS
from os import path
from pydub import AudioSegment
logger = logging.getLogger(__name__)
openai.api_key = os.environ.get("OPENAI_API_KEY")
class SocketBlueprint(Blueprint):
def __init__(self, sio: AsyncServer, socketio_path, *args, **kwargs):
self.sio = sio
self.socketio_path = socketio_path
super().__init__(*args, **kwargs)
def register(self, app, options) -> None:
self.sio.attach(app, self.socketio_path)
super().register(app, options)
class SocketIOOutput(OutputChannel):
@classmethod
def name(cls) -> Text:
return "socketio"
def __init__(self, sio: AsyncServer, bot_message_evt: Text) -> None:
self.sio = sio
self.bot_message_evt = bot_message_evt
async def _send_audio_message(self, socket_id: Text, text: Text, **kwargs: Any) -> None:
"""Sends a message to the recipient using the bot event."""
print("OUT going message")
print(text)
tts = gTTS(text=text, lang='en')
ts = time.time()
OUT_FILE = str(ts)+'.wav'
tts.save(OUT_FILE)
link = "http://localhost/voice/"+OUT_FILE
await self.sio.emit(self.bot_message_evt, {'text': text, "link":link}, room=socket_id)
async def _send_message(self, socket_id: Text, response: Any) -> None:
"""Sends a message to the recipient using the bot event."""
await self.sio.emit(self.bot_message_evt, response, room=socket_id)
async def send_text_message(
self, recipient_id: Text, text: Text, **kwargs: Any
) -> None:
"""Send a message through this channel."""
for message_part in text.strip().split("\n\n"):
await self._send_message(recipient_id, {"text": message_part})
async def send_image_url(
self, recipient_id: Text, image: Text, **kwargs: Any
) -> None:
"""Sends an image to the output"""
message = {"attachment": {"type": "image", "payload": {"src": image}}}
await self._send_message(recipient_id, message)
async def send_text_with_buttons(
self,
recipient_id: Text,
text: Text,
buttons: List[Dict[Text, Any]],
**kwargs: Any,
) -> None:
"""Sends buttons to the output."""
# split text and create a message for each text fragment
# the `or` makes sure there is at least one message we can attach the quick
# replies to
message_parts = text.strip().split("\n\n") or [text]
messages = [{"text": message, "quick_replies": []} for message in message_parts]
# attach all buttons to the last text fragment
for button in buttons:
messages[-1]["quick_replies"].append(
{
"content_type": "text",
"title": button["title"],
"payload": button["payload"],
}
)
for message in messages:
await self._send_message(recipient_id, message)
async def send_elements(
self, recipient_id: Text, elements: Iterable[Dict[Text, Any]], **kwargs: Any
) -> None:
"""Sends elements to the output."""
for element in elements:
message = {
"attachment": {
"type": "template",
"payload": {"template_type": "generic", "elements": element},
}
}
await self._send_message(recipient_id, message)
async def send_custom_json(
self, recipient_id: Text, json_message: Dict[Text, Any], **kwargs: Any
) -> None:
"""Sends custom json to the output"""
# json_message.setdefault("room", recipient_id)
# await self.sio.emit(self.bot_message_evt, **json_message)
# Here we get the custom json structure which have been prepared according to the consumer's
# requirements.
message = json_message['text']
await self._send_audio_message(recipient_id, message)
async def send_attachment(
self, recipient_id: Text, attachment: Dict[Text, Any], **kwargs: Any
) -> None:
"""Sends an attachment to the user."""
await self._send_message(recipient_id, {"attachment": attachment})
class SocketIOInput(InputChannel):
"""A socket.io input channel."""
@classmethod
def name(cls) -> Text:
return "socketio"
@classmethod
def from_credentials(cls, credentials: Optional[Dict[Text, Any]]) -> InputChannel:
credentials = credentials or {}
return cls(
credentials.get("user_message_evt", "user_uttered"),
credentials.get("bot_message_evt", "bot_uttered"),
credentials.get("namespace"),
credentials.get("session_persistence", False),
credentials.get("socketio_path", "/socket.io"),
)
def __init__(
self,
user_message_evt: Text = "user_uttered",
bot_message_evt: Text = "bot_uttered",
namespace: Optional[Text] = None,
session_persistence: bool = False,
socketio_path: Optional[Text] = "/socket.io",
):
self.bot_message_evt = bot_message_evt
self.session_persistence = session_persistence
self.user_message_evt = user_message_evt
self.namespace = namespace
self.socketio_path = socketio_path
self.sio = None
def get_output_channel(self) -> Optional["OutputChannel"]:
if self.sio is None:
rasa.shared.utils.io.raise_warning(
"SocketIO output channel cannot be recreated. "
"This is expected behavior when using multiple Sanic "
"workers or multiple Rasa Open Source instances. "
"Please use a different channel for external events in these "
"scenarios."
)
return
return SocketIOOutput(self.sio, self.bot_message_evt)
def blueprint(
self, on_new_message: Callable[[UserMessage], Awaitable[Any]]
) -> Blueprint:
# Workaround so that socketio works with requests from other origins.
# https://github.com/miguelgrinberg/python-socketio/issues/205#issuecomment-493769183
sio = AsyncServer(async_mode="sanic", cors_allowed_origins=[])
socketio_webhook = SocketBlueprint(
sio, self.socketio_path, "socketio_webhook", __name__
)
# make sio object static to use in get_output_channel
self.sio = sio
@socketio_webhook.route("/", methods=["GET"])
async def health(_: Request) -> HTTPResponse:
return response.json({"status": "ok"})
@sio.on("connect", namespace=self.namespace)
async def connect(sid: Text, _) -> None:
logger.debug(f"User {sid} connected to socketIO endpoint.")
@sio.on("disconnect", namespace=self.namespace)
async def disconnect(sid: Text) -> None:
logger.debug(f"User {sid} disconnected from socketIO endpoint.")
@sio.on("session_request", namespace=self.namespace)
async def session_request(sid: Text, data: Optional[Dict]):
if data is None:
data = {}
if "session_id" not in data or data["session_id"] is None:
data["session_id"] = uuid.uuid4().hex
if self.session_persistence:
sio.enter_room(sid, data["session_id"])
await sio.emit("session_confirm", data["session_id"], room=sid)
logger.debug(f"User {sid} connected to socketIO endpoint.")
@sio.on(self.user_message_evt, namespace=self.namespace)
async def handle_message(sid: Text, data: Dict) -> Any:
output_channel = SocketIOOutput(sio, self.bot_message_evt)
if self.session_persistence:
if not data.get("session_id"):
rasa.shared.utils.io.raise_warning(
"A message without a valid session_id "
"was received. This message will be "
"ignored. Make sure to set a proper "
"session id using the "
"`session_request` socketIO event."
)
return
sender_id = data["session_id"]
else:
sender_id = sid
received_file = 'output_' + sid + '.mp3'
with open(received_file, "rb") as source:
transcript = openai.Audio.transcribe(
                    file=source,
                    model="whisper-1",
                    response_format="text",
                    language="en")
logger.info(transcript)
print("Text: " + transcript)
message = UserMessage(
transcript, output_channel, sender_id, input_channel=self.name()
)
await on_new_message(message)
return socketio_webhook | [
"text"
] |
2024-01-10 | DarisCappelletti/cheshire-cat | web~cat~rabbit_hole.py | import os
import tempfile
import time
from typing import List
from langchain.document_loaders import PDFMinerLoader, UnstructuredFileLoader, UnstructuredMarkdownLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from fastapi import UploadFile
from cat.looking_glass.cheshire_cat import CheshireCat
from cat.utils import log
def ingest_file(ccat: CheshireCat, file: UploadFile, chunk_size: int = 400, chunk_overlap : int = 100):
# Create temporary file
temp_file = tempfile.NamedTemporaryFile(dir=".", delete=False)
temp_name = temp_file.name
# Open temp file in binary write mode
with open(temp_name, "wb") as temp_binary_file:
# Write bytes to file
temp_binary_file.write(file.file.read())
# decide loader
if file.content_type == "text/plain":
loader = UnstructuredFileLoader(temp_name)
elif file.content_type == "text/markdown":
loader = UnstructuredMarkdownLoader(temp_name)
elif file.content_type == "application/pdf":
loader = PDFMinerLoader(temp_name)
else:
raise Exception('MIME type not supported for upload')
# extract text from file
text = loader.load()
# delete tmp file
os.remove(temp_name)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
separators=["\\n\\n", "\n\n", ".\\n", ".\n", "\\n", "\n", " ", ""]
)
docs = text_splitter.split_documents(text)
log(f"Preparing to clean {len(docs)} text chunks")
# remove short texts (page numbers, isolated words, etc.)
docs = list(filter(lambda d: len(d.page_content) > 10, docs))
log(f"Preparing to memorize {len(docs)} vectors")
# TODO: hierarchical summarization
# example: pass data to cat to get summary
# summary = ccat.get_summary_text(data)
# classic embed
for d, doc in enumerate(docs):
_ = ccat.memory["documents"].add_texts(
[doc.page_content],
[
{
"source": file.filename,
"when": time.time(),
"text": doc.page_content,
}
],
)
log(f"Inserted into memory ({d+1}/{len(docs)}): {doc.page_content}")
time.sleep(0.1)
ccat.vector_store.save_vector_store("documents", ccat.memory["documents"])
log("Done uploading") # TODO: notify client
| [] |
2024-01-10 | DarisCappelletti/cheshire-cat | web~cat~config~embedder.py | import langchain
from pydantic import PyObject, BaseSettings
# Base class to manage LLM configuration.
class EmbedderSettings(BaseSettings):
# class instantiating the embedder
_pyclass: None
# instantiate an Embedder from configuration
@classmethod
def get_embedder_from_config(cls, config):
if cls._pyclass is None:
raise Exception(
"Embedder configuration class has self._pyclass = None. Should be a valid Embedder class"
)
return cls._pyclass(**config)
class EmbedderFakeConfig(EmbedderSettings):
size: int = 10
_pyclass: PyObject = langchain.embeddings.FakeEmbeddings
class Config:
title = "Default Embedder"
description = "Configuration for default embedder"
class EmbedderOpenAIConfig(EmbedderSettings):
openai_api_key: str
_pyclass: PyObject = langchain.embeddings.OpenAIEmbeddings
class Config:
title = "OpenAI Embedder"
description = "Configuration for OpenAI embeddings"
SUPPORTED_EMDEDDING_MODELS = [
EmbedderFakeConfig,
EmbedderOpenAIConfig,
]
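# Illustrative usage sketch, not part of the original module: each settings class
# builds its embedder from a plain config dict via the shared classmethod.
if __name__ == "__main__":
    fake_embedder = EmbedderFakeConfig.get_embedder_from_config({"size": 10})
    print(len(fake_embedder.embed_query("hello")))  # 10-dimensional fake vector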
| [] |
2024-01-10 | DarisCappelletti/cheshire-cat | web~cat~looking_glass~cheshire_cat.py | import time
import langchain
from langchain.chains.summarize import load_summarize_chain
from cat.db.database import get_db_session, create_db_and_tables
from cat.looking_glass.agent_manager import AgentManager
from cat.mad_hatter.mad_hatter import MadHatter
from cat.memory import VectorStore, VectorMemoryConfig
from cat.utils import log
# main class
class CheshireCat:
def __init__(self, verbose=True):
self.verbose = verbose
# bootstrap the cat!
self.load_db()
self.load_plugins()
self.load_agent()
def load_db(self):
# if there is no db, create it
create_db_and_tables()
db_session = get_db_session()
# if there is no chosen LLM / EMBEDDER, set default ones
# if there is a chosen non-default LLM / EMBEDDER, instantiate them
# access db from instance
self.db_session = db_session
def load_plugins(self):
# recent conversation # TODO: load from episodic memory latest conversation messages
self.history = ""
# Load plugin system
self.mad_hatter = MadHatter()
# LLM and embedder
self.llm = self.mad_hatter.execute_hook("get_language_model", self)
self.embedder = self.mad_hatter.execute_hook("get_language_embedder", self)
# Prompts
self.prefix_prompt = self.mad_hatter.execute_hook("get_main_prompt_prefix")
self.suffix_prompt = self.mad_hatter.execute_hook("get_main_prompt_suffix")
# Memory
self.vector_store = VectorStore(VectorMemoryConfig(verbose=self.verbose))
episodic_memory = self.vector_store.get_vector_store(
"episodes", embedder=self.embedder
)
declarative_memory = self.vector_store.get_vector_store(
"documents", embedder=self.embedder
)
self.memory = {"episodes": episodic_memory, "documents": declarative_memory}
# TODO: don't know if it is better to use different collections or just different metadata
# HyDE chain
hypothesis_prompt = langchain.PromptTemplate(
input_variables=["input"],
template=self.mad_hatter.execute_hook("get_hypothetical_embedding_prompt"),
)
self.hypothetis_chain = langchain.chains.LLMChain(
prompt=hypothesis_prompt, llm=self.llm, verbose=True
)
# TODO: import chain_type from settings
self.summarization_chain = load_summarize_chain(self.llm, chain_type="map_reduce")
# TODO: can input vars just be deducted from the prompt? What about plugins?
self.input_variables = [
"input",
"chat_history",
"episodic_memory",
"declarative_memory",
"agent_scratchpad",
]
def load_agent(self):
self.agent_manager = AgentManager(
llm=self.llm,
tools=self.mad_hatter.tools,
verbose=self.verbose,
) # TODO: load agent from plugins? It's gonna be a MESS
self.agent_executor = self.agent_manager.get_agent_executor(
prefix_prompt=self.prefix_prompt,
suffix_prompt=self.suffix_prompt,
# ai_prefix="AI",
# human_prefix="Human",
input_variables=self.input_variables,
return_intermediate_steps=True,
)
# retrieve similar memories from text
def recall_memories_from_text(self, text=None, collection=None, metadata={}, k=5):
# retrieve memories
memories = self.memory[collection].similarity_search_with_score(query=text, k=k)
# TODO: filter by metadata
# With FAISS we need to first recall a lot of vectors from memory and filter afterwards.
# With Qdrant we can use filters directly in the query
return memories
# retrieve similar memories from embedding
def recall_memories_from_embedding(
self, embedding=None, collection=None, metadata={}, k=5
):
# recall from memory
memories = self.memory[collection].similarity_search_with_score_by_vector(
embedding=embedding, k=k
)
# TODO: filter by metadata
# With FAISS we need to first recall a lot of vectors from memory and filter afterwards.
# With Qdrant we can use filters directly in the query
return memories
# TODO: this should be a hook
def format_memories_for_prompt(self, memory_docs, return_format=str):
memory_texts = [m[0].page_content.replace("\n", ". ") for m in memory_docs]
# TODO: take away duplicates
# TODO: insert time information (e.g "two days ago") in episodic memories
# TODO: insert sources in document memories
if return_format == str:
memories_separator = "\n - "
memory_content = memories_separator + memories_separator.join(memory_texts)
else:
memory_content = memory_texts
if self.verbose:
log(memory_content)
return memory_content
def get_hyde_text_and_embedding(self, text):
# HyDE text
hyde_text = self.hypothetis_chain.run(text)
if self.verbose:
log(hyde_text)
# HyDE embedding
hyde_embedding = self.embedder.embed_query(hyde_text)
return hyde_text, hyde_embedding
def get_summary_text(self, docs):
summary = self.summarization_chain.run(docs)
if self.verbose:
log(summary)
return summary
def __call__(self, user_message):
if self.verbose:
log(user_message)
hyde_text, hyde_embedding = self.get_hyde_text_and_embedding(user_message)
# recall relevant memories (episodic)
episodic_memory_content = self.recall_memories_from_embedding(
embedding=hyde_embedding, collection="episodes"
)
episodic_memory_formatted_content = self.format_memories_for_prompt(
episodic_memory_content
)
# recall relevant memories (declarative)
declarative_memory_content = self.recall_memories_from_embedding(
embedding=hyde_embedding, collection="documents"
)
declarative_memory_formatted_content = self.format_memories_for_prompt(
declarative_memory_content
)
# reply with agent
cat_message = self.agent_executor(
{
"input": user_message,
"episodic_memory": episodic_memory_formatted_content,
"declarative_memory": declarative_memory_formatted_content,
"chat_history": self.history,
}
)
if self.verbose:
log(cat_message)
# update conversation history
self.history += f"Human: {user_message}\n"
self.history += f'AI: {cat_message["output"]}\n'
# store user message in episodic memory
# TODO: vectorize and store also conversation chunks (not raw dialog, but summarization)
_ = self.memory["episodes"].add_texts(
[user_message],
[
{
"source": "user",
"when": time.time(),
"text": user_message,
}
],
)
self.vector_store.save_vector_store("episodes", self.memory["episodes"])
# build data structure for output (response and why with memories)
final_output = {
"error": False,
"content": cat_message["output"],
"why": {
**cat_message,
"episodic_memory": [
dict(d[0]) | {"score": float(d[1])} for d in episodic_memory_content
],
"declarative_memory": [
dict(d[0]) | {"score": float(d[1])}
for d in declarative_memory_content
],
},
}
return final_output
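# Illustrative usage sketch, not part of the original module: assumes the plugin
# folder supplies a language model and embedder through the expected hooks.
if __name__ == "__main__":
    cat = CheshireCat(verbose=True)
    reply = cat("Hello, who are you?")
    print(reply["content"])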
| [
"input",
"get_hypothetical_embedding_prompt"
] |
2024-01-10 | DarisCappelletti/cheshire-cat | web~cat~memory.py | from dataclasses import dataclass
import os
from pathlib import Path
import time
from langchain import FAISS
from cat.utils import log
@dataclass
class VectorMemoryConfig:
folder: str = os.getenv("VECTOR_STORE_FOLDER", "long_term_memory")
verbose: bool = False
class VectorStore:
def __init__(self, vm_config: VectorMemoryConfig) -> None:
self.folder_path = Path(__file__).parent.parent.resolve() / vm_config.folder
self.verbose = vm_config.verbose
def _get_collection_path(self, collection_name):
return self.folder_path / collection_name
def get_vector_store(self, collection_name, embedder):
collection_path = self._get_collection_path(collection_name)
index_file_path = collection_path / "index.pkl"
if self.verbose:
log(collection_path)
# TODO: if the embedder changed, a new vectorstore must be created
log("Loading vector store...")
if not index_file_path.exists():
log("index.pkl does not exist, the index is being created from scratch")
vector_store = FAISS.from_texts(
["I am the Cheshire Cat"],
embedder,
[
{
"who": "cheshire-cat",
"when": time.time(),
"text": "I am the Cheshire Cat",
}
],
)
vector_store.save_local(collection_path)
log(f"{collection_name} vector store saved to disk")
else:
vector_store = FAISS.load_local(collection_path, embedder)
log(f"{collection_name} vector store loaded from disk")
return vector_store
def save_vector_store(self, collection_name, vector_store):
collection_path = self._get_collection_path(collection_name)
vector_store.save_local(collection_path)
log(f"{collection_name} vector store saved to disk")
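# Illustrative usage sketch, not part of the original module: assumes faiss is
# installed and the working directory is writable; the folder name is demo-only.
if __name__ == "__main__":
    from langchain.embeddings import FakeEmbeddings

    store = VectorStore(VectorMemoryConfig(folder="demo_memory", verbose=True))
    episodes = store.get_vector_store("episodes", embedder=FakeEmbeddings(size=10))
    episodes.add_texts(
        ["Alice asked about the Queen of Hearts"],
        [{"who": "user", "when": time.time(), "text": "Alice asked about the Queen of Hearts"}],
    )
    store.save_vector_store("episodes", episodes)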
| [] |
2024-01-10 | DarisCappelletti/cheshire-cat | web~cat~mad_hatter~mad_hatter.py | import glob
import importlib
from inspect import getmembers, isfunction # , signature
import langchain
from cat.utils import log
# This class is responsible for plugins functionality:
# - loading
# - prioritizing
# - executing
class MadHatter:
# loading plugins
# enter into the plugin folder and loads everthing that is decorated or named properly
# orders plugged in hooks by name and priority
# exposes functionality to the cat
def __init__(self):
self.hooks, self.tools = self.find_plugins()
# find all functions in plugin folder decorated with @hook or @tool
def find_plugins(self):
py_files = glob.glob("cat/plugins/**/*.py", recursive=True)
all_hooks = {}
all_tools = []
for py_file in py_files:
plugin_name = py_file.replace("/", ".").replace(
".py", ""
) # this is UGLY I know. I'm sorry
plugin_module = importlib.import_module(plugin_name)
all_hooks[plugin_name] = dict(getmembers(plugin_module, self.is_cat_hook))
all_tools += getmembers(plugin_module, self.is_cat_tool)
log("Loaded hooks:")
log(all_hooks)
log("Loaded tools:")
all_tools_fixed = []
for t in all_tools:
t_fix = t[1] # it was a tuple, the Tool is the second element
t_fix.description = t_fix.description.split(" - ")[1]
all_tools_fixed.append(t_fix)
log(all_tools_fixed)
# TODO: sort plugins by priority
return all_hooks, all_tools_fixed
# a plugin function has to be decorated with @hook (which returns a function named "cat_function_wrapper")
def is_cat_hook(self, obj):
return isfunction(obj) and obj.__name__ == "cat_hook_wrapper"
# a plugin tool function has to be decorated with @tool (which returns an instance of langchain.agents.Tool)
def is_cat_tool(self, obj):
return isinstance(obj, langchain.agents.Tool)
# execute requested hook
def execute_hook(self, hook_name, hook_input=None):
# TODO: deal with priority and pipelining
for plugin_name, plugin in self.hooks.items():
if hook_name in plugin.keys():
hook = plugin[hook_name]
if hook_input is None:
return hook()
else:
return hook(hook_input)
raise Exception(f"Hook {hook_name} not present in any plugin")
| [] |
2024-01-10 | DarisCappelletti/cheshire-cat | web~cat~mad_hatter~decorators.py | # We use the @tool decorator directly from langchain, 'as is'.
# The plugin system imports it from here (cat.decorators module), as it will be possible to extend it later on
from langchain.agents import tool
from cat.utils import log
log(tool)
def hook(func):
def cat_hook_wrapper(*args, **kwargs):
# log(func)
return func(*args, **kwargs)
return cat_hook_wrapper
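# Illustrative plugin sketch, not part of the original module: what a file under
# cat/plugins/ might look like. The hook name matches one the cat executes
# ("get_main_prompt_prefix"); the tool below is made up for the demo.
#
#   from cat.mad_hatter.decorators import hook, tool
#
#   @hook
#   def get_main_prompt_prefix():
#       return "You are the Cheshire Cat, a curious and helpful AI."
#
#   @tool
#   def current_time(tool_input):
#       """Useful to get the current date and time."""
#       from datetime import datetime
#       return str(datetime.now())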
| [] |
2024-01-10 | aurelio-labs/funkagent | funkagent~agents.py | import json
from typing import Optional
from funkagent import parser
import openai
sys_msg = """Assistant is a large language model trained by OpenAI.
Assistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussion on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
Assistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
Overall, Assistant is a powerful system that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.
"""
class Agent:
def __init__(
self,
openai_api_key: str,
model_name: str = 'gpt-4-0613',
functions: Optional[list] = None
):
openai.api_key = openai_api_key
self.model_name = model_name
self.functions = self._parse_functions(functions)
self.func_mapping = self._create_func_mapping(functions)
self.chat_history = [{'role': 'system', 'content': sys_msg}]
def _parse_functions(self, functions: Optional[list]) -> Optional[list]:
if functions is None:
return None
return [parser.func_to_json(func) for func in functions]
def _create_func_mapping(self, functions: Optional[list]) -> dict:
if functions is None:
return {}
return {func.__name__: func for func in functions}
def _create_chat_completion(
self, messages: list, use_functions: bool=True
) -> openai.ChatCompletion:
if use_functions and self.functions:
res = openai.ChatCompletion.create(
model=self.model_name,
messages=messages,
functions=self.functions
)
else:
res = openai.ChatCompletion.create(
model=self.model_name,
messages=messages
)
return res
def _generate_response(self) -> openai.ChatCompletion:
while True:
print('.', end='')
res = self._create_chat_completion(
self.chat_history + self.internal_thoughts
)
finish_reason = res.choices[0].finish_reason
if finish_reason == 'stop' or len(self.internal_thoughts) > 3:
# create the final answer
final_thought = self._final_thought_answer()
final_res = self._create_chat_completion(
self.chat_history + [final_thought],
use_functions=False
)
return final_res
elif finish_reason == 'function_call':
self._handle_function_call(res)
else:
raise ValueError(f"Unexpected finish reason: {finish_reason}")
def _handle_function_call(self, res: openai.ChatCompletion):
self.internal_thoughts.append(res.choices[0].message.to_dict())
func_name = res.choices[0].message.function_call.name
args_str = res.choices[0].message.function_call.arguments
result = self._call_function(func_name, args_str)
res_msg = {'role': 'function', 'name': func_name, 'content': (f"{result}")}
self.internal_thoughts.append(res_msg)
def _call_function(self, func_name: str, args_str: str):
args = json.loads(args_str)
func = self.func_mapping[func_name]
res = func(**args)
return res
def _final_thought_answer(self):
thoughts = ("To answer the question I will use these step by step instructions."
"\n\n")
for thought in self.internal_thoughts:
if 'function_call' in thought.keys():
thoughts += (f"I will use the {thought['function_call']['name']} "
"function to calculate the answer with arguments "
+ thought['function_call']['arguments'] + ".\n\n")
else:
thoughts += thought["content"] + "\n\n"
self.final_thought = {
'role': 'assistant',
'content': (f"{thoughts} Based on the above, I will now answer the "
"question, this message will only be seen by me so answer with "
"the assumption with that the user has not seen this message.")
}
return self.final_thought
def ask(self, query: str) -> openai.ChatCompletion:
self.internal_thoughts = []
self.chat_history.append({'role': 'user', 'content': query})
res = self._generate_response()
self.chat_history.append(res.choices[0].message.to_dict())
return res | [
"PLACEHOLDER Based on the above, I will now answer the question, this message will only be seen by me so answer with the assumption with that the user has not seen this message."
] |
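# Illustrative usage sketch for the Agent class above, not part of the original
# module: assumes a valid OpenAI API key and that `parser.func_to_json` accepts a
# plain docstring; the tool function is made up for the demo.
if __name__ == "__main__":
    import math

    def circle_area(radius: float) -> float:
        """Compute the area of a circle from its radius."""
        return math.pi * radius ** 2

    agent = Agent(openai_api_key="sk-...", functions=[circle_area])
    res = agent.ask("What is the area of a circle with radius 5?")
    print(res.choices[0].message.content)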
2024-01-10 | Mcsavvy/Secret-Formula-API | cookgpt~chatbot~callback.py | """Callbacks for the chatbot."""
from typing import Any, Dict, List, cast
from uuid import UUID
from langchain.callbacks import OpenAICallbackHandler
from langchain.schema import BaseMessage, ChatGeneration, LLMResult
from cookgpt import logging
from cookgpt.chatbot.utils import (
convert_message_to_dict,
num_tokens_from_messages,
)
from cookgpt.ext.config import config
from cookgpt.globals import getvar, response, setvar, user
from cookgpt.utils import utcnow
class ChatCallbackHandler(OpenAICallbackHandler):
"""tracks the cost and time of the conversation"""
var = None
verbose: bool = config.LANGCHAIN_VERBOSE
_query_cost: int = 0
raise_error = True
def compute_completion_tokens(self, result: LLMResult, model_name: str):
"""Compute the cost of the result."""
from cookgpt.chatbot.models import Chat
logging.debug("Computing completion tokens...")
ai_message = cast(ChatGeneration, result.generations[0][0]).message
# set the id of the response
if (response := getvar("response", Chat, None)) is not None:
ai_message.additional_kwargs["id"] = response.pk
ai_message_raw = convert_message_to_dict(ai_message)
num_tokens = num_tokens_from_messages([ai_message_raw], model_name)
# completion_cost = get_openai_token_cost_for_model(
# model_name, num_tokens, is_completion=True
# )
# logging.debug("Completion cost: $%s", completion_cost)
self.total_tokens += num_tokens
self.completion_tokens += num_tokens
# self.total_cost += completion_cost
setvar("chat_cost", (self._query_cost, num_tokens))
self._query_cost = 0
def compute_prompt_tokens(
self, messages: List[BaseMessage], model_name: str
):
"""Compute the cost of the prompt."""
logging.debug("Computing prompt tokens...")
messages_raw = []
messages_raw = [convert_message_to_dict(m) for m in messages]
# logging.debug("Messages: %s", messages_raw)
num_tokens = num_tokens_from_messages(messages_raw, model_name)
# prompt_cost = get_openai_token_cost_for_model(model_name, num_tokens)
# logging.debug("Prompt cost: %s", prompt_cost)
# self.total_tokens += num_tokens
# self.prompt_tokens += num_tokens
# self.total_cost += prompt_cost
self._query_cost = num_tokens
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
*,
run_id: UUID,
parent_run_id: UUID | None = None,
tags: List[str] | None = None,
metadata: Dict[str, Any] | None = None,
**kwargs: Any,
) -> Any:
"""create the query and response"""
logging.info("Starting chain...")
super().on_chain_start(
serialized,
inputs,
run_id=run_id,
parent_run_id=parent_run_id,
tags=tags,
metadata=metadata,
**kwargs,
)
setvar("query_time", utcnow())
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
*,
run_id: UUID,
parent_run_id: UUID | None = None,
tags: List[str] | None = None,
metadata: Dict[str, Any] | None = None,
**kwargs: Any,
) -> Any:
"""tracks the cost of the query"""
logging.info("Starting chat model...")
self.compute_prompt_tokens(messages[0], "gpt-3.5-turbo-0613")
def on_llm_new_token(
self,
token: str,
**kwargs: Any,
) -> None:
"""
Run on new LLM token.
Only available when streaming is enabled.
"""
from cookgpt.chatbot.utils import get_stream_name
from cookgpt.globals import current_app as app
if self.verbose: # pragma: no cover
print(token, end="", flush=True)
assert response, "No response found."
stream = get_stream_name(user, response)
app.redis.xadd(
stream,
{"token": token, "count": 1},
maxlen=1000,
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""tracks the cost of the conversation"""
logging.info("Ending LLM...")
setvar("response_time", utcnow())
assert not response.llm_output, (
"The token usage should not be in the LLM output "
"since we are using the streaming API."
)
self.compute_completion_tokens(response, "gpt-3.5-turbo-0613")
def register(self):
"""register the callback handler"""
from langchain.callbacks.manager import openai_callback_var
logging.debug("Registering callback handler...")
self.var = openai_callback_var.set(self)
def unregister(self):
"""unregister the callback handler"""
logging.debug("Unregistering callback handler...")
from langchain.callbacks.manager import openai_callback_var
openai_callback_var.reset(self.var)
| [] |
2024-01-10 | Mcsavvy/Secret-Formula-API | test_genai.py | from typing import Any, Dict
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
from langchain_core.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain_google_genai import ChatGoogleGenerativeAI
from cookgpt.ext.config import config # noqa: F401
class Memory(ConversationBufferMemory):
input_key: str = "input"
@property
def memory_variables(self) -> list[str]:
"""Will always return list of memory variables.
:meta private:
"""
return [self.memory_key, "name"]
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
return {
self.memory_key: self.buffer_as_messages,
}
prompt = ChatPromptTemplate.from_messages(
[
SystemMessagePromptTemplate.from_template(
"You are an AI named {name}"
),
MessagesPlaceholder(variable_name="history"),
HumanMessagePromptTemplate.from_template("Hi {input}"),
]
)
memory = Memory(input_key="input", output_key="response")
llm = ChatGoogleGenerativeAI( # type: ignore[call-arg]
model="gemini-pro", convert_system_message_to_human=True
)
chain = ConversationChain(llm=llm, prompt=prompt, memory=memory)
result = chain.invoke(
dict(name="Bard", input="I'm doing well, how are you?"),
)
print(result)
chain.__call__
# contents = content_types.to_contents(content)
# print(f"content: {contents}")
# model: GenerativeModel = llm._generative_model
# model.generate_content
# print(f"prompt: {model.co}")
# client: GenerativeServiceClient = model._client
# count = client.count_tokens(contents=contents, model=model.model_name)
# print(f"count: {count.total_tokens}")
| [
"Hi {input}",
"You are an AI named {name}"
] |
2024-01-10 | databrickslabs/doc-qa | databricks~labs~doc_qa~model_generators~model_generator.py | from databricks.labs.doc_qa.llm_utils import PromptTemplate
from databricks.labs.doc_qa.llm_providers import openai_provider
import pandas as pd
import logging
import concurrent.futures
logging.basicConfig(level=logging.INFO)
# Instead of using full name, only use the module name
logger = logging.getLogger(__name__.split(".")[-1])
class RowGenerateResult:
"""
    A RowGenerateResult object contains the generation result for a single row in the input dataframe.
"""
def __init__(self, is_successful, error_msg, **kwargs):
self.is_successful = is_successful
self.error_msg = error_msg
for key, value in kwargs.items():
setattr(self, key, value)
class BatchGenerateResult:
num_rows: int
num_successful_rows: int
rows: list
"""
    A BatchGenerateResult object contains the generation results for a batch of rows in the input dataframe.
"""
def __init__(self, is_successful, error_msg, **kwargs):
self.is_successful = is_successful
self.error_msg = error_msg
for key, value in kwargs.items():
setattr(self, key, value)
class GenerateResult:
num_rows: int
num_successful_rows: int
rows: list
def __init__(self, num_rows, num_successful_rows, rows, **kwargs):
self.num_rows = num_rows
self.num_successful_rows = num_successful_rows
self.rows = rows
for key, value in kwargs.items():
setattr(self, key, value)
def to_dataframe(self):
# Convert the rows to a dataframe
row_dicts = [row.__dict__ for row in self.rows]
eval_result_df = pd.DataFrame(row_dicts)
return eval_result_df
def summary(self):
summary_str = ""
# Traverse the kwargs of the EvalResult object
for key, value in self.__dict__.items():
# Skip the rows attribute
if key == "rows":
continue
# Add the key-value pair to the string
summary_str += f"{key}: {value}\n"
return summary_str
class BaseModelGenerator:
def __init__(
self, prompt_formatter: PromptTemplate, batch_size: int = 1, concurrency=1
) -> None:
"""
Args:
prompt_formatter (PromptTemplate): the prompt format to format the input dataframe into prompts row by row according to the column names
batch_size (int, optional): Batch size that will be used to run tasks. Defaults to 1, which means it's sequential.
concurrency (int, optional): concurrency of the tasks. Defaults to 1.
"""
self._prompt_formatter = prompt_formatter
self._batch_size = batch_size
self._concurrency = concurrency
self.input_variables = prompt_formatter.variables
def _generate(
self, prompts: list, temperature: float, max_tokens=256, system_prompt=None
) -> BatchGenerateResult:
raise NotImplementedError
def run_tasks(
self, input_df, temperature: float, max_tokens=256, system_prompt=None
) -> GenerateResult:
"""
Run the model on the input dataframe.
Args:
input_df (pd.DataFrame): the input dataframe
concurrency (int, optional): concurrency of the tasks. Defaults to 1.
Returns:
EvalResult: the evaluation result
"""
task_batches = []
# First, traverse the input dataframe using batch size
for i in range(0, len(input_df), self._batch_size):
# Get the current batch
batch_df = input_df.iloc[i : i + self._batch_size]
# Format the input dataframe into prompts row by row
prompts = []
for index, row in batch_df.iterrows():
# Format the input dataframe into prompts
prompt = self._prompt_formatter.format(**row)
prompts.append(prompt)
task = {
"prompts": prompts,
"df": batch_df,
}
task_batches.append(task)
logger.info(
f"Generated total number of batches for prompts: {len(task_batches)}"
)
# Call the _generate in parallel using multiple threads, each call with a batch of prompts
with concurrent.futures.ThreadPoolExecutor(
max_workers=self._concurrency
) as executor:
future_to_batch = {
executor.submit(
self._generate,
task["prompts"],
temperature,
max_tokens,
system_prompt,
): task
for task in task_batches
}
batch_generate_results = []
for future in concurrent.futures.as_completed(future_to_batch):
task = future_to_batch[future]
try:
result = future.result()
batch_df = task["df"]
# Add the columns from batch_df where the column name is in the input_variables, add as attribute and value to the RowEvalResult
for index, row in enumerate(result.rows):
for input_variable in self.input_variables:
setattr(
row,
input_variable,
batch_df[input_variable].iloc[index],
)
batch_generate_results.append(result)
except Exception as exc:
logger.error(f"Exception occurred when running the task: {exc}")
# generate the same amount of RowEvalResult as the number of rows in the batch, with is_successful=False and error_msg=exc
rows = [
RowGenerateResult(is_successful=False, error_msg=str(exc))
                        for _ in range(len(task["prompts"]))
]
# append a failed result with the error message
batch_generate_results.append(
BatchGenerateResult(
                            num_rows=len(task["prompts"]),
num_successful_rows=0,
rows=rows,
is_successful=False,
error_msg=str(exc),
)
)
raise exc
logger.info(f"Generated total number of results: {len(batch_generate_results)}")
# Translate batch generate results to a single generate result
num_rows = 0
num_successful_rows = 0
rows = []
for batch_generate_result in batch_generate_results:
num_rows += batch_generate_result.num_rows
num_successful_rows += batch_generate_result.num_successful_rows
rows.extend(batch_generate_result.rows)
generate_result = GenerateResult(num_rows, num_successful_rows, rows)
return generate_result
class OpenAiModelGenerator(BaseModelGenerator):
ALLOWED_MODEL_NAMES = ["gpt-4", "gpt-3.5-turbo", "gpt-3.5-turbo-16k"]
def __init__(
self,
prompt_formatter: PromptTemplate,
model_name: str,
batch_size: int = 1,
concurrency: int = 1,
) -> None:
"""
Args:
prompt_formatter (PromptTemplate): the prompt format to format the input dataframe into prompts row by row according to the column names
model_name (str): the model name
batch_size (int, optional): Batch size that will be used to run tasks. Defaults to 1, which means it's sequential.
"""
super().__init__(prompt_formatter, batch_size, concurrency)
# require the batch size to be 1
if batch_size != 1:
raise ValueError(
"OpenAiModelGenerator currently only supports batch size 1"
)
if model_name not in self.ALLOWED_MODEL_NAMES:
raise ValueError(
f"model_name {model_name} is not supported. Supported model names: {self.ALLOWED_MODEL_NAMES}"
)
self._model_name = model_name
def _generate(
self, prompts: list, temperature: float, max_tokens=256, system_prompt=None
) -> BatchGenerateResult:
if system_prompt is not None:
messages = [
{"role": "system", "content": system_prompt},
]
else:
messages = []
# we can assume the prompts list has only one element
user_prompt = prompts[0]
messages.append({"role": "user", "content": user_prompt})
response_message = openai_provider.request_openai(
messages=messages,
functions=[],
model=self._model_name,
temperature=temperature,
)
content = response_message["content"]
logger.debug(f"Got response content: {content}")
row_generate_result = RowGenerateResult(
is_successful=True,
error_msg=None,
answer=content,
temperature=temperature,
max_tokens=max_tokens,
model_name=self._model_name,
prompt=user_prompt,
)
return BatchGenerateResult(
num_rows=1,
num_successful_rows=1,
rows=[row_generate_result],
is_successful=True,
error_msg=None,
)
class LLama2ModelGenerator(BaseModelGenerator):
def __init__(
self,
prompt_formatter: PromptTemplate,
model_name_or_path: str,
batch_size: int = 1,
concurrency: int = 1,
) -> None:
"""
Args:
prompt_formatter (PromptTemplate): the prompt format to format the input dataframe into prompts row by row according to the column names
model_name (str): the model name
batch_size (int, optional): Batch size that will be used to run tasks. Defaults to 1, which means it's sequential.
Recommendations:
- for A100 80GB, use batch_size 16 for llama-2-13b-chat
"""
super().__init__(prompt_formatter, batch_size, concurrency)
# require the concurrency to be 1 to avoid race condition during inference
if concurrency != 1:
raise ValueError(
"LLama2ModelGenerator currently only supports concurrency 1"
)
self._model_name_or_path = model_name_or_path
import torch
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
TextIteratorStreamer,
)
if torch.cuda.is_available():
self._model = AutoModelForCausalLM.from_pretrained(
model_name_or_path, torch_dtype=torch.float16, device_map="auto"
)
else:
raise ValueError("LLama2ModelGenerator currently only supports GPU")
self._tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
def _format_prompt(self, message: str, system_prompt_opt: str) -> str:
if system_prompt_opt is not None:
texts = [f"[INST] <<SYS>>\n{system_prompt_opt}\n<</SYS>>\n\n"]
texts.append(f"{message.strip()} [/INST]")
return "".join(texts)
else:
texts = [f"[INST] \n\n"]
texts.append(f"{message.strip()} [/INST]")
return "".join(texts)
def _generate(
self, prompts: list, temperature: float, max_tokens=256, system_prompt=None
) -> BatchGenerateResult:
from transformers import pipeline
all_formatted_prompts = [
self._format_prompt(message=message, system_prompt_opt=system_prompt)
for message in prompts
]
top_p = 0.95
repetition_penalty = 1.15
pipe = pipeline(
"text-generation",
model=self._model,
tokenizer=self._tokenizer,
max_new_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
repetition_penalty=repetition_penalty,
return_full_text=False,
)
responses = pipe(all_formatted_prompts)
rows = []
for index, response in enumerate(responses):
response_content = response[0]["generated_text"]
row_generate_result = RowGenerateResult(
is_successful=True,
error_msg=None,
answer=response_content,
temperature=temperature,
max_tokens=max_tokens,
model_name=self._model_name_or_path,
top_p=top_p,
repetition_penalty=repetition_penalty,
prompts=all_formatted_prompts[index],
)
rows.append(row_generate_result)
return BatchGenerateResult(
num_rows=len(rows),
num_successful_rows=len(rows),
rows=rows,
is_successful=True,
error_msg=None,
)
class VicunaModelGenerator(BaseModelGenerator):
def __init__(
self,
prompt_formatter: PromptTemplate,
model_name_or_path: str,
batch_size: int = 1,
concurrency: int = 1,
) -> None:
"""
Args:
prompt_formatter (PromptTemplate): the prompt format to format the input dataframe into prompts row by row according to the column names
model_name (str): the model name
batch_size (int, optional): Batch size that will be used to run tasks. Defaults to 1, which means it's sequential.
Recommendations:
- for A100 80GB, use batch_size 1 for vicuna-33b
- for A100 80GB x 2, use batch_size 64 for vicuna-33b
"""
super().__init__(prompt_formatter, batch_size, concurrency)
# require the concurrency to be 1 to avoid race condition during inference
if concurrency != 1:
raise ValueError(
"VicunaModelGenerator currently only supports concurrency 1"
)
self._model_name_or_path = model_name_or_path
import torch
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
TextIteratorStreamer,
)
if torch.cuda.is_available():
self._model = AutoModelForCausalLM.from_pretrained(
model_name_or_path, torch_dtype=torch.float16, device_map="auto"
)
else:
raise ValueError("VicunaModelGenerator currently only supports GPU")
self._tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
def _format_prompt(self, message: str, system_prompt_opt: str) -> str:
if system_prompt_opt is not None:
return f"""{system_prompt_opt}
USER: {message}
ASSISTANT:
"""
else:
return f"""A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.
USER: {message}
ASSISTANT:
"""
def _generate(
self, prompts: list, temperature: float, max_tokens=256, system_prompt=None
) -> BatchGenerateResult:
from transformers import pipeline
all_formatted_prompts = [
self._format_prompt(message=message, system_prompt_opt=system_prompt)
for message in prompts
]
top_p = 0.95
repetition_penalty = 1.15
pipe = pipeline(
"text-generation",
model=self._model,
tokenizer=self._tokenizer,
max_new_tokens=max_tokens,
temperature=temperature,
top_p=top_p,
repetition_penalty=repetition_penalty,
return_full_text=False,
)
responses = pipe(all_formatted_prompts)
rows = []
for index, response in enumerate(responses):
response_content = response[0]["generated_text"]
row_generate_result = RowGenerateResult(
is_successful=True,
error_msg=None,
answer=response_content,
temperature=temperature,
max_tokens=max_tokens,
model_name=self._model_name_or_path,
top_p=top_p,
repetition_penalty=repetition_penalty,
prompts=all_formatted_prompts[index],
)
rows.append(row_generate_result)
return BatchGenerateResult(
num_rows=len(rows),
num_successful_rows=len(rows),
rows=rows,
is_successful=True,
error_msg=None,
)
class DriverProxyModelGenerator(BaseModelGenerator):
def __init__(
self,
url: str,
pat_token: str,
format_prompt_func: callable,
prompt_formatter: PromptTemplate,
batch_size: int = 32,
concurrency: int = 1,
) -> None:
"""
Args:
prompt_formatter (PromptTemplate): the prompt format to format the input dataframe into prompts row by row according to the column names
model_name (str): the model name
batch_size (int, optional): Batch size that will be used to run tasks. Defaults to 1, which means it's sequential.
Recommendations:
- for A100 80GB, use batch_size 16 for llama-2-13b-chat
"""
super().__init__(prompt_formatter, batch_size, concurrency)
self._url = url
self._pat_token = pat_token
self._format_prompt_func = format_prompt_func
def _generate(
self, prompts: list, temperature: float, max_tokens=256, system_prompt=None
) -> BatchGenerateResult:
top_p = 0.95
all_formatted_prompts = [
self._format_prompt_func(message=message, system_prompt_opt=system_prompt)
for message in prompts
]
import requests
import json
headers = {
"Authentication": f"Bearer {self._pat_token}",
"Content-Type": "application/json",
}
data = {
"prompts": all_formatted_prompts,
"temperature": temperature,
"max_tokens": max_tokens,
}
response = requests.post(self._url, headers=headers, data=json.dumps(data))
# Extract the "outputs" as a JSON array from the response
outputs = response.json()["outputs"]
rows = []
for index, response_content in enumerate(outputs):
row_generate_result = RowGenerateResult(
is_successful=True,
error_msg=None,
answer=response_content,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
prompts=all_formatted_prompts[index],
)
rows.append(row_generate_result)
return BatchGenerateResult(
num_rows=len(rows),
num_successful_rows=len(rows),
rows=rows,
is_successful=True,
error_msg=None,
)
# The model generator based on the openAI format implemented by the vLLM framework:
# Reference: https://vllm.readthedocs.io/en/latest/getting_started/quickstart.html#openai-compatible-server
class vLllmOpenAICompletionFormatModelGenerator(BaseModelGenerator):
def __init__(
self,
model_name,
url: str,
pat_token: str,
format_prompt_func: callable,
prompt_formatter: PromptTemplate,
batch_size: int = 1,
concurrency: int = 64,
) -> None:
"""
Args:
prompt_formatter (PromptTemplate): the prompt format to format the input dataframe into prompts row by row according to the column names
model_name (str): the model name
batch_size (int, optional): Batch size that will be used to run tasks. Defaults to 1, which means it's sequential.
"""
super().__init__(prompt_formatter, batch_size, concurrency)
# check the batch_size can only be 1 for this model generator
if batch_size != 1:
raise ValueError(
f"batch_size {batch_size} is not supported for {self.__class__.__name__}, only 1 is supported"
)
self._model_name = model_name
self._url = url
self._pat_token = pat_token
self._format_prompt_func = format_prompt_func
def _generate(
self, prompts: list, temperature: float, max_tokens=256, system_prompt=None
) -> BatchGenerateResult:
if temperature == 0.0:
top_p = 1
else:
top_p = 0.95
all_formatted_prompts = [
self._format_prompt_func(message=message, system_prompt_opt=system_prompt)
for message in prompts
]
import requests
import json
headers = {
"Authentication": f"Bearer {self._pat_token}",
"Content-Type": "application/json",
}
data = {
"model": self._model_name,
"prompt": all_formatted_prompts[0],
"temperature": temperature,
"max_tokens": max_tokens,
"top_p": top_p,
}
response = requests.post(self._url, headers=headers, data=json.dumps(data))
# Extract the "outputs" as a JSON array from the response
if "choices" not in response.json():
logging.error(
f"Error while generating output, status code {response.status_code}, text: {response.text}"
)
choices = response.json()["choices"]
rows = []
for index, choice in enumerate(choices):
response_content = choice["text"]
row_generate_result = RowGenerateResult(
is_successful=True,
error_msg=None,
answer=response_content,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
prompts=all_formatted_prompts[index],
)
rows.append(row_generate_result)
return BatchGenerateResult(
num_rows=len(rows),
num_successful_rows=len(rows),
rows=rows,
is_successful=True,
error_msg=None,
)
class vLllmLocalModelGenerator(BaseModelGenerator):
def __init__(
self,
hf_model_name,
format_prompt_func: callable,
prompt_formatter: PromptTemplate,
batch_size: int = 100,
concurrency: int = 1,
max_num_batched_tokens=None,
tensor_parallel_size=1,
trust_remote_code=False,
) -> None:
"""
Args:
prompt_formatter (PromptTemplate): the prompt format to format the input dataframe into prompts row by row according to the column names
model_name (str): the huggingface model name
"""
super().__init__(prompt_formatter, batch_size, concurrency)
from vllm import LLM
# check the batch_size can only be 1 for this model generator
if concurrency != 1:
raise ValueError(
f"concurrency {concurrency} is not supported for {self.__class__.__name__}, only 1 is supported"
)
self._hf_model_name = hf_model_name
self._format_prompt_func = format_prompt_func
if max_num_batched_tokens is None:
self._llm = LLM(model=hf_model_name, trust_remote_code=trust_remote_code, tensor_parallel_size=tensor_parallel_size)
else:
self._llm = LLM(
model=hf_model_name,
max_num_batched_tokens=max_num_batched_tokens,
trust_remote_code=trust_remote_code,
tensor_parallel_size=tensor_parallel_size
)
logger.info(f"Initialized vLllmLocalModelGenerator with model {hf_model_name}")
def _generate(
self, prompts: list, temperature: float, max_tokens=256, system_prompt=None
) -> BatchGenerateResult:
from vllm import SamplingParams
if temperature == 0.0:
top_p = 1
else:
top_p = 0.95
all_formatted_prompts = [
self._format_prompt_func(message=message, system_prompt_opt=system_prompt)
for message in prompts
]
sampling_params = SamplingParams(
temperature=temperature, top_p=top_p, max_tokens=max_tokens
)
outputs = self._llm.generate(all_formatted_prompts, sampling_params)
rows = []
for index, output in enumerate(outputs):
generated_text = output.outputs[0].text
row_generate_result = RowGenerateResult(
is_successful=True,
error_msg=None,
answer=generated_text,
temperature=temperature,
max_tokens=max_tokens,
top_p=top_p,
prompts=all_formatted_prompts[index],
)
rows.append(row_generate_result)
return BatchGenerateResult(
num_rows=len(rows),
num_successful_rows=len(rows),
rows=rows,
is_successful=True,
error_msg=None,
)
| [
"[]"
] |
2024-01-10 | databrickslabs/doc-qa | databricks~labs~doc_qa~llm_providers~anthropic_provider.py | import json
import time
import os
import requests
from tenacity import retry, stop_after_attempt, stop_after_delay, wait_fixed, retry_if_exception_type, retry_if_exception
from databricks.labs.doc_qa.logging_utils import logger
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
import logging
anthropic_api_key = os.getenv('ANTHROPIC_API_KEY')
def supress_httpx_logs():
# Get the logger for 'httpx'
logger = logging.getLogger('httpx')
# Set the log level to warning
logger.setLevel(logging.WARNING)
supress_httpx_logs()
def request_anthropic(prompt, temperature=0.0, model="claude-2", max_tokens_to_sample=300):
logger.debug(f"Calling anthropic API with model {model} using prompt: {prompt}")
anthropic = Anthropic(
api_key=anthropic_api_key,
)
completion = anthropic.completions.create(
model=model,
max_tokens_to_sample=max_tokens_to_sample,
temperature=temperature,
prompt=f"{HUMAN_PROMPT} {prompt} {AI_PROMPT}",
)
return completion.completion
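# Illustrative usage sketch, not part of the original module: requires the
# ANTHROPIC_API_KEY environment variable to be set before import.
if __name__ == "__main__":
    answer = request_anthropic(
        "In one sentence, what is Databricks?", temperature=0.0, model="claude-2"
    )
    print(answer)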
| [
"PLACEHOLDER PLACEHOLDER PLACEHOLDER"
] |
2024-01-10 | databrickslabs/doc-qa | databricks~labs~doc_qa~evaluators~templated_evaluator.py | from dataclasses import dataclass
from typing import Any
from pandas import DataFrame
import concurrent.futures
import pandas as pd
from databricks.labs.doc_qa.llm_utils import PromptTemplate
from databricks.labs.doc_qa.llm_providers import openai_provider
from databricks.labs.doc_qa.llm_providers import anthropic_provider
import json
from databricks.labs.doc_qa.logging_utils import logger
from tenacity import retry, stop_after_attempt, retry_if_result, retry_if_exception
import re
from enum import Enum
import json
class ParameterType(Enum):
STRING = "string"
NUMBER = "number"
class ParameterDef:
def __init__(
self, name, type, display_name=None, description=None, regex_rule=None
):
self.name = name
self.type = ParameterType(type) if isinstance(type, str) else type
self.display_name = display_name or self.default_display_name()
self.description = description
self.regex_rule = (
regex_rule or self.default_regex()
) # use default if none provided
def __repr__(self):
return f"ParameterDef(name={self.name}, type={self.type}, description={self.description})"
def default_display_name(self):
"""Converts parameter name to human-readable format."""
words = self.name.split("_")
return " ".join([word.capitalize() for word in words])
def default_regex(self):
"""Generates a default regex rule based on display name."""
# Making it case-insensitive, accommodating spaces, and assuming that the parameter value comes after ':'.
pattern_name = re.escape(self.display_name)
pattern_name = pattern_name.replace(r"\ ", r"\s+")
return f"(?i){pattern_name}\s*:\s*(.*?)\n"
def extract(self, text):
"""Extracts the value from the text based on the regex rule."""
match = re.search(self.regex_rule, text)
if not match:
return None
value = match.group(1).strip()
if self.type == ParameterType.NUMBER:
try:
return float(value)
except ValueError:
return None
return value
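# Illustrative sketch, not part of the original module: a numeric ParameterDef
# parsed out of a grader's free-text response; the field name is made up.
#
#   score = ParameterDef(name="overall_score", type="number")
#   score.extract("Overall Score: 4.5\nThe answer is concise and correct.\n")
#   # -> 4.5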
class RetryPolicy:
def __init__(self, max_retry_on_invalid_result: int, max_retry_on_exception: int):
self.max_retry_on_invalid_result = max_retry_on_invalid_result
self.max_retry_on_exception = max_retry_on_exception
class DefaultRetryPolicy(RetryPolicy):
def __init__(self):
super().__init__(max_retry_on_invalid_result=3, max_retry_on_exception=3)
class NoRetryPolicy(RetryPolicy):
def __init__(self):
super().__init__(max_retry_on_invalid_result=0, max_retry_on_exception=0)
class RowInput:
"""
A RowInput object contains the input data for a single row in the evaluation dataframe.
"""
def __init__(self, **kwargs):
for key, value in kwargs.items():
setattr(self, key, value)
class RowEvalResult:
"""
A RowEvalResult object contains the evaluation result for a single row in the evaluation dataframe.
"""
def __init__(self, is_successful, error_msg, **kwargs):
self.is_successful = is_successful
self.error_msg = error_msg
for key, value in kwargs.items():
setattr(self, key, value)
class EvalResult:
num_rows: int
num_successful_rows: int
rows: list
def __init__(self, num_rows, num_successful_rows, rows, **kwargs):
self.num_rows = num_rows
self.num_successful_rows = num_successful_rows
self.rows = rows
for key, value in kwargs.items():
setattr(self, key, value)
def to_dataframe(self):
# Convert the rows to a dataframe
row_dicts = [row.__dict__ for row in self.rows]
eval_result_df = pd.DataFrame(row_dicts)
return eval_result_df
def summary(self):
summary_str = ""
# Traverse the kwargs of the EvalResult object
for key, value in self.__dict__.items():
# Skip the rows attribute
if key == "rows":
continue
# Add the key-value pair to the string
summary_str += f"{key}: {value}\n"
return summary_str
# Define the base evaluator class
class BaseLlmEvaluator:
def __init__(
self,
grading_prompt_tempate: PromptTemplate,
input_columns: list,
model: str,
temperature: float,
openai_function: dict = None,
output_parameters: list = None,
system_prompt_template: PromptTemplate = None,
retry_policy: RetryPolicy = DefaultRetryPolicy(),
):
# either openai function or output_extract_regex_dict should be provided
if openai_function is None and output_parameters is None:
raise ValueError(
"Either openai_function or output_parameters should be provided."
)
# if output_parameters is provided, then openai_function should not be provided
if openai_function is not None and output_parameters is not None:
raise ValueError(
"Only one of openai_function and output_parameters should be provided."
)
if openai_function is not None:
self.openai_function = openai_function
self.output_parameters = self.extract_parameters(self.openai_function)
else:
self.output_parameters = output_parameters
self.grading_prompt_tempate = grading_prompt_tempate
self.system_prompt_template = system_prompt_template
# Check all input_columns have been included in grading_prompt_tempate's variables
for input_column in input_columns:
if input_column not in self.grading_prompt_tempate.variables:
raise ValueError(
f"Input column '{input_column}' not found in grading prompt template variables."
)
self.input_columns = input_columns
self.model = model
self.temperature = temperature
self.retry_policy = retry_policy
def extract_parameters(self, function_json):
parameters = function_json.get("parameters", {}).get("properties", {})
parameter_defs = []
for name, parameter in parameters.items():
if parameter.get("type") in ["number", "integer", "float"]:
parameter_type = ParameterType.NUMBER
else:
parameter_type = ParameterType.STRING
description = parameter.get("description")
parameter_defs.append(
ParameterDef(name=name, type=parameter_type, description=description)
)
return parameter_defs
def grade_row_retry_wrapper(self, row_input: RowInput) -> RowEvalResult:
# Apply the retry policy for invalid result
_retry_policy_invalid = retry(
stop=stop_after_attempt(self.retry_policy.max_retry_on_invalid_result),
retry=retry_if_result(lambda result: result is None),
reraise=True,
)
# Apply the retry policy for exception
_retry_policy_exception = retry(
stop=stop_after_attempt(self.retry_policy.max_retry_on_exception),
            retry=retry_if_exception(lambda e: isinstance(e, Exception)),
reraise=True,
)
return _retry_policy_exception(_retry_policy_invalid(self.grade_row))(row_input)
def grade_row(self, row_input: RowInput) -> RowEvalResult:
# This method should be implemented in the subclasses
raise NotImplementedError
def run_eval(
self, concurrency: int = 10, dataset_df: DataFrame = None, catch_error=True
) -> EvalResult:
rows = []
with concurrent.futures.ThreadPoolExecutor(max_workers=concurrency) as executor:
future_to_row = {
executor.submit(self.grade_row_retry_wrapper, RowInput(**row)): row
for index, row in dataset_df.iterrows()
}
for future in concurrent.futures.as_completed(future_to_row):
# Include the attribute and value for these variables in the result: [is_successful, error_msg] + output_parameters names + input_columns
expected_attributes = (
["is_successful", "error_msg"]
+ [parameter.name for parameter in self.output_parameters]
+ self.input_columns
)
original_row = future_to_row[future]
try:
result = future.result()
# filter for the expected attributes
result_dict = {
key: value
for key, value in result.__dict__.items()
if key in expected_attributes
}
# add the original row to the result, only on the input columns
result_dict.update(
{
key: value
for key, value in original_row.items()
if key in self.input_columns
}
)
logger.debug(f"Base Evaluator got result {result_dict} ")
# Translate into RowEvalResult
row_eval_result = RowEvalResult(**result_dict)
rows.append(row_eval_result)
except Exception as exc:
if catch_error:
                        logger.warning(
f"Encountered error while processing row, error: {exc}, original row {original_row}"
)
# print stack trace into the logger debug
logger.warn(f"Traceback: {exc.__traceback__}")
# Give is_successful=False and error_msg=str(exc) to the row, and None to the other attributes
row_eval_result = RowEvalResult(
is_successful=False,
error_msg=str(exc),
**{
key: None
for key in expected_attributes
if key not in ["is_successful", "error_msg"]
},
)
rows.append(row_eval_result)
else:
raise exc
num_rows = len(rows)
if num_rows == 0:
return EvalResult(num_rows=0, num_successful_rows=0, rows=[])
num_successful_rows = len([row for row in rows if row.is_successful])
eval_result = EvalResult(
num_rows=num_rows,
num_successful_rows=num_successful_rows,
rows=rows,
)
# Calculate the average value for all the output parameters whose type is ParameterType.NUMBER, and give it attribute name avg_{parameter name} for the EvalResult
for parameter in self.output_parameters:
if parameter.type == ParameterType.NUMBER:
# Get the values for this parameter
parameter_values = [
row.__dict__[parameter.name] for row in rows if row.is_successful
]
# Calculate the average value
if len(parameter_values) == 0:
avg_value = 0
else:
avg_value = sum(parameter_values) / len(parameter_values)
# Add the average value as an attribute to the EvalResult
setattr(eval_result, f"avg_{parameter.name}", avg_value)
return eval_result
class OpenAIEvaluator(BaseLlmEvaluator):
ALLOWED_MODEL_NAMES = ["gpt-4", "gpt-3.5-turbo-16k", "gpt-4-32k"]
# Override the constructor to only allow models from gpt-4, gpt-4-32k and gpt-3.5-turbo-16k
def __init__(
self,
model: str,
temperature: float,
grading_prompt_tempate: PromptTemplate,
input_columns: list,
openai_function: dict = None,
system_prompt_template: PromptTemplate = None,
retry_policy: RetryPolicy = DefaultRetryPolicy(),
openai_retry_timeout: int = 300,
):
if model not in self.ALLOWED_MODEL_NAMES:
raise ValueError(
f"Unsupported model {model} provided. Only gpt-4, gpt-4-32k and gpt-3.5-turbo-16k are supported."
)
self._openai_retry_timeout = openai_retry_timeout
super().__init__(
model=model,
temperature=temperature,
grading_prompt_tempate=grading_prompt_tempate,
input_columns=input_columns,
openai_function=openai_function,
system_prompt_template=system_prompt_template,
retry_policy=retry_policy,
)
def grade_row(self, row_input: RowInput) -> RowEvalResult:
if self.system_prompt_template is not None:
system_prompt = self.system_prompt_template.format(
**{key: getattr(row_input, key) for key in self.input_columns}
)
messages = [
{"role": "system", "content": system_prompt},
]
else:
messages = []
user_prompt = self.grading_prompt_tempate.format(
**{key: getattr(row_input, key) for key in self.input_columns}
)
messages.append({"role": "user", "content": user_prompt})
# For gpt-3.5-turbo-16k, putting everything into the system prompt will get it to not call function at around 50% chances
functions = [self.openai_function]
response_message = openai_provider.request_openai(
messages=messages,
functions=functions,
model=self.model,
temperature=self.temperature,
retry_timeout=self._openai_retry_timeout,
)
if "function_call" in response_message:
function_call_obj = response_message["function_call"]
arguments = json.loads(function_call_obj["arguments"])
# extract the output values from the arguments according to the output columns
output_values = [
arguments[output_parameter.name]
for output_parameter in self.output_parameters
]
row_eval_result = RowEvalResult(
is_successful=True,
error_msg=None,
**{
output_parameter.name: output_value
for output_parameter, output_value in zip(
self.output_parameters, output_values
)
},
)
logger.debug(f"Successfully got result {row_eval_result} ")
return row_eval_result
else:
logger.warning(
f"Retrying, OpenAI doesn't call function for row {row_input}, response_message: {response_message}"
)
return None
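# Illustrative sketch (not part of the original module): wiring up an OpenAIEvaluator
# with a grading function. The prompt text, function schema, and dataframe columns are
# assumptions for demonstration, and PromptTemplate is assumed to accept a template
# string whose {question}/{answer} placeholders become its variables.
def _example_openai_evaluator() -> EvalResult:
    grading_function = {
        "name": "grade_answer",
        "description": "Grade an answer to a question.",
        "parameters": {
            "type": "object",
            "properties": {
                "correctness_score": {
                    "type": "number",
                    "description": "Score from 1 (wrong) to 5 (fully correct)",
                },
                "reasoning": {
                    "type": "string",
                    "description": "Why this score was given",
                },
            },
        },
    }
    evaluator = OpenAIEvaluator(
        model="gpt-4",
        temperature=0.0,
        grading_prompt_tempate=PromptTemplate(
            "Question: {question}\nAnswer: {answer}\nGrade the answer."
        ),
        input_columns=["question", "answer"],
        openai_function=grading_function,
    )
    dataset_df = pd.DataFrame(
        [{"question": "What is MLflow?", "answer": "A platform for the ML lifecycle."}]
    )
    return evaluator.run_eval(dataset_df=dataset_df, concurrency=2)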
class AnthropicEvaluator(BaseLlmEvaluator):
ALLOWED_MODEL_NAMES = ["claude-1", "claude-2"]
# Override the constructor to only allow models from claude-1 and claude-2
def __init__(
self,
model: str,
temperature: float,
grading_prompt_tempate: PromptTemplate,
input_columns: list,
output_parameters: list = None,
retry_policy: RetryPolicy = DefaultRetryPolicy(),
):
if model not in self.ALLOWED_MODEL_NAMES:
raise ValueError(
f"Unsupported model {model} provided. Only claude-1 and claude-2 are supported."
)
super().__init__(
model=model,
temperature=temperature,
grading_prompt_tempate=grading_prompt_tempate,
input_columns=input_columns,
output_parameters=output_parameters,
retry_policy=retry_policy,
)
def grade_row(self, row_input: RowInput) -> RowEvalResult:
user_prompt = self.grading_prompt_tempate.format(
**{key: getattr(row_input, key) for key in self.input_columns}
)
response_message = anthropic_provider.request_anthropic(
prompt=user_prompt, temperature=self.temperature, model=self.model
)
# Add a new line to the response_message at the end
response_message += "\n"
logger.debug(f"Got response message {response_message}")
extracted_data = {}
for param_def in self.output_parameters:
extracted_data[param_def.name] = param_def.extract(response_message)
# Check if all the output parameters have been extracted
if None in extracted_data.values():
attributes_missing_values = [
key for key, value in extracted_data.items() if value is None
]
logger.warning(
f"We are missing value for some output parameters {attributes_missing_values}"
)
return None
else:
# If yes, return is_successful=True and error_msg=None
return RowEvalResult(is_successful=True, error_msg=None, **extracted_data)
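# Illustrative sketch (not part of the original module): an AnthropicEvaluator relies on
# regex-backed ParameterDefs instead of an OpenAI function call, so the grading prompt is
# assumed to ask for "Correctness Score: <n>" and "Reasoning: <text>" lines that the
# default regexes can pick up. Prompt text and column names are demonstration assumptions.
def _example_anthropic_evaluator() -> EvalResult:
    evaluator = AnthropicEvaluator(
        model="claude-2",
        temperature=0.0,
        grading_prompt_tempate=PromptTemplate(
            "Question: {question}\nAnswer: {answer}\n"
            "Reply with the lines 'Correctness Score: <1-5>' and 'Reasoning: <text>'."
        ),
        input_columns=["question", "answer"],
        output_parameters=[
            ParameterDef(name="correctness_score", type="number"),
            ParameterDef(name="reasoning", type="string"),
        ],
    )
    dataset_df = pd.DataFrame(
        [{"question": "What is Delta Lake?", "answer": "An open storage layer for lakehouses."}]
    )
    return evaluator.run_eval(dataset_df=dataset_df, concurrency=2)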
| [] |
2024-01-10 | k-rt-k/KG-QnA | pipeline.py | import langchain
from langchain.pipelines import Pipeline

# Imports required by extract_entities_and_relationships below (missing in the original sketch).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
def extract_entities_and_relationships(text):
# Use an LLM to extract entities and relationships from the text.
"""
Extracts entities and relationships from the given text using Flan-T5.
Args:
text: The text from which to extract entities and relationships.
Returns:
A tuple of two lists, where the first list contains the extracted entities
and the second list contains the extracted relationships.
"""
flan_t5_model = AutoModelForSequenceClassification.from_pretrained("flan-t5-base")
tokenizer = AutoTokenizer.from_pretrained("flan-t5-base")
encoded_text = tokenizer(text, return_tensors="pt")
with torch.no_grad():
        outputs = flan_t5_model(**encoded_text)
predictions = outputs.logits.argmax(dim=-1)
entities = []
relationships = []
for i in range(len(predictions)):
if predictions[i] == 1:
entities.append(encoded_text.input_ids[i].item())
elif predictions[i] == 2:
relationships.append(encoded_text.input_ids[i].item())
print(entities, relationships)
return entities, relationships
def query_knowledge_graph(entities, relationships):
# Query the knowledge graph using the extracted entities and relationships.
pass
def generate_output(knowledge_graph_results):
# Generate output using the LLM and the knowledge graph query results.
pass
pipeline = Pipeline(
steps=[
extract_entities_and_relationships,
query_knowledge_graph,
generate_output,
]
)
# Run the pipeline.
text = "In which year did a thriller film release, featuring actors Jake Gyllenhaal and Rene Russo, with a title related to the world of art?"
output = pipeline.run(text)
# Use the output.
pass
| [] |
2024-01-10 | k-rt-k/KG-QnA | lm.py | ## code for loading and training the language model
import transformers
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
import torch
from torch.utils.data import DataLoader
import langchain
import pinecone
import time
import openai
from langchain.embeddings.openai import OpenAIEmbeddings
import pandas as pd
import os
from generate_graph import attr_to_num
def node_to_ans(nodename:str)->str:
return nodename.split(':')[1].replace('_',' ')
def dataset_to_csv(input_file:str='dataset.txt',output_file:str='dataset.csv')->None:
with open(input_file,'r') as f:
lines = f.readlines()
qlines = lines[::3]
answers = lines[1::3]
qlines = [q.strip() for q in qlines if q.strip()!='']
answers = [a.split(':')[1].strip() for a in answers if a.strip()!='']
df = pd.DataFrame({'queries':qlines,'answers':answers})
df.to_csv(output_file)
return
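# Illustrative note (not part of the original file): dataset_to_csv assumes dataset.txt
# groups every record into three lines -- the question, an "Answer: <answer>" line, and
# a separator line -- for example:
#
#   Which 2014 thriller stars Jake Gyllenhaal and Rene Russo?
#   Answer: Nightcrawler
#   <blank line>
#
# after which it can be converted with: dataset_to_csv('dataset.txt', 'dataset.csv')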
### somehow scrape dataset.txt for finetuning the lm ###
def get_lm_train_data(dataset_file:str='dataset.csv',add_desc:bool=False,save_to_csv:bool=True)->pd.DataFrame:
openaiapi_key = os.environ.get('OPENAI_KEY',None)
if openaiapi_key is None:
raise Exception("Set env OPENAI_KEY to your OpenAI Key")
client = openai.OpenAI(
api_key=openaiapi_key, # this is also the default, it can be omitted
)
df = pd.read_csv(dataset_file)
    qlines = df['queries'].values.tolist()
if add_desc:
#desc_prompt =
responses = []
descriptions = []
# df['sparqls']= responses
# df['descs']= descriptions
raise NotImplementedError
else: ## no desc
prmpt = lambda query:f'''Given queries enclosed in arrows, convert them into the SPARQL language in order to query over a knowledge graph containing nodes for 'actor','director','movie', 'genre', 'year'. Each node name is prefixed by its type, and contains underscores instead of spaces. For example actor Michael Scott's node reads 'actor:Michael_Scott'. Each relation is one out of {' '.join(attr_to_num.keys())}, with the edge pointing in the appropriate direction.
You may think over your answer, but your final answer for each query must be enclosed in triple slashes '/// ///'.
The queries are :
{query}'''
responses = []
# give k queries at a time
k = 10
qb = ['\n'.join([f'<<<{q}>>>'for q in qlines[i:i+k]]) for i in range(0, len(qlines), k)]
for query in qb:
rp = client.completions.create(
model="davinci-002",
prompt=prmpt(query)
)
ans = rp.choices[0].text.split('///')[1::2]
responses += ans
df['sparqls']=responses
if save_to_csv:
df.to_csv(dataset_file)
return df
### an LM, when given a query, extracts entities and relationships from the query ###
class ParserLM:
def __init__(self,lm_name:str='flan-t5-base',tokenizer_name:str='flan-t5-base',finetune:bool=False,finetuned_path:str|None=None,desc:bool=False)->None:
self.lm = transformers.AutoModelForSeq2SeqLM.from_pretrained(lm_name)
self.tokenizer = transformers.AutoTokenizer.from_pretrained(tokenizer_name)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if finetune:
self.finetune('dataset.csv')
self.lm.eval()
        self.lm.to(device)
        # Note: tokenizers are device-agnostic and have no .to(); only the model is moved.
self.desc = desc
return
def finetune(self,data_path:str='dataset.csv')->None:
df = pd.read_csv(data_path)
if self.desc:
if 'descs' not in df.columns:
df = get_lm_train_data(data_path,add_desc=True,save_to_csv=True)
queries,responses,descs = df['queries'].values.tolist(),df['sparqls'].values.tolist(),df['descs'].values.tolist()
raise NotImplementedError
return
# no desc
if 'sparqls' not in df.columns:
df = get_lm_train_data(data_path,add_desc=False,save_to_csv=True)
queries,responses = df['queries'].values.tolist(),df['sparqls'].values.tolist()
        # Tokenize queries (inputs) and SPARQL strings (labels) for seq2seq fine-tuning.
        model_inputs = self.tokenizer(queries, padding=True, truncation=True)
        label_encodings = self.tokenizer(responses, padding=True, truncation=True)
        features = [
            {
                "input_ids": model_inputs["input_ids"][i],
                "attention_mask": model_inputs["attention_mask"][i],
                "labels": label_encodings["input_ids"][i],
            }
            for i in range(len(queries))
        ]
        train_size = int(len(features) * 0.9)
        train_data, eval_data = torch.utils.data.random_split(
            features, [train_size, len(features) - train_size]
        )
        ## train the model (Trainer expects dataset-like objects, not DataLoaders) ##
        trainer = transformers.Trainer(
            model=self.lm,
            args=transformers.TrainingArguments(
                output_dir="./flan-t5-base-imdb-nodesc",
                evaluation_strategy="epoch",
                save_strategy="epoch",
                learning_rate=2e-5,
                per_device_train_batch_size=8,
                per_device_eval_batch_size=8,
                num_train_epochs=1,
                weight_decay=0.01,
                load_best_model_at_end=True,
                metric_for_best_model="eval_loss",
                logging_dir="./logs",
            ),
            train_dataset=train_data,
            eval_dataset=eval_data,
            tokenizer=self.tokenizer,
        )
        trainer.train()
        return
def parse(self,query:str)->str:
        encoded_text = self.tokenizer(query, return_tensors="pt").to(self.lm.device)
response = self.lm.generate(**encoded_text)
return self.tokenizer.decode(response[0], skip_special_tokens=True)
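# Illustrative sketch (not part of the original file): typical ParserLM usage. The query
# below is an assumption; pass finetune=True to fine-tune on dataset.csv before parsing.
# (On the Hugging Face Hub the checkpoint may need to be 'google/flan-t5-base'.)
def _example_parse_query() -> str:
    parser = ParserLM(lm_name='flan-t5-base', tokenizer_name='flan-t5-base', finetune=False)
    return parser.parse('Which movies did Rene Russo act in after 2010?')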
### if the rag model is to be extended with the description this will be needed ###
class Embedder: ## generate contextual embeddings and ids for given input texts, then return closest matches on queries
    def __init__(self,index_name:str,emb_dim=1536)->None:
self.index_name = index_name
new = False
if index_name not in pinecone.list_indexes():
pinecone.create_index(
index_name,
dimension=emb_dim,
metric='cosine'
)
while not pinecone.describe_index(index_name).status['ready']:
time.sleep(1)
new = True
self.index = pinecone.Index(index_name)
print(f"Index stats: {self.index.describe()}")
self.embed_model = OpenAIEmbeddings(model="text-embedding-ada-002")## this is the model we are using for now, change it later
        self.is_new = new  # __init__ must return None; expose creation status as an attribute instead
def push(self,text:str)->str:
raise NotImplementedError
pass
def compile(self)->None:
        text_field = "text"  # assumed metadata field that stores the raw document text
        from langchain.vectorstores import Pinecone as LangchainPinecone  # LangChain's vector-store wrapper, not the bare pinecone client
        self.vectorstore = LangchainPinecone(
            self.index, self.embed_model.embed_query, text_field
        )
return
def query(self,q:str,top_k:int)->list[str]:
return self.vectorstore.similarity_search(q,k=top_k) | [] |
2024-01-10 | minosvasilias/gpt_index | gpt_index~composability~graph.py | """Composability graphs."""
import json
from typing import Any, Dict, List, Optional, Type, Union
from gpt_index.data_structs.data_structs import IndexStruct
from gpt_index.data_structs.struct_type import IndexStructType
from gpt_index.docstore import DocumentStore
from gpt_index.embeddings.base import BaseEmbedding
from gpt_index.embeddings.openai import OpenAIEmbedding
from gpt_index.indices.base import BaseGPTIndex
from gpt_index.indices.keyword_table.base import GPTKeywordTableIndex
from gpt_index.indices.list.base import GPTListIndex
from gpt_index.indices.prompt_helper import PromptHelper
from gpt_index.indices.query.query_runner import QueryRunner
from gpt_index.indices.query.schema import QueryConfig
from gpt_index.indices.registry import IndexRegistry
from gpt_index.indices.struct_store.sql import GPTSQLStructStoreIndex
from gpt_index.indices.tree.base import GPTTreeIndex
from gpt_index.indices.vector_store.faiss import GPTFaissIndex
from gpt_index.indices.vector_store.pinecone import GPTPineconeIndex
from gpt_index.indices.vector_store.qdrant import GPTQdrantIndex
from gpt_index.indices.vector_store.simple import GPTSimpleVectorIndex
from gpt_index.indices.vector_store.weaviate import GPTWeaviateIndex
from gpt_index.langchain_helpers.chain_wrapper import LLMPredictor
from gpt_index.response.schema import Response
# TMP: refactor query config type
QUERY_CONFIG_TYPE = Union[Dict, QueryConfig]
# this is a map from type to outer index class
# we extract the type_to_struct and type_to_query
# fields from the index class
DEFAULT_INDEX_REGISTRY_MAP: Dict[IndexStructType, Type[BaseGPTIndex]] = {
IndexStructType.TREE: GPTTreeIndex,
IndexStructType.LIST: GPTListIndex,
IndexStructType.KEYWORD_TABLE: GPTKeywordTableIndex,
IndexStructType.DICT: GPTFaissIndex,
IndexStructType.SIMPLE_DICT: GPTSimpleVectorIndex,
IndexStructType.WEAVIATE: GPTWeaviateIndex,
IndexStructType.PINECONE: GPTPineconeIndex,
IndexStructType.QDRANT: GPTQdrantIndex,
IndexStructType.SQL: GPTSQLStructStoreIndex,
}
def _get_default_index_registry() -> IndexRegistry:
"""Get default index registry."""
index_registry = IndexRegistry()
for index_type, index_class in DEFAULT_INDEX_REGISTRY_MAP.items():
index_registry.type_to_struct[index_type] = index_class.index_struct_cls
index_registry.type_to_query[index_type] = index_class.get_query_map()
return index_registry
def _safe_get_index_struct(
docstore: DocumentStore, index_struct_id: str
) -> IndexStruct:
"""Try get index struct."""
index_struct = docstore.get_document(index_struct_id)
if not isinstance(index_struct, IndexStruct):
raise ValueError("Invalid `index_struct_id` - must be an IndexStruct")
return index_struct
class ComposableGraph:
"""Composable graph."""
def __init__(
self,
docstore: DocumentStore,
index_registry: IndexRegistry,
index_struct: IndexStruct,
llm_predictor: Optional[LLMPredictor] = None,
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[BaseEmbedding] = None,
chunk_size_limit: Optional[int] = None,
) -> None:
"""Init params."""
self._docstore = docstore
self._index_registry = index_registry
# this represents the "root" index struct
self._index_struct = index_struct
self._llm_predictor = llm_predictor or LLMPredictor()
self._prompt_helper = prompt_helper or PromptHelper.from_llm_predictor(
self._llm_predictor, chunk_size_limit=chunk_size_limit
)
self._embed_model = embed_model or OpenAIEmbedding()
@classmethod
    def build_from_index(cls, index: BaseGPTIndex) -> "ComposableGraph":
"""Build from index."""
return ComposableGraph(
index.docstore,
index.index_registry,
# this represents the "root" index struct
index.index_struct,
llm_predictor=index.llm_predictor,
prompt_helper=index.prompt_helper,
embed_model=index.embed_model,
)
def query(
self,
query_str: str,
query_configs: Optional[List[QUERY_CONFIG_TYPE]],
verbose: bool = False,
) -> Response:
"""Query the index."""
# go over all the indices and create a registry
query_runner = QueryRunner(
self._llm_predictor,
self._prompt_helper,
self._embed_model,
self._docstore,
self._index_registry,
query_configs=query_configs,
verbose=verbose,
recursive=True,
)
return query_runner.query(query_str, self._index_struct)
def get_index(
self, index_struct_id: str, index_cls: Type[BaseGPTIndex], **kwargs: Any
) -> BaseGPTIndex:
"""Get index."""
index_struct = _safe_get_index_struct(self._docstore, index_struct_id)
return index_cls(
index_struct=index_struct,
docstore=self._docstore,
index_registry=self._index_registry,
**kwargs
)
@classmethod
def load_from_disk(cls, save_path: str, **kwargs: Any) -> "ComposableGraph":
"""Load index from disk.
This method loads the index from a JSON file stored on disk. The index data
structure itself is preserved completely. If the index is defined over
subindices, those subindices will also be preserved (and subindices of
those subindices, etc.).
Args:
save_path (str): The save_path of the file.
Returns:
BaseGPTIndex: The loaded index.
"""
with open(save_path, "r") as f:
result_dict = json.load(f)
# TODO: this is hardcoded for now, allow it to be specified by the user
index_registry = _get_default_index_registry()
docstore = DocumentStore.load_from_dict(
result_dict["docstore"], index_registry.type_to_struct
)
index_struct = _safe_get_index_struct(
docstore, result_dict["index_struct_id"]
)
return cls(docstore, index_registry, index_struct, **kwargs)
def save_to_disk(self, save_path: str, **save_kwargs: Any) -> None:
"""Save to file.
This method stores the index into a JSON file stored on disk.
Args:
save_path (str): The save_path of the file.
"""
out_dict: Dict[str, Any] = {
"index_struct_id": self._index_struct.get_doc_id(),
"docstore": self._docstore.serialize_to_dict(),
}
with open(save_path, "w") as f:
json.dump(out_dict, f)
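# Illustrative sketch (not part of the original module): composing a graph over a single
# list index, persisting it, and querying it. The document text, file path, and query
# config values are assumptions for demonstration.
def _example_compose_and_query() -> Response:
    from gpt_index.readers.schema.base import Document
    index = GPTListIndex([Document("GPT Index composes indices into query graphs.")])
    graph = ComposableGraph.build_from_index(index)
    graph.save_to_disk("graph.json")
    graph = ComposableGraph.load_from_disk("graph.json")
    return graph.query(
        "What does GPT Index do?",
        query_configs=[{"index_struct_type": "list", "query_mode": "default"}],
    )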
| [] |
2024-01-10 | minosvasilias/gpt_index | gpt_index~langchain_helpers~sql_wrapper.py | """SQL wrapper around SQLDatabase in langchain."""
from typing import Any, Dict, List, Tuple
from langchain.sql_database import SQLDatabase as LangchainSQLDatabase
from sqlalchemy import MetaData, create_engine, insert
from sqlalchemy.engine import Engine
class SQLDatabase(LangchainSQLDatabase):
"""SQL Database.
Wrapper around SQLDatabase object from langchain. Offers
some helper utilities for insertion and querying.
See `langchain documentation <http://shorturl.at/mITZ0>`_ for more details:
Args:
*args: Arguments to pass to langchain SQLDatabase.
**kwargs: Keyword arguments to pass to langchain SQLDatabase.
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Init params."""
super().__init__(*args, **kwargs)
self.metadata_obj = MetaData(bind=self._engine)
self.metadata_obj.reflect()
@property
def engine(self) -> Engine:
"""Return SQL Alchemy engine."""
return self._engine
@classmethod
def from_uri(cls, database_uri: str, **kwargs: Any) -> "SQLDatabase":
"""Construct a SQLAlchemy engine from URI."""
return cls(create_engine(database_uri), **kwargs)
def get_table_columns(self, table_name: str) -> List[dict]:
"""Get table columns."""
return self._inspector.get_columns(table_name)
def get_single_table_info(self, table_name: str) -> str:
"""Get table info for a single table."""
# same logic as table_info, but with specific table names
template = "Table '{table_name}' has columns: {columns}."
columns = []
for column in self._inspector.get_columns(table_name):
columns.append(f"{column['name']} ({str(column['type'])})")
column_str = ", ".join(columns)
table_str = template.format(table_name=table_name, columns=column_str)
return table_str
def insert_into_table(self, table_name: str, data: dict) -> None:
"""Insert data into a table."""
table = self.metadata_obj.tables[table_name]
stmt = insert(table).values(**data)
self._engine.execute(stmt)
def run_sql(self, command: str) -> Tuple[str, Dict]:
"""Execute a SQL statement and return a string representing the results.
If the statement returns rows, a string of the results is returned.
If the statement returns no rows, an empty string is returned.
"""
with self._engine.connect() as connection:
cursor = connection.exec_driver_sql(command)
if cursor.returns_rows:
result = cursor.fetchall()
return str(result), {"result": result}
return "", {}
| [
"Table '{table_name}' has columns: {columns}."
] |
2024-01-10 | minosvasilias/gpt_index | gpt_index~langchain_helpers~text_splitter.py | """Text splitter implementations."""
from dataclasses import dataclass
from typing import Callable, List, Optional
from langchain.text_splitter import TextSplitter
from gpt_index.utils import globals_helper
@dataclass
class TextSplit:
"""Text split with overlap.
Attributes:
text_chunk: The text string.
num_char_overlap: The number of overlapping characters with the previous chunk.
"""
text_chunk: str
num_char_overlap: int
class TokenTextSplitter(TextSplitter):
"""Implementation of splitting text that looks at word tokens."""
def __init__(
self,
separator: str = " ",
chunk_size: int = 4000,
chunk_overlap: int = 200,
tokenizer: Optional[Callable] = None,
backup_separators: Optional[List[str]] = ["\n"],
):
"""Initialize with parameters."""
if chunk_overlap > chunk_size:
raise ValueError(
f"Got a larger chunk overlap ({chunk_overlap}) than chunk size "
f"({chunk_size}), should be smaller."
)
self._separator = separator
self._chunk_size = chunk_size
self._chunk_overlap = chunk_overlap
self.tokenizer = tokenizer or globals_helper.tokenizer
self._backup_separators = backup_separators
def _reduce_chunk_size(
self, start_idx: int, cur_idx: int, splits: List[str]
) -> int:
"""Reduce the chunk size by reducing cur_idx.
Return the new cur_idx.
"""
current_doc_total = len(
self.tokenizer(self._separator.join(splits[start_idx:cur_idx]))
)
while current_doc_total > self._chunk_size:
percent_to_reduce = (
current_doc_total - self._chunk_size
) / current_doc_total
num_to_reduce = int(percent_to_reduce * (cur_idx - start_idx)) + 1
cur_idx -= num_to_reduce
current_doc_total = len(
self.tokenizer(self._separator.join(splits[start_idx:cur_idx]))
)
return cur_idx
def _process_splits(self, splits: List[str], chunk_size: int) -> List[str]:
"""Process splits.
Specifically search for tokens that are too large for chunk size,
and see if we can separate those tokens more
(via backup separators if specified, or force chunking).
"""
new_splits = []
for split in splits:
num_cur_tokens = len(self.tokenizer(split))
if num_cur_tokens <= chunk_size:
new_splits.append(split)
else:
cur_splits = []
if self._backup_separators:
for sep in self._backup_separators:
if sep in split:
cur_splits = split.split(sep)
break
else:
cur_splits = [split]
cur_splits2 = []
for cur_split in cur_splits:
num_cur_tokens = len(self.tokenizer(cur_split))
if num_cur_tokens <= chunk_size:
cur_splits2.extend([cur_split])
else:
cur_split_chunks = [
cur_split[i : i + chunk_size]
for i in range(0, len(cur_split), chunk_size)
]
cur_splits2.extend(cur_split_chunks)
new_splits.extend(cur_splits2)
return new_splits
def split_text(self, text: str, extra_info_str: Optional[str] = None) -> List[str]:
"""Split incoming text and return chunks."""
        text_splits = self.split_text_with_overlaps(text, extra_info_str=extra_info_str)
        return [text_split.text_chunk for text_split in text_splits]
def split_text_with_overlaps(
self, text: str, extra_info_str: Optional[str] = None
) -> List[TextSplit]:
"""Split incoming text and return chunks with overlap size."""
if text == "":
return []
# NOTE: Consider extra info str that will be added to the chunk at query time
# This reduces the effective chunk size that we can have
if extra_info_str is not None:
# NOTE: extra 2 newline chars for formatting when prepending in query
num_extra_tokens = len(self.tokenizer(f"{extra_info_str}\n\n")) + 1
effective_chunk_size = self._chunk_size - num_extra_tokens
if effective_chunk_size <= 0:
raise ValueError(
"Effective chunk size is non positive after considering extra_info"
)
else:
effective_chunk_size = self._chunk_size
# First we naively split the large input into a bunch of smaller ones.
splits = text.split(self._separator)
splits = self._process_splits(splits, effective_chunk_size)
# We now want to combine these smaller pieces into medium size
# chunks to send to the LLM.
docs = []
start_idx = 0
cur_idx = 0
cur_total = 0
prev_idx = 0 # store the previous end index
while cur_idx < len(splits):
cur_token = splits[cur_idx]
num_cur_tokens = max(len(self.tokenizer(cur_token)), 1)
if num_cur_tokens > effective_chunk_size:
raise ValueError(
"A single term is larger than the allowed chunk size.\n"
f"Term size: {num_cur_tokens}\n"
f"Chunk size: {self._chunk_size}"
f"Effective chunk size: {effective_chunk_size}"
)
            # If adding this token to current_doc would exceed the chunk size:
            # 1. Verify with the tokenizer that current_doc fits, reducing it if needed
            # 2. Update the docs list
if cur_total + num_cur_tokens > effective_chunk_size:
# NOTE: since we use a proxy for counting tokens, we want to
# run tokenizer across all of current_doc first. If
# the chunk is too big, then we will reduce text in pieces
cur_idx = self._reduce_chunk_size(start_idx, cur_idx, splits)
overlap = 0
# after first round, check if last chunk ended after this chunk begins
if prev_idx > 0 and prev_idx > start_idx:
overlap = sum([len(splits[i]) for i in range(start_idx, prev_idx)])
docs.append(
TextSplit(self._separator.join(splits[start_idx:cur_idx]), overlap)
)
prev_idx = cur_idx
# 2. Shrink the current_doc (from the front) until it is gets smaller
# than the overlap size
# NOTE: because counting tokens individually is an imperfect
# proxy (but much faster proxy) for the total number of tokens consumed,
# we need to enforce that start_idx <= cur_idx, otherwise
# start_idx has a chance of going out of bounds.
while cur_total > self._chunk_overlap and start_idx < cur_idx:
cur_num_tokens = max(len(self.tokenizer(splits[start_idx])), 1)
cur_total -= cur_num_tokens
start_idx += 1
            # Build up the current_doc with term d, and update the total counter with
            # the number of tokens in d, wrt self.tokenizer
# we reassign cur_token and num_cur_tokens, because cur_idx
# may have changed
cur_token = splits[cur_idx]
num_cur_tokens = max(len(self.tokenizer(cur_token)), 1)
cur_total += num_cur_tokens
cur_idx += 1
overlap = 0
# after first round, check if last chunk ended after this chunk begins
if prev_idx > start_idx:
overlap = sum([len(splits[i]) for i in range(start_idx, prev_idx)]) + len(
range(start_idx, prev_idx)
)
docs.append(TextSplit(self._separator.join(splits[start_idx:cur_idx]), overlap))
return docs
def truncate_text(self, text: str) -> str:
"""Truncate text in order to fit the underlying chunk size."""
if text == "":
return ""
# First we naively split the large input into a bunch of smaller ones.
splits = text.split(self._separator)
splits = self._process_splits(splits, self._chunk_size)
start_idx = 0
cur_idx = 0
cur_total = 0
while cur_idx < len(splits):
cur_token = splits[cur_idx]
num_cur_tokens = max(len(self.tokenizer(cur_token)), 1)
if cur_total + num_cur_tokens > self._chunk_size:
cur_idx = self._reduce_chunk_size(start_idx, cur_idx, splits)
break
cur_total += num_cur_tokens
cur_idx += 1
return self._separator.join(splits[start_idx:cur_idx])
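# Illustrative sketch (not part of the original module): splitting a long string with a
# small chunk size so the overlap bookkeeping is visible. The sizes below are assumptions
# for demonstration; the default tokenizer from globals_helper is used.
def _example_split() -> List[TextSplit]:
    splitter = TokenTextSplitter(chunk_size=16, chunk_overlap=4)
    text = "GPT Index splits documents into overlapping token chunks " * 8
    return splitter.split_text_with_overlaps(text)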
| [] |
2024-01-10 | minosvasilias/gpt_index | gpt_index~readers~obsidian.py | """Obsidian reader class.
Pass in the path to an Obsidian vault and it will parse all markdown
files into a List of Documents,
with each Document containing text from under an Obsidian header.
"""
import os
from pathlib import Path
from typing import Any, List
from langchain.docstore.document import Document as LCDocument
from gpt_index.readers.base import BaseReader
from gpt_index.readers.file.markdown_parser import MarkdownParser
from gpt_index.readers.schema.base import Document
class ObsidianReader(BaseReader):
"""Utilities for loading data from an Obsidian Vault.
Args:
input_dir (str): Path to the vault.
"""
def __init__(self, input_dir: str, verbose: bool = False):
"""Init params."""
self.verbose = verbose
self.input_dir = Path(input_dir)
def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]:
"""Load data from the input directory."""
docs: List[str] = []
for (dirpath, dirnames, filenames) in os.walk(self.input_dir):
dirnames[:] = [d for d in dirnames if not d.startswith(".")]
for filename in filenames:
if filename.endswith(".md"):
filepath = os.path.join(dirpath, filename)
content = MarkdownParser().parse_file(Path(filepath))
docs.extend(content)
return [Document(d) for d in docs]
def load_langchain_documents(self, **load_kwargs: Any) -> List[LCDocument]:
"""Load data in LangChain document format."""
docs = self.load_data(**load_kwargs)
return [d.to_langchain_format() for d in docs]
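# Illustrative sketch (not part of the original module): loading a vault. The vault path
# is an assumption; each markdown section in the vault becomes a Document.
def _example_load_vault() -> List[Document]:
    reader = ObsidianReader("/path/to/obsidian/vault")
    return reader.load_data()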
| [] |