date_collected | repo_name | file_name | file_contents | prompts |
---|---|---|---|---|
2024-01-10 | tencent-ailab/TPolicies | tpolicies~ops.py | """ operations extension. in style of tf.nn & tf ops"""
import numpy as np
import tensorflow as tf
INF = 1e20
def mask_logits(logits, mask):
neginf = tf.zeros_like(logits) - INF
logits = tf.where(mask, logits, neginf)
return logits
def mask_embed(embed, mask):
mask = to_float32(tf.expand_dims(mask, axis=-1))
return tf.multiply(embed, mask)
def to_float32(t):
if t.dtype == tf.float32:
return t
return tf.cast(t, tf.float32)
def to_int32(t):
if t.dtype == tf.int32:
return t
return tf.cast(t, tf.int32)
def to_bool(t):
if t.dtype == tf.bool:
return t
return tf.cast(t, tf.bool)
def fetch_op(tensor, idx):
"""Fetch tensor given index
Args:
tensor: (bs, dim_a, dim_b, dim_c, ...)
idx: (bs,),
Returns:
A tensor in shape (bs, dim_b, dim_c, ...)
"""
return tf.gather_nd(tensor, tf.stack([tf.range(tf.shape(idx)[0]), idx], axis=1))
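# Example (sketch): selecting one action head per batch element
#   head_logits: (bs, n_heads, n_actions), head_idx: (bs,) int32
#   selected = fetch_op(head_logits, head_idx)  # -> (bs, n_actions)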
# rnn stuff
def batch_to_seq(inputs, nrollout, rollout_len, flat=False):
""" Convert a Tensor to a Tensor Sequence (list of Tensors).
Borrowed and modified from openai/baselines
Args:
inputs: (nrollout*rollout_len, d1, d2, ...)
Returns:
A list of Tensors, length rollout_len, each Tensor sized
(nrollout, d1, d2, ...)
"""
if flat:
inputs = tf.reshape(inputs, [nrollout, rollout_len])
else:
inputs = tf.reshape(inputs, [nrollout, rollout_len, -1])
return [tf.squeeze(v, [1]) for v in
tf.split(axis=1, num_or_size_splits=rollout_len, value=inputs)]
def seq_to_batch(inputs: list, flat=False):
""" Convert a Tensor Sequence (list of tensors) to a Tensor.
Borrowed and modified from openai/baselines
Args:
inputs: a list, length rollout_len. Each Tensor sized
(nrollout, d1, d2, ...)
flat: boolean, whether flatten as vector
Returns:
A Tensor sized (nrollout*rollout_len, d1, d2, ...)
"""
shape = inputs[0].get_shape().as_list()
if not flat:
assert len(shape) > 1, 'The rank of the Tensor in inputs seq must be > 1'
h_dims = inputs[0].get_shape().as_list()[1:] # (d1, d2, ...)
return tf.reshape(tf.concat(axis=1, values=inputs), [-1] + h_dims)
else:
return tf.reshape(tf.stack(values=inputs, axis=1), [-1])
def one_step_lstm_op(c, h, x, wx, wh, b, forget_bias, x_nf=None, h_nf=None,
c_nf=None):
""" one step lstm op. """
xx = tf.matmul(x, wx)
xx = xx if x_nf is None else x_nf(xx)
hh = tf.matmul(h, wh)
hh = hh if h_nf is None else h_nf(hh)
z = xx + hh + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f + forget_bias)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f * c + i * u
cc = c if c_nf is None else c_nf(c)
h = o * tf.tanh(cc)
return c, h
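# Shape notes (inferred from the 4-way split above): x is (batch, x_dim), h and c
# are (batch, nh), wx is (x_dim, 4*nh), wh is (nh, 4*nh), b is (4*nh,); the gates
# come out of tf.split in the order input, forget, output, candidate (i, f, o, u).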
def cat_sample_from_logits(logits, dtype=tf.int32):
logits_shape = logits.shape
if len(logits_shape) > 2:
sample = tf.reshape(tf.random.categorical(tf.reshape(
logits, shape=(-1, logits_shape[-1])), 1)[:, 0],
shape=logits_shape[:-1])
else:
sample = tf.random.categorical(logits, 1)[:, 0]
return tf.cast(sample, dtype=dtype)
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
#lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init | [] |
2024-01-10 | kumar045/ChatBot | ChatBot.py | import streamlit as st
from langchain.llms import CTransformers
from langchain import LLMChain, PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.memory.chat_message_histories import RedisChatMessageHistory
from streamlit_chat import message
import requests
def download_file(url, destination):
try:
response = requests.get(url)
response.raise_for_status()
with open(destination, 'wb') as file:
file.write(response.content)
print("File downloaded successfully.")
except requests.exceptions.HTTPError as errh:
print ("Http Error:",errh)
except requests.exceptions.ConnectionError as errc:
print ("Error Connecting:",errc)
except requests.exceptions.Timeout as errt:
print ("Timeout Error:",errt)
except requests.exceptions.RequestException as err:
print ("OOps: Something Else",err)
url = "https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/resolve/main/llama-2-7b-chat.ggmlv3.q8_0.bin"
destination = "llama-2-7b-chat.ggmlv3.q8_0.bin"
download_file(url, destination)
llm = CTransformers(
model="llama-2-7b-chat.ggmlv3.q8_0.bin",
model_type="llama",
max_new_tokens = 512,
temperature = 0.5
)
message_history = RedisChatMessageHistory(
url="redis://localhost:6379/0", ttl=1000, session_id="my-session"
)
memory = ConversationBufferMemory(
memory_key="chat_history", chat_memory=message_history
)
template = """[INST] <<SYS>>
You are a helpful assistant. behave like human and start with the message Helllo
<</SYS>>
{chat_history}
Human: {question}
Assistant:
[/INST]
"""
prompt = PromptTemplate(input_variables=["chat_history", "question"], template=template)
chatgpt_chain = LLMChain(
llm=llm,
prompt=prompt,
verbose=True,
memory=memory
)
# Display conversation history using Streamlit messages
def display_conversation(history):
message(history["chat_history"], is_user=True)
st.title('Chatbot')
user_input = st.text_input("Type your message here:")
if st.button('Send'):
output = chatgpt_chain.predict(question=user_input)
chat_history=memory.load_memory_variables({})
display_conversation(chat_history)
| [
"question",
"chat_history",
"[INST] <<SYS>>\nYou are a helpful assistant. behave like human and start with the message Helllo\n<</SYS>>\n\n{chat_history}\nHuman: {question} \nAssistant:\n\n[/INST]\n\n"
] |
2024-01-10 | simon409/aidvi | backend~aidvi_functions.py | import streamlit as st
from dotenv import load_dotenv
from PyPDF2 import PdfReader
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.llms import HuggingFaceHub
import os
from docx import Document
from youtube_transcript_api import YouTubeTranscriptApi as yta
import re
import requests
import re
import pickle
from bs4 import BeautifulSoup
import csv
load_dotenv()
openai_api_key=os.getenv("OPEN_AI_API_KEY")
conversation=""
def pdf__data(pdffile):
text = ""
#Read pdffile then store everything in text
for pdf in pdffile:
pdf_reader = PdfReader(pdf)
for page in pdf_reader.pages:
text += page.extract_text()
return text
def docs_data(docfile):
text = ""
doc = Document(docfile)
for paragraph in doc.paragraphs:
text = text + os.linesep + paragraph.text
return text
def tube__data(tube_link):
link=tube_link.split("v=")[1]
text_mkhrbq= yta.get_transcript(link)
text=""
# get_transcript returns a list of {'text', 'start', 'duration'} dicts
for entry in text_mkhrbq:
    text += entry['text']
return text
def webb__data(web_link):
link_request = requests.get(web_link)
all_content= BeautifulSoup(link_request.content, "html.parser")
text=all_content.get_text()
return(text)
def csv_data(csvv):
with open(csvv, newline='') as csvfile:
csvvv = csv.reader(csvfile)
text = '\n'.join(','.join(row) for row in csvvv)
return text
def get_text_chunks(text):
text_splitter = CharacterTextSplitter(
separator="\n",
chunk_size=1000,
chunk_overlap=200,
length_function=len
)
chunks = text_splitter.split_text(text)
print("All good in chunks")
return chunks
def get_vectorstore(text_chunks, directory_path):
embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
vectorstore = FAISS.from_texts(texts=text_chunks, embedding=embeddings)
print("All good in vectores")
with open(directory_path+"/vectorstore.pkl", "wb") as f:
pickle.dump(vectorstore, f)
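# Note: pickling a FAISS vectorstore works, but langchain's FAISS also offers
# save_local(directory_path) / FAISS.load_local(directory_path, embeddings),
# which is less fragile across library versions (assumes a recent langchain).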
def load_vectorstore(directory_path):
with open(directory_path+"/vectorstore.pkl", "rb") as f:
vectorstore = pickle.load(f)
return vectorstore
def get_conversation_chain(vectorstore):
llm = ChatOpenAI(openai_api_key=openai_api_key)
memory = ConversationBufferMemory(
memory_key='chat_history', return_messages=True)
conversation_chain = ConversationalRetrievalChain.from_llm(
llm=llm,
retriever=vectorstore.as_retriever(),
memory=memory
)
print("All good in Chain")
return conversation_chain
def handle_userinput(user_question,conversation):
response = conversation({'question': user_question})
print(response)
chat_history = response['chat_history']
print(chat_history)
def process_file(file_path):
# set up the variable that will hold the extracted text for each file type
result=""
file_extension = os.path.splitext(file_path)[1].lower()
if file_extension == '.pdf':
result=pdf__data([file_path])  # pdf__data expects an iterable of PDF files
elif file_extension == '.docx':
result=docs_data(file_path)
elif file_extension == '.csv':
result=csv_data(file_path)
elif file_extension == '.txt':
with open(file_path, 'r') as file:
file_content = file.read()
if 'youtube' in file_content.lower():
result=tube__data(file_content)
else:
result=webb__data(file_content)
else:
print(f"Skipping {file_path} - unsupported file type")
return result
| [] |
2024-01-10 | dflatline/GPTeam | src~utils~logging.py | import atexit
import json
import logging
import os
import re
from datetime import datetime
from pathlib import Path
from typing import List
import openai
import pytz
def clean_json_string(json_string):
cleaned_string = re.sub(r"\\\'", r"'", json_string) # replace \' with '
cleaned_string = re.sub(
r'\\"', r'"', cleaned_string
) # replace \" with " on cleaned_string
return cleaned_string
def get_completion_data(text) -> List[str]:
pattern = r"(api_version=[^\s]+)|(data=(.+?)(?= [^\s]+=))|(message='(.+?)')"
matches = re.findall(pattern, text)
cleaned_matches = []
for match in matches:
for item in match:
if item != "":
cleaned_matches.append(item)
break
return cleaned_matches
def get_key_value(text):
pattern = r"(\w+)=((?:\"(?:\\\"|[^\"])*\")|(?:\'(?:\\\'|[^'])*\'))"
matches = re.findall(pattern, text)
result = {}
for match in matches:
key, value = match[0], match[1]
# Remove the outer quotes and unescape the inner quotes
if value.startswith('"'):
value = value[1:-1].replace('\\"', '"')
else:
value = value[1:-1].replace("\\'", "'")
result[key] = value
return result
class OpenAIFilter(logging.Filter):
def filter(self, record):
return "openai" in record.name
class JsonArrayFileHandler(logging.FileHandler):
def __init__(self, filename, mode="a", encoding=None, delay=False):
super().__init__(filename, mode, encoding, delay)
self.closed_properly = False
self.stream.write("[")
atexit.register(self.close)
def close(self):
self.acquire()
try:
if not self.closed_properly:
self.stream.write("]")
self.closed_properly = True
super().close()
finally:
self.release()
def emit(self, record):
if self.stream.tell() > 1:
self.stream.write(",\n")
super().emit(record)
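# Together, __init__/emit/close make the log file a single JSON array: "[" is
# written up front, subsequent records are comma-separated, and "]" is appended
# when the handler is closed (registered via atexit).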
class LoggingFilter(logging.Filter):
def filter(self, record):
print("logging filter", record)
return True
def init_logging():
openai.util.logger.setLevel(logging.WARNING)
open("src/web/logs/agent.txt", "w").close()
def get_agent_logger():
# Create a logger
logger = logging.getLogger("agent")
logger.setLevel(logging.INFO)
# Prevent log messages from being passed to the root logger or any other ancestor logger
logger.propagate = False
# Remove all handlers associated with the logger object.
for handler in logger.handlers[:]:
logger.removeHandler(handler)
# Create a file handler
Path("src/web/logs/").mkdir(parents=True, exist_ok=True)
handler = logging.FileHandler("src/web/logs/agent.txt")
handler.setLevel(logging.INFO)
# Add the handlers to the logger
logger.addHandler(handler)
return logger
agent_logger = get_agent_logger()
| [] |
2024-01-10 | rudeigerc/FastChat | fastchat~llm_judge~gen_api_answer.py | """Generate answers with GPT-4
Usage:
python3 gen_api_answer.py --model gpt-3.5-turbo
"""
import argparse
import json
import os
import time
import concurrent.futures
import openai
import shortuuid
import tqdm
from fastchat.llm_judge.common import (
load_questions,
temperature_config,
chat_compeletion_openai,
chat_compeletion_anthropic,
chat_compeletion_palm,
)
from fastchat.llm_judge.gen_model_answer import reorg_answer_file
from fastchat.model.model_adapter import get_conversation_template, ANTHROPIC_MODEL_LIST
def get_answer(
question: dict, model: str, num_choices: int, max_tokens: int, answer_file: str
):
assert (
args.force_temperature is not None and "required_temperature" in question.keys()
) == False
if args.force_temperature is not None:
temperature = args.force_temperature
elif "required_temperature" in question.keys():
temperature = question["required_temperature"]
elif question["category"] in temperature_config:
temperature = temperature_config[question["category"]]
else:
temperature = 0.7
choices = []
chat_state = None # for palm-2 model
for i in range(num_choices):
conv = get_conversation_template(model)
turns = []
for j in range(len(question["turns"])):
conv.append_message(conv.roles[0], question["turns"][j])
conv.append_message(conv.roles[1], None)
if model in ANTHROPIC_MODEL_LIST:
output = chat_compeletion_anthropic(
model, conv, temperature, max_tokens
)
elif model == "palm-2-chat-bison-001":
chat_state, output = chat_compeletion_palm(
chat_state, model, conv, temperature, max_tokens
)
else:
output = chat_compeletion_openai(model, conv, temperature, max_tokens)
conv.update_last_message(output)
turns.append(output)
choices.append({"index": i, "turns": turns})
# Dump answers
ans = {
"question_id": question["question_id"],
"answer_id": shortuuid.uuid(),
"model_id": model,
"choices": choices,
"tstamp": time.time(),
}
os.makedirs(os.path.dirname(answer_file), exist_ok=True)
with open(answer_file, "a") as fout:
fout.write(json.dumps(ans) + "\n")
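# Answers are appended as JSON lines; reorg_answer_file (imported from
# gen_model_answer) is expected to sort and de-duplicate the file by question id
# once all futures have completed.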
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--bench-name",
type=str,
default="mt_bench",
help="The name of the benchmark question set.",
)
parser.add_argument("--answer-file", type=str, help="The output answer file.")
parser.add_argument("--model", type=str, default="gpt-3.5-turbo")
parser.add_argument(
"--num-choices",
type=int,
default=1,
help="How many completion choices to generate.",
)
parser.add_argument(
"--force-temperature", type=float, help="Forcibly set a sampling temperature."
)
parser.add_argument(
"--max-tokens",
type=int,
default=1024,
help="The maximum number of new generated tokens.",
)
parser.add_argument(
"--question-begin",
type=int,
help="A debug option. The begin index of questions.",
)
parser.add_argument(
"--question-end", type=int, help="A debug option. The end index of questions."
)
parser.add_argument(
"--parallel", type=int, default=1, help="The number of concurrent API calls."
)
parser.add_argument("--openai-api-base", type=str, default=None)
args = parser.parse_args()
if args.openai_api_base is not None:
openai.api_base = args.openai_api_base
question_file = f"data/{args.bench_name}/question.jsonl"
questions = load_questions(question_file, args.question_begin, args.question_end)
if args.answer_file:
answer_file = args.answer_file
else:
answer_file = f"data/{args.bench_name}/model_answer/{args.model}.jsonl"
print(f"Output to {answer_file}")
with concurrent.futures.ThreadPoolExecutor(max_workers=args.parallel) as executor:
futures = []
for question in questions:
future = executor.submit(
get_answer,
question,
args.model,
args.num_choices,
args.max_tokens,
answer_file,
)
futures.append(future)
for future in tqdm.tqdm(
concurrent.futures.as_completed(futures), total=len(futures)
):
future.result()
reorg_answer_file(answer_file)
| [] |
2024-01-10 | rudeigerc/FastChat | fastchat~serve~api_provider.py | """Call API providers."""
import os
import random
import time
from fastchat.utils import build_logger
from fastchat.constants import WORKER_API_TIMEOUT
logger = build_logger("gradio_web_server", "gradio_web_server.log")
def openai_api_stream_iter(
model_name,
messages,
temperature,
top_p,
max_new_tokens,
api_base=None,
api_key=None,
):
import openai
is_azure = False
if "azure" in model_name:
is_azure = True
openai.api_type = "azure"
openai.api_version = "2023-07-01-preview"
else:
openai.api_type = "open_ai"
openai.api_version = None
openai.api_base = api_base or "https://api.openai.com/v1"
openai.api_key = api_key or os.environ["OPENAI_API_KEY"]
if model_name == "gpt-4-turbo":
model_name = "gpt-4-1106-preview"
# Make requests
gen_params = {
"model": model_name,
"prompt": messages,
"temperature": temperature,
"top_p": top_p,
"max_new_tokens": max_new_tokens,
}
logger.info(f"==== request ====\n{gen_params}")
if is_azure:
res = openai.ChatCompletion.create(
engine=model_name,
messages=messages,
temperature=temperature,
max_tokens=max_new_tokens,
stream=True,
)
else:
res = openai.ChatCompletion.create(
model=model_name,
messages=messages,
temperature=temperature,
max_tokens=max_new_tokens,
stream=True,
)
text = ""
for chunk in res:
if len(chunk["choices"]) > 0:
text += chunk["choices"][0]["delta"].get("content", "")
data = {
"text": text,
"error_code": 0,
}
yield data
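# Note: each yielded dict carries the full response text accumulated so far
# (a growing prefix), not just the newest delta.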
def anthropic_api_stream_iter(model_name, prompt, temperature, top_p, max_new_tokens):
import anthropic
c = anthropic.Anthropic(api_key=os.environ["ANTHROPIC_API_KEY"])
# Make requests
gen_params = {
"model": model_name,
"prompt": prompt,
"temperature": temperature,
"top_p": top_p,
"max_new_tokens": max_new_tokens,
}
logger.info(f"==== request ====\n{gen_params}")
res = c.completions.create(
prompt=prompt,
stop_sequences=[anthropic.HUMAN_PROMPT],
max_tokens_to_sample=max_new_tokens,
temperature=temperature,
top_p=top_p,
model=model_name,
stream=True,
)
text = ""
for chunk in res:
text += chunk.completion
data = {
"text": text,
"error_code": 0,
}
yield data
def init_palm_chat(model_name):
import vertexai # pip3 install google-cloud-aiplatform
from vertexai.preview.language_models import ChatModel
project_id = os.environ["GCP_PROJECT_ID"]
location = "us-central1"
vertexai.init(project=project_id, location=location)
chat_model = ChatModel.from_pretrained(model_name)
chat = chat_model.start_chat(examples=[])
return chat
def palm_api_stream_iter(chat, message, temperature, top_p, max_new_tokens):
parameters = {
"temperature": temperature,
"top_p": top_p,
"max_output_tokens": max_new_tokens,
}
gen_params = {
"model": "palm-2",
"prompt": message,
}
gen_params.update(parameters)
logger.info(f"==== request ====\n{gen_params}")
response = chat.send_message(message, **parameters)
content = response.text
pos = 0
while pos < len(content):
# This is a fancy way to simulate token generation latency combined
# with a Poisson process.
pos += random.randint(10, 20)
time.sleep(random.expovariate(50))
data = {
"text": content[:pos],
"error_code": 0,
}
yield data
| [] |
2024-01-10 | teodorkasap/chat-langchain | query_data.py | """Create a ChatVectorDBChain for question/answering."""
from langchain.callbacks.manager import AsyncCallbackManager
from langchain.callbacks.tracers import LangChainTracer
from langchain.chains import ChatVectorDBChain, ConversationalRetrievalChain
from langchain.chains.chat_vector_db.prompts import (CONDENSE_QUESTION_PROMPT,
QA_PROMPT)
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.vectorstores.base import VectorStore
def get_chain(
vectorstore: VectorStore, question_handler, stream_handler, tracing: bool = False
) -> ConversationalRetrievalChain:
"""Create a ConversationalRetrievalChain for question/answering."""
# Construct a ConversationalRetrievalChain with a streaming llm for combine docs
# and a separate, non-streaming llm for question generation
manager = AsyncCallbackManager([])
question_manager = AsyncCallbackManager([question_handler])
stream_manager = AsyncCallbackManager([stream_handler])
if tracing:
tracer = LangChainTracer()
tracer.load_default_session()
manager.add_handler(tracer)
question_manager.add_handler(tracer)
stream_manager.add_handler(tracer)
question_gen_llm = OpenAI(
temperature=0,
verbose=True,
callback_manager=question_manager,
)
streaming_llm = OpenAI(
streaming=True,
callback_manager=stream_manager,
verbose=True,
temperature=0,
)
question_generator = LLMChain(
llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT, callback_manager=manager
)
doc_chain = load_qa_chain(
streaming_llm, chain_type="stuff", prompt=QA_PROMPT, callback_manager=manager
)
qa = ConversationalRetrievalChain(
# vectorstore=vectorstore,
retriever=vectorstore.as_retriever(),
combine_docs_chain=doc_chain,
question_generator=question_generator,
callback_manager=manager,
)
return qa
| [] |
2024-01-10 | LieZiWind/ScaledRoPE | longeval~utils.py | import json
import time
import os
import re
import sys
import argparse
import yaml
import openai
import tiktoken
import random
import itertools
import uuid
import torch
import transformers
import numpy as np
from transformers import logging
logging.set_verbosity_error()
from fastchat.model import load_model, get_conversation_template
HERE = __file__
REPO_DIR = os.path.join(os.path.dirname(HERE), "../")
def maybe_monkey_patch(args):
if "longchat" in args.model_name_or_path or args.interpolation_type is not None:
from longchat.train.monkey_patch.llama_condense_monkey_patch import replace_llama_with_condense
replace_llama_with_condense(args.longchat_ratio, args.interpolation_type)
if args.longchat_flash_attn:
from longchat.train.monkey_patch.llama_flash_attn_monkey_patch import replace_llama_attn_with_flash_attn
replace_llama_attn_with_flash_attn()
import transformers
def get_output_dir(args):
path = args.model_name_or_path
if path[-1] == "/":
path = path[:-1]
name = path.split("/")[-1]
output_dir = f"evaluation/{args.task}/predictions/{name}"
os.makedirs(output_dir, exist_ok=True)
print(f"output to {output_dir}")
return output_dir
def longeval_load_model(args):
if "mosaicml/mpt-7b-storywriter" in args.model_name_or_path:
# Adapt from: https://huggingface.co/mosaicml/mpt-7b-storywriter
filter_string()
config = transformers.AutoConfig.from_pretrained(args.model_name_or_path, trust_remote_code=True)
config.attn_config['attn_impl'] = 'triton'
model = transformers.AutoModelForCausalLM.from_pretrained(
args.model_name_or_path,
config=config,
torch_dtype=torch.bfloat16, # Load model weights in bfloat16
trust_remote_code=True
)
tokenizer = transformers.AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
elif "mosaicml/mpt-30b-chat" in args.model_name_or_path:
config = transformers.AutoConfig.from_pretrained(args.model_name_or_path, trust_remote_code=True)
model = transformers.AutoModelForCausalLM.from_pretrained(
args.model_name_or_path,
low_cpu_mem_usage=True,
trust_remote_code=True,
max_seq_len = 16384,
device_map = "auto",
max_memory= {i: f"{args.max_gpu_memory}GiB" for i in range(args.num_gpus)},
torch_dtype=torch.float16
)
model.attn_impl = "triton"
tokenizer = transformers.AutoTokenizer.from_pretrained(
args.model_name_or_path, trust_remote_code=True, use_fast=True, model_max_length=16384
)
model.config.eos_token_id = tokenizer.eos_token_id
model.config.pad_token_id = tokenizer.pad_token_id
elif "THUDM/chatglm2-6b" in args.model_name_or_path:
tokenizer = transformers.AutoTokenizer.from_pretrained(args.model_name_or_path, trust_remote_code=True)
model = transformers.AutoModel.from_pretrained(args.model_name_or_path, trust_remote_code=True).half().cuda()
model = model.eval()
elif "gpt-" in args.model_name_or_path:
tokenizer = None
model = None
elif "claude" in args.model_name_or_path:
tokenizer = None
model = None
else:
# Use fastchat load_model API
model, tokenizer = load_model(
args.model_name_or_path,
device="cuda",
num_gpus=args.num_gpus,
max_gpu_memory=f"{args.max_gpu_memory}GiB",
load_8bit=False,
cpu_offloading=False,
debug=False,
)
return model, tokenizer
def load_testcases(test_file):
with open(test_file, 'r') as json_file:
json_list = list(json_file)
test_cases = []
for test_case in json_list:
test_case = json.loads(test_case)
test_cases.append(test_case)
return test_cases
def test_topics_one_sample(model, tokenizer, test_case, output_file, idx, args):
prompt = test_case["prompt"]
topics = test_case["topics"]
if "mosaicml/mpt-7b-storywriter" in args.model_name_or_path:
from transformers import pipeline
pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, device='cuda:0')
# Use next word prediction to get storywriter answer
prompt += '\n ASSISTANT: The first topic is'
prompt_length = len(tokenizer(prompt).input_ids)
with torch.autocast('cuda', dtype=torch.bfloat16):
output = pipe(prompt, max_new_tokens=15, do_sample=True, use_cache=True)[0]['generated_text'][len(prompt):]
elif "THUDM/chatglm2-6b" in args.model_name_or_path:
prompt_length = len(tokenizer(prompt).input_ids)
output, _ = model.chat(tokenizer, prompt, history=[], max_length=16384)
output = [output]
elif "gpt-" in args.model_name_or_path:
prompt_length, output = retrieve_from_openai(prompt, args.model_name_or_path)
elif "claude" in args.model_name_or_path:
prompt_length, output = retrieve_from_anthropic(prompt, args.model_name_or_path)
else:
if "longchat" in args.model_name_or_path:
conv = get_conversation_template("vicuna")
else:
conv = get_conversation_template(args.model_name_or_path)
conv.append_message(conv.roles[0], prompt)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
input = tokenizer(prompt, return_tensors="pt")
prompt_length = input.input_ids.size()[-1]
# Disable use_cache if using longchat models with flash attention
use_cache = not ("longchat" in args.model_name_or_path and args.longchat_flash_attn)
output = model.generate(input.input_ids.to(model.device), max_new_tokens=50, use_cache=use_cache)[0]
output = output[prompt_length:]
output = tokenizer.batch_decode([output], skip_special_tokens=True)
summary = f"Label: {topics[0]}, Predict: {output}, prompt length: {prompt_length}".replace('\n', ' ')
print(summary)
if idx ==0:
with open(output_file, "w") as f:
f.write(summary)
f.write("\n")
else:
with open(output_file, "a+") as f:
f.write(summary)
f.write("\n")
return None, prompt_length, summary
def test_lines_one_sample(model, tokenizer, test_case, output_file, idx, args):
prompt = test_case["prompt"]
correct_line = test_case["correct_line"]
expected_number = test_case["expected_number"]
if "mosaicml/mpt-7b-storywriter" in args.model_name_or_path:
from transformers import pipeline
pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, device='cuda:0')
# Use next word prediction to get storywriter answer
prompt += f'Line <{test_case["random_idx"][0]}>: <REGISTER_CONTENT> is'
prompt_length = len(tokenizer(prompt).input_ids)
with torch.autocast('cuda', dtype=torch.bfloat16):
output = pipe(prompt, max_new_tokens=15, do_sample=True, use_cache=True)[0]['generated_text'][len(prompt):]
elif "THUDM/chatglm2-6b" in args.model_name_or_path:
prompt_length = len(tokenizer(prompt).input_ids)
output, _ = model.chat(tokenizer, prompt, history=[], max_length=16384)
elif "gpt-" in args.model_name_or_path:
prompt_length, output = retrieve_from_openai(prompt, args.model_name_or_path)
elif "claude" in args.model_name_or_path:
prompt_length, output = retrieve_from_anthropic(prompt, args.model_name_or_path)
else:
if "longchat" in args.model_name_or_path:
conv = get_conversation_template("vicuna")
else:
conv = get_conversation_template(args.model_name_or_path)
print(f"Using conversation template: {conv.name}")
if "mosaicml/mpt-30b-chat" in args.model_name_or_path:
prompt += f'Answer in the format <{test_case["random_idx"][0]}> <REGISTER_CONTENT>.'
conv.append_message(conv.roles[0], prompt)
conv.append_message(conv.roles[1], None)
prompt = conv.get_prompt()
input = tokenizer(prompt, return_tensors="pt")
prompt_length = input.input_ids.shape[-1]
# Disable use_cache if using longchat models with flash attention
use_cache = not ("longchat" in args.model_name_or_path and args.longchat_flash_attn)
output = model.generate(input.input_ids.to(model.device), max_new_tokens=100, use_cache=use_cache)[0]
output = output[prompt_length:]
output = tokenizer.batch_decode([output], skip_special_tokens=True)[0]
# Matching the last digit of the model output
response_number = re.findall("\d+", output)
if response_number is not None and len(response_number) > 0:
response_number = int(response_number[-1])
else:
print(f"Got unparsable result")
response_number = -1
summary = f"Label: {expected_number}, Predict: {output}, Parsed: {response_number}, prompt length: {prompt_length}".replace('\n', ' ')
print(summary)
if idx ==0:
with open(output_file, "w") as f:
f.write(summary)
f.write("\n")
else:
with open(output_file, "a+") as f:
f.write(summary)
f.write("\n")
return expected_number == response_number, prompt_length, summary
def token_counter(model_name, prompt):
if "gpt" in model_name:
token_size = len(tiktoken.encoding_for_model(model_name).encode(prompt))
print(f"Number of tokens: {token_size}")
else:
token_size = len(tiktoken.encoding_for_model(model_name).encode(prompt))
print(f"Number of tokens: {token_size} by using gpt tokenizer as default")
return token_size
def retrieve_from_openai(prompt, model_name, num_retries=10):
openai.api_key = os.environ["OPENAI_API_KEY"]
token_size = len(tiktoken.encoding_for_model(model_name).encode(prompt))
num_retries = 10
completion = None
for attempt in range(num_retries):
backoff = 2 ** (attempt)
try:
completion = openai.ChatCompletion.create(
model=model_name,
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": f"{prompt}"}
],
temperature = 0
)
break
except openai.error.APIError as e:
print(f"OpenAI API returned an API Error: {e}")
if attempt == num_retries - 1:
raise
except openai.error.APIConnectionError as e:
print(f"Failed to connect to OpenAI API: {e}")
if attempt == num_retries - 1:
raise
except openai.error.RateLimitError as e:
print(f"OpenAI API request exceeded rate limit: {e}")
if attempt == num_retries - 1:
raise
except openai.error.Timeout as e:
print(f"OpenAI API request timed out: {e}")
if attempt == num_retries - 1:
raise
except openai.error.InvalidRequestError as e:
print(f"Invalid request to OpenAI API: {e}")
if attempt == num_retries - 1:
raise
except openai.error.AuthenticationError as e:
print(f"Authentication error with OpenAI API: {e}")
if attempt == num_retries - 1:
raise
except openai.error.ServiceUnavailableError as e:
print(f"OpenAI API service unavailable: {e}")
if attempt == num_retries - 1:
raise
time.sleep(backoff)
if completion is None:
print(f"Failed to get response after {num_retries} retries")
return token_size, "Rate limit"  # callers unpack (prompt_length, output)
response_line = completion.choices[0].message["content"]
return token_size, response_line
def retrieve_from_anthropic(prompt, model_name, num_retries=10):
import anthropic
from anthropic import HUMAN_PROMPT, AI_PROMPT
client = anthropic.Client(os.environ["ANTHROPIC_API_KEY"])
completion = client.completion(
model = model_name,
max_retries=num_retries,
max_tokens_to_sample=300,
temperature=0,
prompt=f"{HUMAN_PROMPT} {prompt} {AI_PROMPT}"
)
return -1, completion["completion"]
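# No token count is computed for Claude prompts, so -1 is returned as the
# prompt-length placeholder.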
def filter_string():
class FilteredStream:
def __init__(self, original_stream, filter_string):
self.original_stream = original_stream
self.filter_string = filter_string
def write(self, message):
if self.filter_string not in message:
self.original_stream.write(message)
def flush(self):
self.original_stream.flush()
# Define the filter string to exclude specific content
filter_string = "The model 'MPTForCausalLM' is not supported for text-generation. Supported models are ['BartForCausalLM', 'BertLMHeadModel', 'BertGenerationDecoder', 'BigBirdForCausalLM', 'BigBirdPegasusForCausalLM', 'BioGptForCausalLM', 'BlenderbotForCausalLM', 'BlenderbotSmallForCausalLM', 'BloomForCausalLM', 'CamembertForCausalLM', 'CodeGenForCausalLM', 'CpmAntForCausalLM', 'CTRLLMHeadModel', 'Data2VecTextForCausalLM', 'ElectraForCausalLM', 'ErnieForCausalLM', 'GitForCausalLM', 'GPT2LMHeadModel', 'GPT2LMHeadModel', 'GPTBigCodeForCausalLM', 'GPTNeoForCausalLM', 'GPTNeoXForCausalLM', 'GPTNeoXJapaneseForCausalLM', 'GPTJForCausalLM', 'LlamaForCausalLM', 'MarianForCausalLM', 'MBartForCausalLM', 'MegaForCausalLM', 'MegatronBertForCausalLM', 'MvpForCausalLM', 'OpenAIGPTLMHeadModel', 'OPTForCausalLM', 'PegasusForCausalLM', 'PLBartForCausalLM', 'ProphetNetForCausalLM', 'QDQBertLMHeadModel', 'ReformerModelWithLMHead', 'RemBertForCausalLM', 'RobertaForCausalLM', 'RobertaPreLayerNormForCausalLM', 'RoCBertForCausalLM', 'RoFormerForCausalLM', 'Speech2Text2ForCausalLM', 'TransfoXLLMHeadModel', 'TrOCRForCausalLM', 'XGLMForCausalLM', 'XLMWithLMHeadModel', 'XLMProphetNetForCausalLM', 'XLMRobertaForCausalLM', 'XLMRobertaXLForCausalLM', 'XLNetLMHeadModel', 'XmodForCausalLM']."
# Create the filtered stream and replace sys.stdout with it
filtered_stream = FilteredStream(sys.stdout, filter_string)
sys.stdout = filtered_stream
def generate_topics_testcases(cfgs, output_dir):
conv_list = []
with open(os.path.join(REPO_DIR, "longeval/evaluation/topics/conversations.jsonl"), 'r') as json_file:
conv_obj_list = list(json_file)
for conv_obj in conv_obj_list:
conv_obj = json.loads(conv_obj)
conv_list.append(Conv(conv_obj["topic"], conv_obj["conversation"]))
# generate prompts for each num_topics
for num_topics in cfgs["num_topics"]:
prompt_list = []
for i in range(cfgs["num_test_samples"]):
prompt = Prompt(i)
indices = np.random.choice(list(range(len(conv_list))), size=num_topics, replace=False)
for idx in indices:
prompt.add_conv(conv_list[idx])
prompt_list.append(prompt)
prompt = None
# write to output file
avg_len = 0
output_path = os.path.join(output_dir, f"{num_topics}_topics.jsonl")
f = open(output_path, "w")
for i, p in enumerate(prompt_list):
pt = p.assemble_prompt()
curr_output = {"test_id": p.id,
"prompt": pt,
"topics": p.topic_list,
"prompt_length": -1}
json.dump(curr_output, f)
f.write("\n")
f.close()
def generate_lines_testcases(cfgs, output_dir):
for n in cfgs["num_lines"]:
output_path = os.path.join(output_dir, f"{n}_lines.jsonl")
f = open(output_path, "w")
for i in range(cfgs["num_test_samples"]):
prompt_header = "Below is a record of lines I want you to remember. " + \
"Each line begins with 'line <line index>' and contains " + \
"a '<REGISTER_CONTENT>' at the end of the line as a numerical value. " + \
"For each line index, memorize its corresponding <REGISTER_CONTENT>. At " + \
"the end of the record, I will ask you to retrieve the corresponding " + \
"<REGISTER_CONTENT> of a certain line index. Now the record start:\n\n"
lines = []
if cfgs["line_idx_opt"] == "LRT":
line_idxes = list(range(1, n + 1))
lines.extend([f"line {i}: REGISTER_CONTENT is <{random.randint(1, 50000)}>\n" for i in line_idxes])
random_idx = random.randint(1, n)
random_num = random_idx - 1
else:
line_idxes = generate_line_index(n, cfgs["line_idx_opt"])
lines.extend([f"line {i}: REGISTER_CONTENT is <{random.randint(1, 50000)}>\n" for i in line_idxes])
random_num = random.randint(0, len(line_idxes)-1)
random_idx = line_idxes[random_num]
expected_number, correct_line = retrieve_expected(lines, random_num)
lines.insert(0, f"{prompt_header}")
lines.insert(len(lines), f"\nNow the record is over. Tell me what is the <REGISTER_CONTENT> in line {random_idx}? I need the number.")
prompt = generate_prompt_from_lines(lines)
output = {
"random_idx": (random_idx, random_num), # this is the line to retrieve
"expected_number": expected_number,
"num_lines": n,
"correct_line": correct_line,
"prompt": prompt}
json.dump(output, f)
f.write("\n")
f.close()
class Conv:
"""a single conversation on a topic"""
def __init__(self, topic, content):
self.topic = topic
self.content = content
class Prompt:
"""the prompt used for testing, composed of multiple """
def __init__(self, id):
self.id = id
self.conv_list = []
self.topic_list = []
def add_conv(self, conv):
self.conv_list.append(conv)
self.topic_list.append(conv.topic)
def assemble_prompt(self):
record_prompt = "Below is a record of our previous conversation " + \
f"on {len(self.topic_list)} different topics. You are the ASSISTANT, and " + \
"I am the USER. At the beginning of each topic, the USER will say " + \
"'I would like to discuss the topic of <TOPIC>'. Memorize each " + \
"<TOPIC>. At the end of the record, I will ask you to retrieve the " + \
"first topic. Now the record start. "
for conv in self.conv_list:
record_prompt += conv.content
self.prompt = f"{record_prompt} Now " + \
"the record ends. What is the first topic(s) we discussed? Only give " + \
"me the topic name. Do not summarize yourself."
# self.prompt = "A chat between a curious user and an artificial intelligence " + \
# "assistant. The assistant gives helpful, detailed, and polite " + \
# f"answers to the user\'s questions. USER: {record_prompt} Now " + \
# f"the record ends. What is the {question_idx} topic(s) we discussed? Only give " + \
# "me the topic name(s) in the format of [<topic>, <topic>, ...]. Do not summarize yourself. Do not mention topic order. ASSISTANT:"
return self.prompt
def retrieve_cmd_args(): # setup program params from a given path to a yaml file
parser = argparse.ArgumentParser()
parser.add_argument('yaml_path', help='path to the yaml configuration')
args = parser.parse_args()
f = open(args.yaml_path, "r")
cfgs = yaml.load(f, Loader=yaml.CLoader)
print(yaml.dump(cfgs))
return cfgs
def generate_line_index(num_line, idx_opt):
if idx_opt == "LRT-ABCindex":
ingredients = ["A", "B", "C", "D", "E", "F"]
start = 6
comb = list(itertools.product(ingredients, repeat=start))
while len(comb) < num_line:
start += 1
comb = list(itertools.product(ingredients, repeat=start))
comb = ["".join(i) for i in comb]
return comb[:num_line]
elif idx_opt == "LRT-UUID":
comb = []
for i in range(num_line):
comb.append(str(uuid.uuid4()))
return comb
elif idx_opt == "LRT-NL":
import wonderwords
w = wonderwords.RandomWord()
adjs = w.random_words(num_line, include_categories=["adjective"])
nouns = w.random_words(num_line, include_categories=["noun"])
comb = []
for i, (adj, noun) in enumerate(zip(adjs, nouns)):
comb.append(f"{adj}-{noun}")
return comb
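# The three schemes above label lines with letter combinations (LRT-ABCindex),
# UUID4 strings (LRT-UUID), or random adjective-noun pairs (LRT-NL).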
def retrieve_expected(lines, random_line_pos):
correct_line = lines[random_line_pos]
expected_number = re.search("<\d+>", correct_line)
if expected_number is not None:
expected_number = int(expected_number.group()[1:-1])
else:
print(f"Got unparsable line: {correct_line}")
return expected_number, correct_line
def generate_prompt_from_lines(lines):
prompt = ""
for l in lines:
prompt += l
return prompt | [
"<TOPIC>. At the end of the record, I will ask you to retrieve the ",
"'I would like to discuss the topic of <TOPIC>'. Memorize each ",
"first topic. Now the record start. ",
"Answer in the format <P> <REGISTER_CONTENT>.",
"None",
"Below is a record of our previous conversation ",
"Line <P>: <REGISTER_CONTENT> is",
"You are a helpful assistant.",
"[]",
"Below is a record of lines I want you to remember. Each line begins with 'line <line index>' and contains a '<REGISTER_CONTENT>' at the end of the line as a numerical value. For each line index, memorize its corresponding <REGISTER_CONTENT>. At the end of the record, I will ask you to retrieve the corresponding <REGISTER_CONTENT> of a certain line index. Now the record start:\n\n",
"I am the USER. At the beginning of each topic, the USER will say ",
"\n ASSISTANT: The first topic is"
] |
2024-01-10 | tleers/langchain | tests~integration_tests~document_loaders~test_bshtml.py | import sys
from pathlib import Path
import pytest
from langchain.document_loaders.html_bs import BSHTMLLoader
def test_bs_html_loader() -> None:
"""Test unstructured loader."""
file_path = Path(__file__).parent.parent / "examples/example.html"
loader = BSHTMLLoader(str(file_path))
docs = loader.load()
assert len(docs) == 1
metadata = docs[0].metadata
assert metadata["title"] == "Chew dad's slippers"
assert metadata["source"] == str(file_path)
@pytest.mark.skipif(
bool(sys.flags.utf8_mode) or not sys.platform.startswith("win"),
reason="default encoding is utf8",
)
def test_bs_html_loader_non_utf8() -> None:
"""Test providing encoding to BSHTMLLoader."""
file_path = Path(__file__).parent.parent / "examples/example-utf8.html"
with pytest.raises(UnicodeDecodeError):
BSHTMLLoader(str(file_path)).load()
loader = BSHTMLLoader(str(file_path), open_encoding="utf8")
docs = loader.load()
assert len(docs) == 1
metadata = docs[0].metadata
assert metadata["title"] == "Chew dad's slippers"
assert metadata["source"] == str(file_path)
| [] |
2024-01-10 | tleers/langchain | langchain~vectorstores~elastic_vector_search.py | """Wrapper around Elasticsearch vector database."""
from __future__ import annotations
import uuid
from abc import ABC
from typing import Any, Dict, Iterable, List, Optional
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_dict_or_env
from langchain.vectorstores.base import VectorStore
def _default_text_mapping(dim: int) -> Dict:
return {
"properties": {
"text": {"type": "text"},
"vector": {"type": "dense_vector", "dims": dim},
}
}
def _default_script_query(query_vector: List[float]) -> Dict:
return {
"script_score": {
"query": {"match_all": {}},
"script": {
"source": "cosineSimilarity(params.query_vector, 'vector') + 1.0",
"params": {"query_vector": query_vector},
},
}
}
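# cosineSimilarity returns values in [-1, 1]; the "+ 1.0" shifts scores into
# [0, 2] because Elasticsearch script_score queries must not return negative scores.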
# ElasticVectorSearch is a concrete implementation of the abstract base class
# VectorStore, which defines a common interface for all vector database
# implementations. By inheriting from the ABC class, ElasticVectorSearch can be
# defined as an abstract base class itself, allowing the creation of subclasses with
# their own specific implementations. If you plan to subclass ElasticVectorSearch,
# you can inherit from it and define your own implementation of the necessary methods
# and attributes.
class ElasticVectorSearch(VectorStore, ABC):
"""Wrapper around Elasticsearch as a vector database.
To connect to an Elasticsearch instance that does not require
login credentials, pass the Elasticsearch URL and index name along with the
embedding object to the constructor.
Example:
.. code-block:: python
from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings()
elastic_vector_search = ElasticVectorSearch(
elasticsearch_url="http://localhost:9200",
index_name="test_index",
embedding=embedding
)
To connect to an Elasticsearch instance that requires login credentials,
including Elastic Cloud, use the Elasticsearch URL format
https://username:password@es_host:9243. For example, to connect to Elastic
Cloud, create the Elasticsearch URL with the required authentication details and
pass it to the ElasticVectorSearch constructor as the named parameter
elasticsearch_url.
You can obtain your Elastic Cloud URL and login credentials by logging in to the
Elastic Cloud console at https://cloud.elastic.co, selecting your deployment, and
navigating to the "Deployments" page.
To obtain your Elastic Cloud password for the default "elastic" user:
1. Log in to the Elastic Cloud console at https://cloud.elastic.co
2. Go to "Security" > "Users"
3. Locate the "elastic" user and click "Edit"
4. Click "Reset password"
5. Follow the prompts to reset the password
The format for Elastic Cloud URLs is
https://username:password@cluster_id.region_id.gcp.cloud.es.io:9243.
Example:
.. code-block:: python
from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embedding = OpenAIEmbeddings()
elastic_host = "cluster_id.region_id.gcp.cloud.es.io"
elasticsearch_url = f"https://username:password@{elastic_host}:9243"
elastic_vector_search = ElasticVectorSearch(
elasticsearch_url=elasticsearch_url,
index_name="test_index",
embedding=embedding
)
Args:
elasticsearch_url (str): The URL for the Elasticsearch instance.
index_name (str): The name of the Elasticsearch index for the embeddings.
embedding (Embeddings): An object that provides the ability to embed text.
It should be an instance of a class that subclasses the Embeddings
abstract base class, such as OpenAIEmbeddings()
Raises:
ValueError: If the elasticsearch python package is not installed.
"""
def __init__(self, elasticsearch_url: str, index_name: str, embedding: Embeddings):
"""Initialize with necessary components."""
try:
import elasticsearch
except ImportError:
raise ValueError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
self.embedding = embedding
self.index_name = index_name
try:
es_client = elasticsearch.Elasticsearch(elasticsearch_url) # noqa
except ValueError as e:
raise ValueError(
f"Your elasticsearch client string is misformatted. Got error: {e} "
)
self.client = es_client
def add_texts(
self,
texts: Iterable[str],
metadatas: Optional[List[dict]] = None,
refresh_indices: bool = True,
**kwargs: Any,
) -> List[str]:
"""Run more texts through the embeddings and add to the vectorstore.
Args:
texts: Iterable of strings to add to the vectorstore.
metadatas: Optional list of metadatas associated with the texts.
refresh_indices: bool to refresh ElasticSearch indices
Returns:
List of ids from adding the texts into the vectorstore.
"""
try:
from elasticsearch.helpers import bulk
except ImportError:
raise ValueError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
requests = []
ids = []
embeddings = self.embedding.embed_documents(list(texts))
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
"vector": embeddings[i],
"text": text,
"metadata": metadata,
"_id": _id,
}
ids.append(_id)
requests.append(request)
bulk(self.client, requests)
if refresh_indices:
self.client.indices.refresh(index=self.index_name)
return ids
def similarity_search(
self, query: str, k: int = 4, **kwargs: Any
) -> List[Document]:
"""Return docs most similar to query.
Args:
query: Text to look up documents similar to.
k: Number of Documents to return. Defaults to 4.
Returns:
List of Documents most similar to the query.
"""
embedding = self.embedding.embed_query(query)
script_query = _default_script_query(embedding)
response = self.client.search(index=self.index_name, query=script_query)
hits = [hit["_source"] for hit in response["hits"]["hits"][:k]]
documents = [
Document(page_content=hit["text"], metadata=hit["metadata"]) for hit in hits
]
return documents
@classmethod
def from_texts(
cls,
texts: List[str],
embedding: Embeddings,
metadatas: Optional[List[dict]] = None,
**kwargs: Any,
) -> ElasticVectorSearch:
"""Construct ElasticVectorSearch wrapper from raw documents.
This is a user-friendly interface that:
1. Embeds documents.
2. Creates a new index for the embeddings in the Elasticsearch instance.
3. Adds the documents to the newly created Elasticsearch index.
This is intended to be a quick way to get started.
Example:
.. code-block:: python
from langchain import ElasticVectorSearch
from langchain.embeddings import OpenAIEmbeddings
embeddings = OpenAIEmbeddings()
elastic_vector_search = ElasticVectorSearch.from_texts(
texts,
embeddings,
elasticsearch_url="http://localhost:9200"
)
"""
elasticsearch_url = get_from_dict_or_env(
kwargs, "elasticsearch_url", "ELASTICSEARCH_URL"
)
try:
import elasticsearch
from elasticsearch.helpers import bulk
except ImportError:
raise ValueError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticearch`."
)
try:
client = elasticsearch.Elasticsearch(elasticsearch_url)
except ValueError as e:
raise ValueError(
"Your elasticsearch client string is misformatted. " f"Got error: {e} "
)
index_name = uuid.uuid4().hex
embeddings = embedding.embed_documents(texts)
dim = len(embeddings[0])
mapping = _default_text_mapping(dim)
# TODO would be nice to create index before embedding,
# just to save expensive steps for last
client.indices.create(index=index_name, mappings=mapping)
requests = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
request = {
"_op_type": "index",
"_index": index_name,
"vector": embeddings[i],
"text": text,
"metadata": metadata,
}
requests.append(request)
bulk(client, requests)
client.indices.refresh(index=index_name)
return cls(elasticsearch_url, index_name, embedding)
| [] |
2024-01-10 | tleers/langchain | tests~unit_tests~test_python.py | """Test functionality of Python REPL."""
from langchain.python import PythonREPL
from langchain.tools.python.tool import PythonREPLTool
_SAMPLE_CODE = """
```
def multiply():
print(5*6)
multiply()
```
"""
def test_python_repl() -> None:
"""Test functionality when globals/locals are not provided."""
repl = PythonREPL()
# Run a simple initial command.
repl.run("foo = 1")
assert repl.locals is not None
assert repl.locals["foo"] == 1
# Now run a command that accesses `foo` to make sure it still has it.
repl.run("bar = foo * 2")
assert repl.locals is not None
assert repl.locals["bar"] == 2
def test_python_repl_no_previous_variables() -> None:
"""Test that it does not have access to variables created outside the scope."""
foo = 3 # noqa: F841
repl = PythonREPL()
output = repl.run("print(foo)")
assert output == "name 'foo' is not defined"
def test_python_repl_pass_in_locals() -> None:
"""Test functionality when passing in locals."""
_locals = {"foo": 4}
repl = PythonREPL(_locals=_locals)
repl.run("bar = foo * 2")
assert repl.locals is not None
assert repl.locals["bar"] == 8
def test_functionality() -> None:
"""Test correct functionality."""
chain = PythonREPL()
code = "print(1 + 1)"
output = chain.run(code)
assert output == "2\n"
def test_functionality_multiline() -> None:
"""Test correct functionality for ChatGPT multiline commands."""
chain = PythonREPL()
tool = PythonREPLTool(python_repl=chain)
output = tool.run(_SAMPLE_CODE)
assert output == "30\n"
def test_function() -> None:
"""Test correct functionality."""
chain = PythonREPL()
code = "def add(a, b): " " return a + b"
output = chain.run(code)
assert output == ""
code = "print(add(1, 2))"
output = chain.run(code)
assert output == "3\n"
| [] |
2024-01-10 | tleers/langchain | langchain~document_loaders~googledrive.py | """Loader that loads data from Google Drive."""
# Prerequisites:
# 1. Create a Google Cloud project
# 2. Enable the Google Drive API:
# https://console.cloud.google.com/flows/enableapi?apiid=drive.googleapis.com
# 3. Authorize credentials for desktop app:
# https://developers.google.com/drive/api/quickstart/python#authorize_credentials_for_a_desktop_application # noqa: E501
# 4. For service accounts visit
# https://cloud.google.com/iam/docs/service-accounts-create
from pathlib import Path
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, root_validator, validator
from langchain.docstore.document import Document
from langchain.document_loaders.base import BaseLoader
SCOPES = ["https://www.googleapis.com/auth/drive.readonly"]
class GoogleDriveLoader(BaseLoader, BaseModel):
"""Loader that loads Google Docs from Google Drive."""
service_account_key: Path = Path.home() / ".credentials" / "keys.json"
credentials_path: Path = Path.home() / ".credentials" / "credentials.json"
token_path: Path = Path.home() / ".credentials" / "token.json"
folder_id: Optional[str] = None
document_ids: Optional[List[str]] = None
file_ids: Optional[List[str]] = None
@root_validator
def validate_folder_id_or_document_ids(
cls, values: Dict[str, Any]
) -> Dict[str, Any]:
"""Validate that either folder_id or document_ids is set, but not both."""
if values.get("folder_id") and (
values.get("document_ids") or values.get("file_ids")
):
raise ValueError(
"Cannot specify both folder_id and document_ids nor "
"folder_id and file_ids"
)
if (
not values.get("folder_id")
and not values.get("document_ids")
and not values.get("file_ids")
):
raise ValueError("Must specify either folder_id, document_ids, or file_ids")
return values
@validator("credentials_path")
def validate_credentials_path(cls, v: Any, **kwargs: Any) -> Any:
"""Validate that credentials_path exists."""
if not v.exists():
raise ValueError(f"credentials_path {v} does not exist")
return v
def _load_credentials(self) -> Any:
"""Load credentials."""
# Adapted from https://developers.google.com/drive/api/v3/quickstart/python
try:
from google.auth.transport.requests import Request
from google.oauth2 import service_account
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
except ImportError:
raise ImportError(
"You must run"
"`pip install --upgrade "
"google-api-python-client google-auth-httplib2 "
"google-auth-oauthlib`"
"to use the Google Drive loader."
)
creds = None
if self.service_account_key.exists():
return service_account.Credentials.from_service_account_file(
str(self.service_account_key), scopes=SCOPES
)
if self.token_path.exists():
creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
str(self.credentials_path), SCOPES
)
creds = flow.run_local_server(port=0)
with open(self.token_path, "w") as token:
token.write(creds.to_json())
return creds
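# Credential resolution order: a service-account key file if present, then a
# cached user token, and finally the interactive OAuth flow (which writes a
# fresh token file).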
def _load_sheet_from_id(self, id: str) -> List[Document]:
"""Load a sheet and all tabs from an ID."""
from googleapiclient.discovery import build
creds = self._load_credentials()
sheets_service = build("sheets", "v4", credentials=creds)
spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=id).execute()
sheets = spreadsheet.get("sheets", [])
documents = []
for sheet in sheets:
sheet_name = sheet["properties"]["title"]
result = (
sheets_service.spreadsheets()
.values()
.get(spreadsheetId=id, range=sheet_name)
.execute()
)
values = result.get("values", [])
header = values[0]
for i, row in enumerate(values[1:], start=1):
metadata = {
"source": (
f"https://docs.google.com/spreadsheets/d/{id}/"
f"edit?gid={sheet['properties']['sheetId']}"
),
"title": f"{spreadsheet['properties']['title']} - {sheet_name}",
"row": i,
}
content = []
for j, v in enumerate(row):
title = header[j].strip() if len(header) > j else ""
content.append(f"{title}: {v.strip()}")
page_content = "\n".join(content)
documents.append(Document(page_content=page_content, metadata=metadata))
return documents
def _load_document_from_id(self, id: str) -> Document:
"""Load a document from an ID."""
from io import BytesIO
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaIoBaseDownload
creds = self._load_credentials()
service = build("drive", "v3", credentials=creds)
file = service.files().get(fileId=id).execute()
request = service.files().export_media(fileId=id, mimeType="text/plain")
fh = BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
try:
while done is False:
status, done = downloader.next_chunk()
except HttpError as e:
if e.resp.status == 404:
print("File not found: {}".format(id))
else:
print("An error occurred: {}".format(e))
text = fh.getvalue().decode("utf-8")
metadata = {
"source": f"https://docs.google.com/document/d/{id}/edit",
"title": f"{file.get('name')}",
}
return Document(page_content=text, metadata=metadata)
def _load_documents_from_folder(self) -> List[Document]:
"""Load documents from a folder."""
from googleapiclient.discovery import build
creds = self._load_credentials()
service = build("drive", "v3", credentials=creds)
results = (
service.files()
.list(
q=f"'{self.folder_id}' in parents",
pageSize=1000,
fields="nextPageToken, files(id, name, mimeType)",
)
.execute()
)
items = results.get("files", [])
returns = []
for item in items:
if item["mimeType"] == "application/vnd.google-apps.document":
returns.append(self._load_document_from_id(item["id"]))
elif item["mimeType"] == "application/vnd.google-apps.spreadsheet":
returns.extend(self._load_sheet_from_id(item["id"]))
elif item["mimeType"] == "application/pdf":
returns.extend(self._load_file_from_id(item["id"]))
else:
pass
return returns
def _load_documents_from_ids(self) -> List[Document]:
"""Load documents from a list of IDs."""
if not self.document_ids:
raise ValueError("document_ids must be set")
return [self._load_document_from_id(doc_id) for doc_id in self.document_ids]
def _load_file_from_id(self, id: str) -> List[Document]:
"""Load a file from an ID."""
from io import BytesIO
from googleapiclient.discovery import build
from googleapiclient.http import MediaIoBaseDownload
creds = self._load_credentials()
service = build("drive", "v3", credentials=creds)
file = service.files().get(fileId=id).execute()
request = service.files().get_media(fileId=id)
fh = BytesIO()
downloader = MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
content = fh.getvalue()
from PyPDF2 import PdfReader
pdf_reader = PdfReader(BytesIO(content))
return [
Document(
page_content=page.extract_text(),
metadata={
"source": f"https://drive.google.com/file/d/{id}/view",
"title": f"{file.get('name')}",
"page": i,
},
)
for i, page in enumerate(pdf_reader.pages)
]
def _load_file_from_ids(self) -> List[Document]:
"""Load files from a list of IDs."""
if not self.file_ids:
raise ValueError("file_ids must be set")
docs = []
for file_id in self.file_ids:
docs.extend(self._load_file_from_id(file_id))
return docs
def load(self) -> List[Document]:
"""Load documents."""
if self.folder_id:
return self._load_documents_from_folder()
elif self.document_ids:
return self._load_documents_from_ids()
else:
return self._load_file_from_ids()
| [] |
2024-01-10 | chenyu-wang55/Data-Science-Project | NLP~src~LDA_analysis.py | from kneed import KneeLocator
from sklearn.cluster import KMeans
from pprint import pprint
from gensim.models import Phrases #gensim version 3.6.0
from gensim.corpora import Dictionary
from gensim.models import LdaModel
from matplotlib import pyplot as plt
from wordcloud import WordCloud, STOPWORDS
import matplotlib.colors as mcolors
import pyLDAvis.gensim #2.1.2
import nltk
from nltk.corpus import stopwords
import re
import string
import pandas as pd
import os
from sklearn.feature_extraction.text import TfidfVectorizer
from nltk.tokenize import word_tokenize
from utilities import utils, processing_utils
import pickle
from sklearn.decomposition import PCA
from gensim.models.coherencemodel import CoherenceModel
from sklearn.manifold import TSNE
DATASETS_DIR = '../datasets/'
DATASETS_MED_DIR = '../datasets/Meditation.csv'
DATASETS_LPT_DIR = '../datasets/LifeProTips.csv'
DATASETS_FSP_DIR = '../datasets/Friendship.csv'
DATASETS_DEPRESSION_DIR = '../datasets/Depression.csv'
DATASETS_TRAINING_DATASET_DIR = '../datasets/training_dataset.p'
DATASETS_COVID_DATASET_DIR = '../datasets/the-reddit-covid-dataset-comments.csv.zip'
DATASETS_X_TRAIN_DIR = '../datasets/x_train.p'
DATASETS_Y_TRAIN_DIR = '../datasets/y_train.p'
DATASETS_X_VAL_DIR = '../datasets/x_val.p'
DATASETS_Y_VAL_DIR = '../datasets/y_val.p'
DATASETS_X_TEST_DIR = '../datasets/x_test.p'
DATASETS_Y_TEST_DIR = '../datasets/y_test.p'
DATASETS_COVID_19_DATASET_DIR = '../datasets/covid_19_dataset.p'
DATASETS_X_TEST_CSV_DIR = '../datasets/x_test.csv'
DATASETS_Y_TEST_CSV_DIR = '../datasets/y_test.csv'
DATASETS_MANUAL_LABELED = '../datasets/data_manual_labeled.csv'
DATASETS_COVID_19_DATASET_CSV_DIR = '../datasets/covid_19_dataset.csv'
# directories to experiment results.
EXPERIMENTS_DIR = '../experiments'
EXPERIMENTS_BERT_RESULTS_TEST_100_PREDICTIONS_DIR = '../experiments/fine_tuning_bert_results/test_100_predictions.p'
EXPERIMENTS_BERT_RESULTS_COVID_19_PREDICTIONS_DIR = '../experiments/fine_tuning_bert_results/covid_19_predictions.p'
EXPERIMENTS_COVID_19_DATASET_PREDICTED_CSV_DIR = '../experiments/fine_tuning_bert_results/covid_19_dataset_predicted.csv'
EXPERIMENTS_SIMPLE_CLASSIFIER_DIR = '../experiments/simple_classifier_results'
EXPERIMENTS_LDA_ANALYSIS_DIR = '../experiments/LDA_analysis_results'
EXPERIMENTS_LDA_ANALYSIS_PYLDAVIS_DIR = '../experiments/LDA_analysis_results/pyLDAvis.html'
# path to pre_trained_model
PRETRAINED_MODEL_DIR = '../pre_trained_model'
ZIP_DIR = '../pre_trained_model/enwiki_dbow-20220306T033226Z-001.zip'
PRETRAINED_MODEL_TRAIN_DIR = '../pre_trained_model/train_doc2vec.p'
PRETRAINED_MODEL_TEST_DIR = '../pre_trained_model/test_doc2vec.p'
PRETRAINED_MODEL_COVID_19_DIR = '../pre_trained_model/covid_19_doc2vec.p'
PRETRAINED_MODEL_COVID_19_TFIDF_DIR = '../pre_trained_model/covid_19_tfidf.p'
PRETRAINED_MODEL_X_TFIDF_DIR = '../pre_trained_model/x_tfidf.p'
#Text Cleaning
nltk.download('stopwords')
nltk.download('wordnet')
nltk.download('punkt')
def clean_text(text, flg_stemm=False, flg_lemm=True):
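    # Lowercase each post, strip URLs/HTML/punctuation/digit-bearing tokens, tokenize,
    # drop stopwords and tokens shorter than 3 characters, then optionally stem or lemmatize.
    # Returns (list of token lists, list of joined strings).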
global stopwords
text = text.values.tolist()
stopwords = stopwords.words('english') + stopwords.words('french') + stopwords.words('spanish')
newStopWords = ['people','you','covid','like','the','http','get']
stopwords.extend(newStopWords)
ps = nltk.stem.porter.PorterStemmer()
lem = nltk.stem.wordnet.WordNetLemmatizer()
text_cleaned = []
text_cleaned1 = []
for word in text:
word = word.lower()
word = re.sub('\[.*?\]', '', word)
word = re.sub('https?://\S+|www\.\S+', '', word)
word = re.sub('<.*?>+', '', word)
word = re.sub('[%s]' % re.escape(string.punctuation), '', word)
word = re.sub('\n', '', word)
word = re.sub('\w*\d\w*', '', word)
word = re.sub('http','',word)
word = re.sub('[^A-Za-z0-9.]+', ' ', word)
tokenized_word = nltk.word_tokenize(word)
            # remove stopwords, numbers, and tokens shorter than 3 characters
lst_text = [w for w in tokenized_word if
(len(w) > 2 and w not in stopwords)]
# Stemming (remove -ing, -ly, ...)
if flg_stemm == True:
lst_text = [ps.stem(word) for word in lst_text]
# Lemmatisation (convert the word into root word)
if flg_lemm == True:
lst_text = [lem.lemmatize(word) for word in lst_text]
lst_str = ' '.join(lst_text)
#print(lst_str)
text_cleaned.append(lst_text)
text_cleaned1.append(lst_str)
#print(text_cleaned)
return text_cleaned, text_cleaned1
def num_topic(X_cv):
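    # Choose the number of topics with the elbow method: fit KMeans for 1-19 clusters,
    # plot the within-cluster sum of squares (WCSS), and locate the knee with KneeLocator.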
#find the optimal number of clusters
number_of_clusters = 20
wcss = []
for i in range (1, number_of_clusters):
model = KMeans(n_clusters=i,
init='k-means++',
max_iter=20)
model.fit(X_cv)
wcss.append(model.inertia_)
kl = KneeLocator(range(1, 20), wcss, curve="convex", direction="decreasing")
print(kl.elbow)
plt.plot(range(1, number_of_clusters), wcss)
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
#plt.show()
path = EXPERIMENTS_LDA_ANALYSIS_DIR + '/optimal topic number'
plt.savefig(path)
return kl.elbow
def lda_model(X_LDA, n_topic):
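    # Append frequent bigrams to each document, build a filtered gensim Dictionary and
    # bag-of-words corpus, then train an LdaModel with auto-tuned alpha and eta.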
    # Add bigrams to docs (only ones that appear 20 times or more).
bigram = Phrases(X_LDA, min_count=20)
for idx in range(len(X_LDA)):
for token in bigram[X_LDA[idx]]:
if '_' in token:
# Token is a bigram, add to document.
X_LDA[idx].append(token)
#print(X_LDA)
dictionary = Dictionary(X_LDA)
dictionary.filter_extremes(no_below=20, no_above=0.5)
corpus = [dictionary.doc2bow(doc) for doc in X_LDA]
print('Number of unique tokens: %d' % len(dictionary))
print('Number of documents: %d' % len(corpus))
#print(dictionary)
num_topics = n_topic
chunksize = 2000
passes = 20
iterations = 400
eval_every = None # Don't evaluate model perplexity, takes too much time.
    # Make an index-to-word dictionary.
temp = dictionary[0] # This is only to "load" the dictionary.
id2word = dictionary.id2token
model = LdaModel(
corpus=corpus,
id2word=id2word,
chunksize=chunksize,
alpha='auto',
eta='auto',
iterations=iterations,
num_topics=num_topics,
passes=passes,
eval_every=eval_every
)
pprint(model.print_topics())
return corpus, dictionary, model
def get_coherence(text_lda, n_topic):
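    # Train an LDA model for each candidate topic count and plot the resulting c_v coherence scores.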
results = []
for n in n_topic:
corpus, dictionary, model = lda_model(text_lda, n)
texts = [[dictionary[word_id] for word_id, freq in doc] for doc in corpus]
cm = CoherenceModel(model=model, corpus=corpus, texts=texts, dictionary=dictionary, coherence='c_v')
coherence = cm.get_coherence()
results.append(coherence)
print(results)
    plt.figure()
    plt.plot(n_topic, results)
plt.xlabel('Number of clusters')
plt.ylabel('coherence')
# plt.show()
path = EXPERIMENTS_LDA_ANALYSIS_DIR + '/coherence results'
plt.savefig(path)
return results
def plot_topic(model, num):
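    # Draw a word cloud of the top words for each topic on a grid of subplots and save the figure.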
cols = [color for name, color in mcolors.TABLEAU_COLORS.items()] # more colors: 'mcolors.XKCD_COLORS'
cloud = WordCloud(stopwords=nltk.corpus.stopwords.words("english"),
background_color='white',
width=2500,
height=1800,
max_words=10,
colormap='tab10',
color_func=lambda *args, **kwargs: cols[i],
prefer_horizontal=1.0)
topics = model.show_topics(formatted=False)
fig, axes = plt.subplots(2, int(num/2), figsize=(10, 10), sharex=True, sharey=True)
for i, ax in enumerate(axes.flatten()):
fig.add_subplot(ax)
topic_words = dict(topics[i][1])
cloud.generate_from_frequencies(topic_words, max_font_size=300)
plt.gca().imshow(cloud)
plt.gca().set_title('Topic ' + str(i), fontdict=dict(size=16))
plt.gca().axis('off')
plt.subplots_adjust(wspace=0, hspace=0)
plt.axis('off')
plt.margins(x=0, y=0)
plt.tight_layout()
#plt.show()
path = EXPERIMENTS_LDA_ANALYSIS_DIR + '/top words in each topic'
plt.savefig(path)
def main():
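    # End-to-end run: load the fine-tuned BERT predictions, keep posts labelled as depression,
    # clean the text, build TF-IDF + PCA features, choose a topic count, fit LDA, and export
    # the coherence, word-cloud and pyLDAvis outputs.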
covid_19_predicted = pd.read_csv(EXPERIMENTS_COVID_19_DATASET_PREDICTED_CSV_DIR)
#print(covid_19_predicted.info)
covid_19_depression = covid_19_predicted.loc[covid_19_predicted["predictions"] == 1]
#print(covid_19_depression.info)
text = covid_19_depression['body']
#print(X)
#clean the dataset
    text_lda, text_tf = clean_text(text)
#transform to tfidf
if not os.path.exists(PRETRAINED_MODEL_COVID_19_TFIDF_DIR):
vectorizer = TfidfVectorizer()
covid_19_depression_tfidf = vectorizer.fit_transform(text_tf)
covid_19_depression_tfidf = covid_19_depression_tfidf.toarray()
pca = PCA(n_components=300)
covid_19_depression_pca = pca.fit_transform(covid_19_depression_tfidf)
pickle.dump(covid_19_depression_pca, open(PRETRAINED_MODEL_COVID_19_TFIDF_DIR, 'wb'))
covid_19_depression_pca = pickle.load(open(PRETRAINED_MODEL_COVID_19_TFIDF_DIR, 'rb'))
print(covid_19_depression_pca.shape)
#find the optimal number topic
n_topic = num_topic(covid_19_depression_pca)
# n_topic = 8
topics = [7, 8, 9, 10, 11, 12, 13, 14, 15]
#LDA model
corpus, dictionary, model = lda_model(text_lda, n_topic)
coherence = get_coherence(text_lda, topics)
plot_topic(model, n_topic)
vis = pyLDAvis.gensim.prepare(model, corpus, dictionary)
pyLDAvis.save_html(vis, EXPERIMENTS_LDA_ANALYSIS_PYLDAVIS_DIR)
pyLDAvis.show(vis)
if __name__ == '__main__':
main() | [] |
2024-01-10 | pmarcelino/camoes | tests~essay_language_classification.py | import os
import openai
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
openai.api_key = os.environ["OPENAI_API_KEY"]
answers_path = '.././data/artificial/'
def read_txt_file(file_path):
"""
Reads the content of a text file and returns it as a string.
Args:
file_path (str): The path to the text file.
Returns:
str: The content of the text file as a string.
"""
with open(file_path, 'r', encoding='utf-8') as file:
file_content = file.read()
return file_content
def get_completion(prompt, model="gpt-3.5-turbo"): # "gpt-3.5-turbo", "gpt-4"
"""
Generates a completion response for a given prompt using the OpenAI ChatGPT model.
Args:
prompt (str): The prompt text to generate a completion for.
model (str, optional): The model to use for completion generation. Defaults to "gpt-3.5-turbo".
Returns:
str: The generated completion response.
"""
messages = [{"role": "user", "content": prompt}]
response = openai.ChatCompletion.create(
model=model,
messages=messages,
temperature=0,
)
return response.choices[0].message["content"]
def save_string_to_txt_file(answer_path, string):
"""
Saves the given string to a text file.
Args:
answer_path (str): The path to the output text file.
string (str): The string to be saved.
Returns:
None
"""
with open(answer_path, 'w', encoding='utf-8') as file:
file.write(string)
def process_essay_answer_language_classification(folder_path):
"""
Process files in the specified folder and generate language classification results for essay answers.
Args:
folder_path (str): The path to the folder containing the files.
Returns:
None
"""
tests_results = []
for file_name in os.listdir(folder_path):
if file_name.endswith('essay_answer.txt'):
answer_path = os.path.join(folder_path, file_name)
essay_answer = read_txt_file(answer_path)
prompt = "This is an essay answer. Say 'Portuguese' if the answer is in Portuguese, or 'Non-Portuguese' if the answer is in any other language." + "\n\n---\n\n" + essay_answer
response = get_completion(prompt)
tests_results.append(file_name + ": " + response)
print(file_name + ": " + "Done!")
# Save tests results to a text file
test_results_path = os.path.join('./', 'essay_language_classification_tests_results.log')
save_string_to_txt_file(test_results_path, '\n'.join(tests_results))
print('Tests run successful. Check report at', test_results_path)
process_essay_answer_language_classification(answers_path) | [
"This is an essay answer. Say 'Portuguese' if the answer is in Portuguese, or 'Non-Portuguese' if the answer is in any other language.\n\n---\n\nPLACEHOLDER"
] |
2024-01-10 | cheddarking/octoai-sdxl-demo | gcp-deploy~app_octo.py | # Dependencies:
# simpleaichat==0.2.0
# chainlit==0.6.2
# anthropic==0.3.8
# openai==0.27.8
# octoai-sdk==0.2.1
#
# environment: octo
import openai
import chainlit as cl
from chainlit.action import Action
from config import OCTO_SYSTEM_PROMPT
import re
import json
import asyncio
import io
from octoai_functions import generate_image
from simpleaichat import AIChat
from simpleaichat import AsyncAIChat
### UTILITY FUNCTIONS ###
def run_chatgpt(prompt):
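    # Ask gpt-3.5-turbo to expand a short description into a single creative text-to-image prompt.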
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{"role": "system", "content": "You are a helpful assistant that creates text-to-image prompts for AI image generation. Provide a single prompt for each request. Be as creative and imaginative as possible. "},
{"role": "user", "content": 'Write a prompt for an image of: a woman in a red dress eating a pizza at a restaurant'},
{"role": "assistant", "content": " A stylish woman in a red dress enjoying a slice of pizza and a glass of wine in a cozy restaurant."},
{"role": "user", "content": "Write a prompt for an image of: " + prompt}
]
)
text = response['choices'][0]['message']['content'].strip() #
return text
# Define the functions that will be used by the agent
functions=[
{
"name": "generate_image",
"description": "Use the image prompt you generated from the user's input to generate an image using Stable Diffusion",
"parameters": {
"type": "object",
"properties": {
"prompt": {
"type": "string",
"description": "The image prompt you generated from the user's input",
}
},
"required": ["prompt"],
},
},
]
async def octo_gen(prompt):
'''Generate an image using the prompt'''
image_response, image = generate_image(prompt)
return image_response, image
### OPENAI API SETUP ###
settings = {
"temperature": 0.7,
"max_tokens": 500,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
}
gpt3 = "gpt-3.5-turbo-0613"
gpt4 = "gpt-4-0613"
ai = AsyncAIChat(api_key="key",
system=OCTO_SYSTEM_PROMPT,
model=gpt3,
console=False,
params=settings,
)
@cl.on_chat_start
async def start_chat():
await cl.Avatar(
name="stability.ai",
url="https://avatars.githubusercontent.com/u/100950301?s=200&v=4",
).send()
await cl.Avatar(
name="leonardo.ai",
url="https://avatars.githubusercontent.com/u/115455698?s=200&v=4",
).send()
await cl.Avatar(
name="chatgpt",
url="https://avatars.githubusercontent.com/u/14957082?s=200&v=4",
).send()
await cl.Avatar(
name="octoai",
url="https://avatars.githubusercontent.com/u/53242847?s=200&v=4",
).send()
await cl.Message(
content="Welcome! Please enter your prompt to generate an image", author="octoai"
).send()
# # send a welcome message to the user and ask them to provide a prompt
# cl.Message(content="Welcome to the Stable Diffusion Image Prompter. Please provide a prompt for an image you would like to generate.").send()
@cl.on_message
async def main(message: str):
await cl.Message(author="octoai", content="").send()
completions = []
# response = ai(message, tools=[octo_gen])
# response = ai(message)
# openai_msg = cl.Message(author="chatgpt", content=response)
# openai_msg = cl.Message(author="chatgpt", content="")
# async for chunk in await ai.stream(message):
# response_td = chunk["response"] # dict contains "delta" for the new token and "response"
# # print(response_td)
# completions.append(response_td)
# await openai_msg.stream_token(response_td, is_sequence=True)
# response = ai.stream(message)
# async for chunk in response:
# response_td = chunk["response"] # dict contains "delta" for the new token and "response"
# # print(response_td)
# completions.append(response_td)
# for token in completions:
# openai_msg.stream_token(token)
# await openai_msg.send()
# cl.Message(author="stability.ai", content=response).send()
# # send a message informing the user that the agent is generating an image
# cl.Message(author="stability.ai", content="Now generating the image...").send()
# Generate an image using the prompt
# convert completions list to string
# response = ''.join(completions)
image_response, image = await octo_gen(message)
image_name = cl.user_session.get("generated_image")
if image:
elements = [
cl.Image(
content=image,
name=image_name,
display="inline",
)
]
await cl.Message(author="octoai", content=image_response, elements=elements).send()
# prompt = sd_prompter(message)
# send a message informing the user that the agent is creating a prompt
# cl.Message(author="stability.ai", content="Created a prompt for your image...").send()
# cl.Message(author="stability.ai", content=f"Created prompt: {prompt}").send()
# cl.user_session.set(image_response, image)
# cl.user_session.set("generated_image", name)
| [
"Write a prompt for an image of: PLACEHOLDER",
"You are a helpful assistant that creates text-to-image prompts for AI image generation. Provide a single prompt for each request. Be as creative and imaginative as possible. ",
" A stylish woman in a red dress enjoying a slice of pizza and a glass of wine in a cozy restaurant.",
"Write a prompt for an image of: a woman in a red dress eating a pizza at a restaurant"
] |
2024-01-10 | cheddarking/octoai-sdxl-demo | app_octo_cl_0.6.py | # Dependencies:
# simpleaichat==0.2.0
# chainlit==0.6.1
# anthropic==0.3.8
# openai==0.27.8
# octoai-sdk==0.2.0
#
# environment: octo
import openai
import chainlit as cl
from chainlit.action import Action
from config import OCTO_SYSTEM_PROMPT
import re
import json
import asyncio
import io
from octoai_functions import generate_image
from simpleaichat import AIChat
from simpleaichat import AsyncAIChat
### UTILITY FUNCTIONS ###
def run_chatgpt(prompt):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{"role": "system", "content": "You are a helpful assistant that creates text-to-image prompts for AI image generation. Provide a single prompt for each request. Be as creative and imaginative as possible. "},
{"role": "user", "content": 'Write a prompt for an image of: a woman in a red dress eating a pizza at a restaurant'},
{"role": "assistant", "content": " A stylish woman in a red dress enjoying a slice of pizza and a glass of wine in a cozy restaurant."},
{"role": "user", "content": "Write a prompt for an image of: " + prompt}
]
)
text = response['choices'][0]['message']['content'].strip() #
return text
# Define the functions that will be used by the agent
functions=[
{
"name": "generate_image",
"description": "Use the image prompt you generated from the user's input to generate an image using Stable Diffusion",
"parameters": {
"type": "object",
"properties": {
"prompt": {
"type": "string",
"description": "The image prompt you generated from the user's input",
}
},
"required": ["prompt"],
},
},
]
async def octo_gen(prompt):
'''Generate an image using the prompt'''
image_response, image = generate_image(prompt)
return image_response, image
### OPENAI API SETUP ###
settings = {
"temperature": 0.7,
"max_tokens": 500,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
}
gpt3 = "gpt-3.5-turbo-0613"
gpt4 = "gpt-4-0613"
ai = AsyncAIChat(api_key="key",
system=OCTO_SYSTEM_PROMPT,
model=gpt3,
console=False,
params=settings,
)
@cl.on_chat_start
async def start_chat():
await cl.Avatar(
name="stability.ai",
url="https://avatars.githubusercontent.com/u/100950301?s=200&v=4",
).send()
await cl.Avatar(
name="leonardo.ai",
url="https://avatars.githubusercontent.com/u/115455698?s=200&v=4",
).send()
await cl.Avatar(
name="chatgpt",
url="https://avatars.githubusercontent.com/u/14957082?s=200&v=4",
).send()
await cl.Avatar(
name="octoai",
url="https://avatars.githubusercontent.com/u/53242847?s=200&v=4",
).send()
await cl.Message(
content="Welcome! Please enter your prompt to generate an image", author="octoai"
).send()
# # send a welcome message to the user and ask them to provide a prompt
# cl.Message(content="Welcome to the Stable Diffusion Image Prompter. Please provide a prompt for an image you would like to generate.").send()
@cl.on_message
async def main(message: str):
await cl.Message(author="octoai", content="").send()
completions = []
# response = ai(message, tools=[octo_gen])
# response = ai(message)
# openai_msg = cl.Message(author="chatgpt", content=response)
# openai_msg = cl.Message(author="chatgpt", content="")
# async for chunk in await ai.stream(message):
# response_td = chunk["response"] # dict contains "delta" for the new token and "response"
# # print(response_td)
# completions.append(response_td)
# await openai_msg.stream_token(response_td, is_sequence=True)
# response = ai.stream(message)
# async for chunk in response:
# response_td = chunk["response"] # dict contains "delta" for the new token and "response"
# # print(response_td)
# completions.append(response_td)
# for token in completions:
# openai_msg.stream_token(token)
# await openai_msg.send()
# cl.Message(author="stability.ai", content=response).send()
# # send a message informing the user that the agent is generating an image
# cl.Message(author="stability.ai", content="Now generating the image...").send()
# Generate an image using the prompt
# convert completions list to string
# response = ''.join(completions)
image_response, image = await octo_gen(message)
image_name = cl.user_session.get("generated_image")
if image:
elements = [
cl.Image(
content=image,
name=image_name,
display="inline",
)
]
await cl.Message(author="octoai", content=image_response, elements=elements).send()
# prompt = sd_prompter(message)
# send a message informing the user that the agent is creating a prompt
# cl.Message(author="stability.ai", content="Created a prompt for your image...").send()
# cl.Message(author="stability.ai", content=f"Created prompt: {prompt}").send()
# cl.user_session.set(image_response, image)
# cl.user_session.set("generated_image", name)
| [
"Write a prompt for an image of: PLACEHOLDER",
"You are a helpful assistant that creates text-to-image prompts for AI image generation. Provide a single prompt for each request. Be as creative and imaginative as possible. ",
" A stylish woman in a red dress enjoying a slice of pizza and a glass of wine in a cozy restaurant.",
"Write a prompt for an image of: a woman in a red dress eating a pizza at a restaurant"
] |
2024-01-10 | cheddarking/octoai-sdxl-demo | app_octo_cl_0.2.py | # Dependencies:
# simpleaichat==0.2.0
# chainlit==0.2.111
# anthropic==0.3.8
# openai==0.27.8
# octoai-sdk==0.2.0
#
# environment: octo
import openai
import chainlit as cl
from chainlit.action import Action
from config import OCTO_SYSTEM_PROMPT
import re
import json
import asyncio
import io
from octoai_functions import generate_image
from simpleaichat import AIChat
from simpleaichat import AsyncAIChat
### UTILITY FUNCTIONS ###
def run_chatgpt(prompt):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-0613",
messages=[
{"role": "system", "content": "You are a helpful assistant that creates text-to-image prompts for AI image generation. Provide a single prompt for each request. Be as creative and imaginative as possible. "},
{"role": "user", "content": 'Write a prompt for an image of: a woman in a red dress eating a pizza at a restaurant'},
{"role": "assistant", "content": " A stylish woman in a red dress enjoying a slice of pizza and a glass of wine in a cozy restaurant."},
{"role": "user", "content": "Write a prompt for an image of: " + prompt}
]
)
text = response['choices'][0]['message']['content'].strip() #
return text
# Define the functions that will be used by the agent
functions=[
{
"name": "generate_image",
"description": "Use the image prompt you generated from the user's input to generate an image using Stable Diffusion",
"parameters": {
"type": "object",
"properties": {
"prompt": {
"type": "string",
"description": "The image prompt you generated from the user's input",
}
},
"required": ["prompt"],
},
},
]
def octo_gen(prompt):
'''Generate an image using the prompt'''
image_response, image = generate_image(prompt)
return image_response, image
### OPENAI API SETUP ###
settings = {
"temperature": 0.7,
"max_tokens": 500,
"top_p": 1,
"frequency_penalty": 0,
"presence_penalty": 0,
}
gpt3 = "gpt-3.5-turbo-0613"
gpt4 = "gpt-4-0613"
ai = AsyncAIChat(api_key="key",
system=OCTO_SYSTEM_PROMPT,
model=gpt3,
console=False,
params=settings,
)
@cl.on_chat_start
def start_chat():
# cl.Avatar(
# name="stability.ai",
# url="https://avatars.githubusercontent.com/u/100950301?s=200&v=4",
# ).send()
# cl.Avatar(
# name="leonardo.ai",
# url="https://avatars.githubusercontent.com/u/115455698?s=200&v=4",
# ).send()
# cl.Avatar(
# name="chatgpt",
# url="https://avatars.githubusercontent.com/u/14957082?s=200&v=4",
# ).send()
# cl.Avatar(
# name="octoai",
# url="https://avatars.githubusercontent.com/u/53242847?s=200&v=4",
# ).send()
cl.Message(
content="Welcome! Please enter your prompt to generate an image", author="octoai"
).send()
# # send a welcome message to the user and ask them to provide a prompt
# cl.Message(content="Welcome to the Stable Diffusion Image Prompter. Please provide a prompt for an image you would like to generate.").send()
@cl.on_message
def main(message: str):
cl.Message(author="octoai", content="").send()
completions = []
# response = ai(message, tools=[octo_gen])
# response = ai(message)
# openai_msg = cl.Message(author="chatgpt", content=response)
# openai_msg = cl.Message(author="chatgpt", content="")
# async for chunk in await ai.stream(message):
# response_td = chunk["response"] # dict contains "delta" for the new token and "response"
# # print(response_td)
# completions.append(response_td)
# await openai_msg.stream_token(response_td, is_sequence=True)
# response = ai.stream(message)
# async for chunk in response:
# response_td = chunk["response"] # dict contains "delta" for the new token and "response"
# # print(response_td)
# completions.append(response_td)
# for token in completions:
# openai_msg.stream_token(token)
# await openai_msg.send()
# cl.Message(author="stability.ai", content=response).send()
# # send a message informing the user that the agent is generating an image
# cl.Message(author="stability.ai", content="Now generating the image...").send()
# Generate an image using the prompt
# convert completions list to string
# response = ''.join(completions)
image_response, image = octo_gen(message)
image_name = cl.user_session.get("generated_image")
if image:
elements = [
cl.Image(
content=image,
name=image_name,
display="inline",
)
]
cl.Message(author="octoai", content=image_response, elements=elements).send()
# prompt = sd_prompter(message)
# send a message informing the user that the agent is creating a prompt
# cl.Message(author="stability.ai", content="Created a prompt for your image...").send()
# cl.Message(author="stability.ai", content=f"Created prompt: {prompt}").send()
# cl.user_session.set(image_response, image)
# cl.user_session.set("generated_image", name)
| [
"Write a prompt for an image of: PLACEHOLDER",
"You are a helpful assistant that creates text-to-image prompts for AI image generation. Provide a single prompt for each request. Be as creative and imaginative as possible. ",
" A stylish woman in a red dress enjoying a slice of pizza and a glass of wine in a cozy restaurant.",
"Write a prompt for an image of: a woman in a red dress eating a pizza at a restaurant"
] |
2024-01-10 | KaranpreetRaja/pipegen | api~pipelineManager.py | import weaviate
import json
import os
import requests
from dotenv import load_dotenv
import cohere
from cohere.custom_model_dataset import CsvDataset, InMemoryDataset, JsonlDataset
# load WEVIATE key and url and OpenAI key from .env file
load_dotenv()
weaviateKey = os.getenv('WEVIATE_KEY')
weaviateURL = os.getenv('WEVIATE_URL')
openaiKey = os.getenv('OPENAI_KEY')
cohereKey = os.getenv('COHERE_KEY')
class Pipeline:
def __init__(self, request):
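        # Unpack pipeline metadata, uploads and model configuration from the request,
        # and create the Cohere client used for generation and fine-tuning.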
self.request = request
self.name = request['name']
self.description = request['description']
self.author = request['author']
self.created = request['created']
self.last_updated = request['last_updated']
self.visibility_public = request['visibility_public']
self.has_upload = request['has_upload']
self.dynamic_upload = request['dynamic_upload'] if self.request['has_upload'] else False
self.uploads = self.handle_uploads(request)
self.model = self.handle_model(request)
self.co = cohere.Client(cohereKey)
def handle_uploads(self, request):
# Logic to handle and store uploads
uploads = []
for upload in request['uploads']:
current_upload = {
'name': upload['name'],
'type': upload['type'],
'content': upload['content'],
'reference': upload['reference']
}
uploads.append(current_upload)
return uploads
def handle_model(self, request):
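        # Normalize the model section of the request, keeping the custom-training
        # fields only when is_custom is set.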
model = request['model']
current_model = {
'type': model['type'],
'is_custom': model['is_custom'],
'train_file_format': model['train_file_format'] if model['is_custom'] else 'N/A',
'train_file': model['train_file'] if model['is_custom'] else 'N/A',
'has_test': model['has_test'] if model['is_custom'] else False,
'test_file': model['test_file'] if model['is_custom'] and model['has_test'] else 'N/A',
'generation': model['generation']
}
        return current_model
def process_data(self, document_list):
'''
This function processes the data from each upload and adds it to a Weaviate vector database
'''
weaviate_client = weaviate.Client(
url = weaviateURL,
auth_client_secret=weaviate.AuthApiKey(api_key=weaviateKey),
additional_headers = {
"X-OpenAI-Api-Key": openaiKey,
}
)
weaviate_client.schema.get()
class_obj = {
"class": "Document",
"vectorizer": "text2vec-openai",
"moduleConfig": {
"text2vec-openai": {},
"generative-openai": {}
}
}
weaviate_client.schema.create_class(class_obj)
weaviate_client.batch.configure(batch_size=100) # Configure batch
with weaviate_client.batch as batch: # Initialize a batch process
for document in document_list: # Batch import data
batch.add_data_object(
data_object=document,
class_name="Document"
)
        # Exiting the batch context manager above flushes the queued objects to Weaviate.
# save database to class
self.database = weaviate_client
def search_data(self, search_query):
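        # Run a near-text query against the Weaviate 'Document' class and return the matching chunks.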
search_result = self.database.query.get('Document').with_near_text(search_query).do()
documents = [result['chunk'] for result in search_result['data']['Get']['Document']]
return documents
    def create_custom_model(self):
        """Fine-tune a custom Cohere model from the configured training file."""
        if self.model['train_file_format'] == 'csv':
            dataset = CsvDataset(train_file=self.model['train_file'], delimiter=",")
        elif self.model['train_file_format'] == 'jsonl':
            dataset = JsonlDataset(train_file=self.model['train_file'])
        else:
            # Add other file formats if necessary
            raise ValueError(f"Unsupported train_file_format: {self.model['train_file_format']}")
        finetune = self.co.create_custom_model(self.name, dataset=dataset, model_type=self.model['type'])
        return finetune
def create_pipeline(self):
if self.model['is_custom']:
model_id = self.create_custom_model()
model = self.co.get_model(model_id)
else:
# use the default model from cohere
model = self.co.get_model(self.model['type'])
# add RAG capabilities
rag_model = self.add_rag_to_model(model)
return rag_model
def add_rag_to_model(self, model, prompt, documents):
rag_model = self.co.chat(
            model=model,
message=prompt,
documents=documents,
connectors=[{"id": "web-search"}]
)
return rag_model
def call_rag_model(self, prompt):
response = self.pipeline.chat(
message=prompt,
documents=self.database.get_documents(),
)
return response
def export_as_json(self):
# TODO: implement good logic to export the pipeline as a JSON file
return self.request
def initialize_weaviate():
'''
This function processes the data from each upload and adds it to a Weaviate vector database
'''
client = weaviate.Client(
url = weaviateURL,
auth_client_secret=weaviate.AuthApiKey(api_key=weaviateKey),
additional_headers = {
"X-OpenAI-Api-Key": openaiKey,
}
)
print(client.schema.get())
initialize_weaviate() | [
"content"
] |
2024-01-10 | cliffweng/sandbox-conversant-lib | conversant~prompt_chatbot.py | # Copyright (c) 2022 Cohere Inc. and its affiliates.
#
# Licensed under the MIT License (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License in the LICENSE file at the top
# level of this repository.
import json
import logging
import os
from typing import Any, Dict
import cohere
import jsonschema
import conversant
from conversant.chatbot import Chatbot, Interaction
from conversant.prompts.chat_prompt import ChatPrompt
from conversant.prompts.prompt import Prompt
PERSONA_MODEL_DIRECTORY = f"{os.path.dirname(conversant.__file__)}/personas"
PERSONA_JSON_SCHEMA = {
"type": "object",
"properties": {
"chatbot_config": {
"type": "object",
"properties": {
"max_context_examples": {"type": "integer"},
"avatar": {"type": "string"},
},
},
"client_config": {
"type": "object",
"properties": {
"model": {"type": "string"},
"max_tokens": {"type": "integer"},
"temperature": {"type": "number"},
"frequency_penalty": {"type": "number"},
"presence_penalty": {"type": "number"},
"stop_sequences": {"type": "array"},
},
},
"prompt_config": {
"type": "object",
},
},
}
class PromptChatbot(Chatbot):
"""Use prompt templates and LLM generation to define a chatbot.
This bot makes no use of external knowledge sources.
"""
def __init__(
self,
client: cohere.Client,
prompt: Prompt,
persona_name: str = "",
chatbot_config: Dict[str, Any] = {},
client_config: Dict[str, Any] = {},
):
"""Enriches init by adding a prompt.
Args:
client (cohere.Client): Cohere client for API
prompt (Prompt): Prompt object to direct behavior.
persona_name (str, optional): Bot's persona name. Defaults to empty string.
chatbot_config: (Dict[str, Any], optional): Bot's chat config. Defaults to
empty dict.
client_config (Dict[str, Any], optional): Bot's client config. Defaults to
empty dict.
"""
super().__init__(client)
self.prompt = prompt
self.persona_name = persona_name
self.configure_chatbot(chatbot_config)
self.configure_client(client_config)
self.chat_history = []
self.prompt_history = [self.prompt.to_string()]
def __repr__(self) -> str:
return json.dumps(self.to_dict(), indent=4, default=str)
@property
def user_name(self):
"""
Returns:
str: The name of the user, defined in the prompt. Defaults to "User".
"""
if hasattr(self.prompt, "user_name"):
return self.prompt.user_name
else:
return "User"
@property
def bot_name(self):
"""
Returns:
str: The name of the chatbot, defined in the prompt. Defaults to
"PromptChatbot".
"""
if hasattr(self.prompt, "bot_name"):
return self.prompt.bot_name
else:
return "PromptChatbot"
@property
def latest_prompt(self) -> str:
"""Retrieves the latest prompt.
Returns:
str: The prompt most recently added to the prompt history.
"""
return self.prompt_history[-1]
def reply(self, query: str) -> Interaction:
"""Replies to a query given a chat history.
The reply is then generated directly from a call to a LLM.
Args:
query (str): A query passed to the prompt chatbot.
Returns:
Interaction: Dictionary of query and generated LLM response
"""
# The current prompt is assembled from the initial prompt,
# from the chat history with a maximum of max_context_examples,
# and from the current query
current_prompt = self.get_current_prompt(query)
# Make a call to Cohere's co.generate API
generated_object = self.co.generate(
model=self.client_config["model"],
prompt=current_prompt,
max_tokens=self.client_config["max_tokens"],
temperature=self.client_config["temperature"],
frequency_penalty=self.client_config["frequency_penalty"],
presence_penalty=self.client_config["presence_penalty"],
stop_sequences=self.client_config["stop_sequences"],
)
# If response was cut off by .generate() finding a stop sequence,
# remove that sequence from the response.
response = generated_object.generations[0].text
for stop_seq in self.client_config["stop_sequences"]:
if response.endswith(stop_seq):
response = response[: -len(stop_seq)]
response = response.lstrip()
# We need to remember the current response in the chat history for future
# responses.
self.chat_history.append(self.prompt.create_interaction(query, response))
self.prompt_history.append(current_prompt)
return response
def get_current_prompt(self, query) -> str:
"""Stitches the prompt with a trailing window of the chat.
Args:
query (str): The current user query.
Returns:
str: The current prompt given a query.
"""
# get base prompt
base_prompt = self.prompt.to_string() + "\n"
# get context prompt
context_prompt_lines = []
trimmed_chat_history = (
self.chat_history[-self.chatbot_config["max_context_examples"] :]
if self.chatbot_config["max_context_examples"] > 0
else []
)
# TODO when prompt is updated, the history is mutated
# as it is recreated using the new prompt. A possible fix is to save the old
# prompt in history and use it when recreating.
for turn in trimmed_chat_history:
context_prompt_lines.append(self.prompt.create_interaction_string(**turn))
context_prompt = self.prompt.example_separator + "".join(context_prompt_lines)
# get query prompt
query_prompt = self.prompt.create_interaction_string(query)
current_prompt = base_prompt + context_prompt + query_prompt
return current_prompt.strip()
def configure_chatbot(self, chatbot_config: Dict = {}) -> None:
"""Configures chatbot options.
Args:
chatbot_config (Dict, optional): Updates self.chatbot_config. Defaults
to {}.
"""
# We initialize the chatbot to these default config values.
if not hasattr(self, "chatbot_config"):
self.chatbot_config = {"max_context_examples": 10, "avatar": ":robot:"}
# Override default config values with the config passed in
if isinstance(chatbot_config, Dict):
self.chatbot_config.update(chatbot_config)
else:
raise TypeError(
"chatbot_config must be of type Dict, but was passed in as "
f"{type(chatbot_config)}"
)
def configure_client(self, client_config: Dict = {}) -> None:
"""Configures client options.
Args:
client_config (Dict, optional): Updates self.client_config. Defaults to {}.
"""
# We initialize the client to these default config values.
if not hasattr(self, "client_config"):
self.client_config = {
"model": "xlarge",
"max_tokens": 100,
"temperature": 0.75,
"frequency_penalty": 0.0,
"presence_penalty": 0.0,
"stop_sequences": ["\n"],
}
# Override default config values with the config passed in
if isinstance(client_config, Dict):
self.client_config.update(client_config)
else:
raise TypeError(
"client_config must be of type Dict, but was passed in as "
f"{type(client_config)}"
)
@classmethod
def from_persona(
cls,
persona_name: str,
client: cohere.Client,
persona_dir: str = PERSONA_MODEL_DIRECTORY,
):
"""Initializes a PromptChatbot using a persona.
Args:
persona (str): Name of persona, corresponding to a .json file.
client (cohere.Client): Cohere client for API
persona_dir (str): Path to where pre-defined personas are.
"""
# Load the persona from a local directory
persona_path = os.path.join(persona_dir, persona_name, "config.json")
if os.path.isfile(persona_path):
logging.info(f"loading persona from {persona_path}")
else:
raise FileNotFoundError(f"{persona_path} cannot be found.")
with open(persona_path) as f:
persona = json.load(f)
# Validate that the persona follows our predefined schema
cls._validate_persona_dict(persona, persona_path)
return cls(
client=client,
prompt=ChatPrompt.from_dict(persona["chat_prompt_config"]),
persona_name=persona_name,
chatbot_config=persona["chatbot_config"],
client_config=persona["client_config"],
)
def to_dict(self) -> Dict[str, Any]:
"""Serializes this instance into a Python dictionary.
Returns:
Dict[str, Any]: Dictionary of attributes that defines this instance of a
PromptChatbot.
"""
return {
"co": self.co,
"prompt": self.prompt.to_dict(),
"persona_name": self.persona_name,
"chatbot_config": self.chatbot_config,
"client_config": self.client_config,
"chat_history": self.chat_history,
"prompt_history": self.prompt_history,
"user_name": self.user_name,
"bot_name": self.bot_name,
"latest_prompt": self.latest_prompt,
}
@staticmethod
def _validate_persona_dict(persona: Dict[str, Any], persona_path: str) -> None:
"""Validates formatting of a persona defined as a dictionary.
Args:
persona (Dict[str, Any]): A dictionary containing the persona.
persona_path: The path from which the persona was loaded.
"""
try:
jsonschema.validate(instance=persona, schema=PERSONA_JSON_SCHEMA)
except jsonschema.exceptions.ValidationError as e:
raise jsonschema.exceptions.ValidationError(
f"Type of values in given dictionary (persona from {persona_path}) do "
f"not match schema': {e}"
)
except KeyError as e:
raise KeyError(
f"Invalid key in given dictionary (persona from {persona_path})': {e}"
)
except Exception as e:
raise Exception(
"Failed to validate persona in given dictionary (persona from "
f"{persona_path}): {e}"
)
| [
"\n",
"[]",
"self.prompt.to_string() + \"\\nself.prompt.example_separator + \"\".join(context_prompt_lines)PLACEHOLDER"
] |
2024-01-10 | cliffweng/sandbox-conversant-lib | conversant~demo~streamlit_example.py | # Copyright (c) 2022 Cohere Inc. and its affiliates.
#
# Licensed under the MIT License (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License in the LICENSE file at the top
# level of this repository.
import ast
import copy
import os
import sys
import cohere
import emoji
import streamlit as st
from conversant.demo import ui, utils
from conversant.prompt_chatbot import PromptChatbot
from conversant.utils import demo_utils
USER_AVATAR_SHORTCODE = ":bust_in_silhouette:"
def get_reply() -> None:
"""Replies query from the message input, and resets the message input"""
_ = st.session_state.bot.reply(query=st.session_state.message_input)
st.session_state.message_input = ""
def initialize_chatbot() -> None:
"""Initializes the chatbot from a selected persona and saves the session state."""
if st.session_state.persona.startswith("from launch_demo") and len(sys.argv) > 1:
st.session_state.bot = demo_utils.decode_chatbot(
sys.argv[1], client=cohere.Client(os.environ.get("COHERE_API_KEY"))
) # Launched via demo_utils.launch_streamlit() utility function
elif st.session_state.persona == "":
st.session_state.bot = None
elif st.session_state.persona == "parrot":
st.session_state.bot = utils.ParrotChatbot()
else:
st.session_state.bot = PromptChatbot.from_persona(
emoji.replace_emoji(st.session_state.persona, "").strip(),
client=cohere.Client(os.environ.get("COHERE_API_KEY")),
)
if "bot" in st.session_state and st.session_state.bot:
update_session_with_prompt()
    # Reset the edit_prompt_json session state so we don't remain on the JSON editor when
# changing to another bot. This is because st_ace is unable to write
# new values from the current session state.
st.session_state.edit_prompt_json = False
def update_session_with_prompt() -> None:
"""Saves the prompt config dictionary into the session state."""
if "bot" in st.session_state and st.session_state.bot:
st.session_state.snapshot_prompt_config = copy.deepcopy(
st.session_state.bot.prompt.to_dict()
)
st.session_state.snapshot_chatbot_config = copy.deepcopy(
st.session_state.bot.chatbot_config
)
st.session_state.snapshot_client_config = copy.deepcopy(
st.session_state.bot.client_config
)
def update_prompt_from_json() -> None:
"""Evaluates JSON string and updates the session's bot prompt."""
if st.session_state.json_editor_input:
try:
prompt_config = ast.literal_eval(st.session_state.json_editor_input)
st.session_state.bot.prompt.update(prompt_config)
update_session_with_prompt()
st.session_state.error = ""
except Exception as e:
st.session_state.error = e
# This ensures rendering is prevented upon import of this file.
if __name__ == "__main__":
st.set_page_config(
page_title="Conversational personas using Cohere",
        page_icon="🎭",
layout="wide",
)
# Streamlit's default elements are not easy to style. Instead, we have to
# define styling in a custom CSS file and inject it into the Streamlit DOM.
# This is brittle and dependent on the DOM structure. Any changes to the layout
# will break the styling defined in this file.
with open(f"{os.path.dirname(__file__)}/styles.css") as f:
utils.style_using_css(f.read())
# We use the :bust_in_silhouette: emoji as a neutral user avatar.
st.session_state.user_avatar = utils.get_twemoji_url_from_shortcode(
USER_AVATAR_SHORTCODE
)
# Each persona is a directory in PERSONA_MODEL_DIRECTORY, each with its
# config.json file.
st.session_state.persona_options = utils.get_persona_options()
# Check if COHERE_API_KEY is not set from secrets.toml or os.environ
if "COHERE_API_KEY" not in os.environ:
raise KeyError(
"COHERE_API_KEY not found in st.secrets or os.environ. Please set it in "
".streamlit/secrets.toml or as an environment variable."
)
# A chatbot can be passed in as a base64 encoding of a pickled PromptChatbot object.
# This is only used when calling the launch_demo() method of a PromptChatbot object.
# The chatbot is then injected into the list of available personas in this streamlit
# demo.
if len(sys.argv) > 1 and "bot" not in st.session_state:
# The PromptChatbot passed in should be a base64 encoding of a pickled
# PromptChatbot object.
bot = demo_utils.decode_chatbot(
sys.argv[1], cohere.Client(os.environ.get("COHERE_API_KEY"))
)
if not isinstance(bot, PromptChatbot):
raise TypeError("base64 string passed in is not of class PromptChatbot")
else:
st.session_state.bot = bot
st.session_state.persona_options.insert(
0, f"from launch_demo: {st.session_state.bot.persona_name}"
)
# Page control flow logic is determined from the sidebar.
with st.sidebar:
st.selectbox(
"Choose a chatbot persona:",
options=st.session_state.persona_options,
key="persona",
on_change=initialize_chatbot,
)
st.checkbox(
"Edit prompt",
value=False,
key="edit_prompt",
on_change=update_session_with_prompt,
)
if st.session_state.edit_prompt:
st.checkbox(
"Use JSON editor",
value=False,
key="edit_prompt_json",
on_change=update_session_with_prompt,
)
# Initialize a settings container in the sidebar. This allows us to place
# Streamlit elements within this placeholder later in this script.
settings_placeholder = st.empty()
# Initialize a chat container as the middle of 3 vertical columns.
# Only visible when the edit prompt checkbox is not selected.
_, chat_placeholder, _ = st.columns([1, 1, 1])
with chat_placeholder.container():
chat_history_placeholder = st.empty()
message_input_placeholder = st.empty()
# Initialize a prompt json and string view as 2 vertical columns.
# Only visible when the edit prompt checkbox is selected.
prompt_json_column, prompt_string_column = st.columns([1, 1])
with prompt_json_column:
prompt_json_edit_placeholder = st.empty()
prompt_json_view_placeholder = st.empty()
with prompt_string_column:
prompt_string_placeholder = st.empty()
# Check if bot has been initialized in the Streamlit session.
if "bot" in st.session_state and st.session_state.bot:
# Initialize the bot avatar
bot_avatar_string = st.session_state.bot.chatbot_config["avatar"]
st.session_state.bot_avatar = (
utils.get_twemoji_url_from_shortcode(bot_avatar_string)
if emoji.is_emoji(emoji.emojize(bot_avatar_string, language="alias"))
else bot_avatar_string
)
# Editor view for the prompt
if st.session_state.edit_prompt:
# Edit the prompt using a JSON editor
if st.session_state.edit_prompt_json:
# The prompt JSON editor needs to be drawn first so that
# the displayed form values in the sidebar take reference from
# the editor.
with prompt_json_edit_placeholder.container():
ui.draw_prompt_json_editor(
max_height=955
) # st_ace only accepts hardcoded pixel values
update_prompt_from_json()
with settings_placeholder.container():
with st.expander("Client Config"):
ui.draw_client_config_form()
with st.expander("Chatbot Config"):
ui.draw_chatbot_config_form()
ui.draw_prompt_form(disabled=True)
with prompt_string_placeholder.container():
ui.draw_prompt_view(json=False)
# Edit the prompt using a form in the sidebar
else:
# The settings form needs to be drawn first so that
# the displayed JSON values in prompt JSON placeholder
# take reference from the form.
with settings_placeholder.container():
with st.expander("Client Config"):
ui.draw_client_config_form()
with st.expander("Chatbot Config"):
ui.draw_chatbot_config_form()
ui.draw_prompt_form(disabled=False)
with prompt_json_view_placeholder.container():
ui.draw_prompt_view(json=True)
with prompt_string_placeholder.container():
ui.draw_prompt_view(json=False)
# When in editor view, elements should anchored from the top.
utils.style_using_css(
"""
div.css-18e3th9.egzxvld2 {
display: flex;
align-items: flex-start;
overflow: visible;
}
"""
)
# Chat view with the persona
else:
# We can get the chatbot to begin the conversation with this.
# The session's state needs to be manually updated since we are not
# refreshing the entire Streamlit app.
if not st.session_state.bot.chat_history:
st.session_state.bot.reply(
query="Hello",
)
update_session_with_prompt()
# Draw UI elements for the sidebar
with settings_placeholder.container():
with st.expander("Client Config"):
ui.draw_client_config_form()
with st.expander("Chatbot Config"):
ui.draw_chatbot_config_form()
with st.expander("Prompt (JSON)"):
ui.draw_prompt_view(json=True)
with st.expander("Prompt (string)", expanded=True):
ui.draw_prompt_view(json=False)
# Draw chat history.
with chat_history_placeholder.container():
ui.draw_chat_history()
# Draw the message input field and a disclaimer.
with message_input_placeholder.container():
st.text_input(
label=f"Chat with {st.session_state.bot.prompt.bot_name}!",
placeholder="Type a message",
key="message_input",
on_change=get_reply,
)
ui.draw_disclaimer()
# When in chat view, anchor elements from the bottom so that
# the message input field is at the bottom (more natural).
utils.style_using_css(
"""
div.css-18e3th9.egzxvld2 {
display: flex;
align-items: flex-end;
overflow: visible;
}
"""
)
| [] |
2024-01-10 | cliffweng/sandbox-conversant-lib | tests~conftest.py | # Copyright (c) 2022 Cohere Inc. and its affiliates.
#
# Licensed under the MIT License (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License in the LICENSE file at the top
# level of this repository.
import json
import os
from typing import Any, Dict, Optional
import cohere
import pytest
from cohere.embeddings import Embeddings
from cohere.generation import Generations
from conversant.prompt_chatbot import PERSONA_MODEL_DIRECTORY, PromptChatbot
from conversant.prompts.chat_prompt import ChatPrompt
from conversant.prompts.prompt import Prompt
from conversant.prompts.rewrite_prompt import RewritePrompt
from conversant.search.document import Document
from conversant.search.local_searcher import LocalSearcher
from conversant.search.searcher import Searcher
class MockCo:
def generate(*args, **kwargs) -> Generations:
return Generations(
response={"generations": [dict(text="Hello!", likelihood=1.0)]},
return_likelihoods="NONE",
)
def embed(*args, **kwargs) -> Embeddings:
if "texts" in kwargs:
embeddings = [[1.0, 1.0]] * len(kwargs["texts"])
return Embeddings(embeddings=embeddings)
return Embeddings(embeddings=[[1.0, 1.0]])
@pytest.fixture
def mock_co() -> object:
"""Mock of Cohere client.
Returns:
object: A simple mock of Cohere's API client.
"""
return MockCo()
@pytest.fixture
def mock_prompt_config() -> Dict[str, Any]:
"""A Prompt config fixture for tests.
Returns:
Dict[str, Any]: Dictionary that can be used to construct to instantiate a
Prompt.
"""
return {
"preamble": "This is a prompt.",
"example_separator": "<example>\n",
"headers": {
"query": "<query>",
"context": "<context>",
"generation": "<generation>",
},
"examples": [
{
"query": "This is a query.",
"context": "This is a context.",
"generation": "This is a generation.",
},
{
"query": "This is a second query.",
"context": "This is a second context.",
"generation": "This is a second generation.",
},
],
"REQUIRED_KEYS": ["query", "context", "generation"],
}
@pytest.fixture
def mock_prompt(mock_prompt_config: Dict[str, Any]) -> Prompt:
"""Instantiates a Prompt fixture for tests.
Args:
mock_prompt_config (Dict[str, Any]): A config used to instantiate a Prompt
fixture.
Returns:
Prompt: A mock Prompt object fixture for tests.
"""
return Prompt(**mock_prompt_config)
@pytest.fixture
def mock_chat_prompt_config() -> Dict[str, Any]:
"""A ChatPrompt config fixture for tests.
Returns:
Dict[str, Any]: Dictionary that can be used to construct to instantiate a
ChatPrompt.
"""
return {
"preamble": "This is a chat prompt.",
"example_separator": "\n",
"headers": {"user": "User", "bot": "Mock Chatbot"},
"examples": [
[
{"user": "This is a user utterance", "bot": "This is a bot utterance"},
{
"user": "This is second user utterance",
"bot": "This is second bot utterance",
},
],
[
{
"user": "This is a user utterance in the second example.",
"bot": "This is a bot utterance in the second example.",
},
{
"user": "This is second user utterance in the second example.",
"bot": "This is second bot utterance in the second example.",
},
],
],
}
@pytest.fixture
def mock_chat_prompt(mock_chat_prompt_config: Dict[str, Any]) -> ChatPrompt:
"""A ChatPrompt config fixture for tests.
Args:
mock_chat_prompt_config (Dict[str, Any]): A config used to instantiate a
ChatPrompt fixture.
Returns:
ChatPrompt: A mock ChatPrompt object fixture for tests.
"""
return ChatPrompt(**mock_chat_prompt_config)
@pytest.fixture
def mock_rewrite_prompt_config() -> Dict[str, Any]:
"""A RewritePrompt config fixture for tests.
Returns:
Dict[str, Any]: Dictionary that can be used to construct to instantiate a
RewritePrompt.
"""
return {
"preamble": "This is a rewrite prompt.",
"example_separator": "\n",
"headers": {
"conversation": "<<CONVERSATION>>",
"fact": "<<FACTUAL_PARAGRAPH>>",
"rewrite": "<<REWRITE BASED ON THE ABOVE>>",
},
"examples": [
{
"conversation": "This is a wrong message.",
"fact": "This is a fact.",
"rewrite": "This is a message based on fact.",
},
{
"conversation": "This is a second wrong message.",
"fact": "This is a second fact.",
"rewrite": "This is a second message based on fact.",
},
],
}
@pytest.fixture
def mock_rewrite_prompt(mock_rewrite_prompt_config: Dict[str, Any]) -> RewritePrompt:
"""A RewritePrompt config fixture for tests.
Args:
mock_rewrite_prompt_config (Dict[str, Any]): A config used to instantiate a
RewritePrompt fixture.
Returns:
RewritePrompt: A mock RewritePrompt fixture for tests.
"""
return RewritePrompt(**mock_rewrite_prompt_config)
@pytest.fixture
def mock_prompt_chatbot(mock_co: object, mock_chat_prompt: ChatPrompt) -> PromptChatbot:
"""Instantiates a single bot fixture for tests.
Args:
mock_co (object): A mock Cohere client.
mock_chat_prompt (ChatPrompt): A mock ChatPrompt.
Returns:
PromptChatbot: A simple mock of a chatbot that works through prompts.
"""
return PromptChatbot(
client=mock_co,
prompt=mock_chat_prompt,
)
@pytest.fixture
def mock_persona() -> Dict[str, Any]:
"""Instantiates a persona dict fixture for tests.
Returns:
Dict[str, Any]: A mock dictionary used to initialize a PromptChatbot.
"""
persona_name = "watch-sales-agent"
persona_path = os.path.join(PERSONA_MODEL_DIRECTORY, persona_name, "config.json")
with open(persona_path) as f:
persona = json.load(f)
return persona
@pytest.fixture()
def mock_searcher(mock_co: cohere.Client) -> Searcher:
"""Mock fixture subclass to test abstract class methods.
Args:
mock_co (cohere.Client): Cohere API client.
Returns:
Searcher: Mock Searcher object.
"""
class MockSearcher(Searcher):
def search(self, query: str) -> Optional[Document]:
return super().search(query)
return MockSearcher(
client=mock_co,
documents=[
Document(
source_link="http://url",
doc_id="123",
content="test content",
)
],
)
@pytest.fixture()
def mock_local_searcher(mock_co: cohere.Client) -> LocalSearcher:
"""Mock fixture subclass to test class methods.
Args:
mock_co (cohere.Client): Cohere API client.
Returns:
LocalSearcher: Mock Searcher object.
"""
return LocalSearcher(
client=mock_co,
documents=[
Document(
source_link="http://url",
doc_id="123",
content="test content",
embedding=[1.0, -1.0],
),
Document(
source_link="http://url",
doc_id="123",
content="test content",
embedding=[1.0, 1.0],
),
],
)
| [] |
2024-01-10 | Ryan526/gpt-engineer | gpt_engineer~steps.py | import inspect
import re
import subprocess
from enum import Enum
from typing import List, Union
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from termcolor import colored
from gpt_engineer.ai import AI
from gpt_engineer.chat_to_files import (
format_file_to_input,
get_code_strings,
overwrite_files,
to_files,
)
from gpt_engineer.db import DBs
from gpt_engineer.file_selector import FILE_LIST_NAME, ask_for_files
from gpt_engineer.learning import human_review_input
Message = Union[AIMessage, HumanMessage, SystemMessage]
def setup_sys_prompt(dbs: DBs) -> str:
"""
Primes the AI with instructions as to how it should
generate code and the philosophy to follow
"""
return (
dbs.preprompts["roadmap"]
+ dbs.preprompts["generate"].replace("FILE_FORMAT", dbs.preprompts["file_format"])
+ "\nUseful to know:\n"
+ dbs.preprompts["philosophy"]
)
def setup_sys_prompt_existing_code(dbs: DBs) -> str:
"""
Similar to code generation, but using an existing code base.
"""
return (
dbs.preprompts["improve"].replace("FILE_FORMAT", dbs.preprompts["file_format"])
+ "\nUseful to know:\n"
+ dbs.preprompts["philosophy"]
)
def curr_fn() -> str:
"""
Get the name of the current function
NOTE: This will be the name of the function that called this function,
so it serves to ensure we don't hardcode the function name in the step,
but allow the step names to be refactored
"""
return inspect.stack()[1].function
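# Example: when simple_gen below calls curr_fn() for its step_name argument, the
# returned string is "simple_gen", so renaming a step also renames its log entry.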
# All steps below have the Step signature
def simple_gen(ai: AI, dbs: DBs) -> List[Message]:
"""Run the AI on the main prompt and save the results"""
messages = ai.start(setup_sys_prompt(dbs), dbs.input["prompt"], step_name=curr_fn())
to_files(messages[-1].content.strip(), dbs.workspace)
return messages
def clarify(ai: AI, dbs: DBs) -> List[Message]:
"""
Ask the user if they want to clarify anything and save the results to the workspace
"""
messages: List[Message] = [ai.fsystem(dbs.preprompts["clarify"])]
user_input = dbs.input["prompt"]
while True:
messages = ai.next(messages, user_input, step_name=curr_fn())
msg = messages[-1].content.strip()
if msg == "Nothing more to clarify.":
break
if msg.lower().startswith("no"):
print("Nothing more to clarify.")
break
print()
user_input = input('(answer in text, or "c" to move on)\n')
print()
if not user_input or user_input == "c":
print("(letting gpt-engineer make its own assumptions)")
print()
messages = ai.next(
messages,
"Make your own assumptions and state them explicitly before starting",
step_name=curr_fn(),
)
print()
return messages
user_input += (
"\n\n"
"Is anything else unclear? If yes, only answer in the form:\n"
"{remaining unclear areas} remaining questions.\n"
"{Next question}\n"
'If everything is sufficiently clear, only answer "Nothing more to clarify.".'
)
print()
return messages
def gen_spec(ai: AI, dbs: DBs) -> List[Message]:
"""
Generate a spec from the main prompt + clarifications and save the results to
the workspace
"""
messages = [
ai.fsystem(setup_sys_prompt(dbs)),
ai.fsystem(f"Instructions: {dbs.input['prompt']}"),
]
messages = ai.next(messages, dbs.preprompts["spec"], step_name=curr_fn())
dbs.memory["specification"] = messages[-1].content.strip()
return messages
def respec(ai: AI, dbs: DBs) -> List[Message]:
"""Asks the LLM to review the specs so far and reiterate them if necessary"""
messages = AI.deserialize_messages(dbs.logs[gen_spec.__name__])
messages += [ai.fsystem(dbs.preprompts["respec"])]
messages = ai.next(messages, step_name=curr_fn())
messages = ai.next(
messages,
(
"Based on the conversation so far, please reiterate the specification for "
"the program. "
"If there are things that can be improved, please incorporate the "
"improvements. "
"If you are satisfied with the specification, just write out the "
"specification word by word again."
),
step_name=curr_fn(),
)
dbs.memory["specification"] = messages[-1].content.strip()
return messages
def gen_unit_tests(ai: AI, dbs: DBs) -> List[dict]:
"""
Generate unit tests based on the specification, that should work.
"""
messages = [
ai.fsystem(setup_sys_prompt(dbs)),
ai.fuser(f"Instructions: {dbs.input['prompt']}"),
ai.fuser(f"Specification:\n\n{dbs.memory['specification']}"),
]
messages = ai.next(messages, dbs.preprompts["unit_tests"], step_name=curr_fn())
dbs.memory["unit_tests"] = messages[-1].content.strip()
to_files(dbs.memory["unit_tests"], dbs.workspace)
return messages
def gen_clarified_code(ai: AI, dbs: DBs) -> List[dict]:
"""Takes clarification and generates code"""
messages = AI.deserialize_messages(dbs.logs[clarify.__name__])
messages = [
ai.fsystem(setup_sys_prompt(dbs)),
] + messages[
1:
] # skip the first clarify message, which was the original clarify priming prompt
messages = ai.next(
messages,
dbs.preprompts["generate"].replace("FILE_FORMAT", dbs.preprompts["file_format"]),
step_name=curr_fn(),
)
to_files(messages[-1].content.strip(), dbs.workspace)
return messages
def gen_code_after_unit_tests(ai: AI, dbs: DBs) -> List[dict]:
"""Generates project code after unit tests have been produced"""
messages = [
ai.fsystem(setup_sys_prompt(dbs)),
ai.fuser(f"Instructions: {dbs.input['prompt']}"),
ai.fuser(f"Specification:\n\n{dbs.memory['specification']}"),
ai.fuser(f"Unit tests:\n\n{dbs.memory['unit_tests']}"),
]
messages = ai.next(
messages,
dbs.preprompts["generate"].replace("FILE_FORMAT", dbs.preprompts["file_format"]),
step_name=curr_fn(),
)
to_files(messages[-1].content.strip(), dbs.workspace)
return messages
def execute_entrypoint(ai: AI, dbs: DBs) -> List[dict]:
command = dbs.workspace["run.sh"]
print()
print(
colored(
"Do you want to execute this code? (y/n)",
"red",
)
)
print()
print(command)
print()
print("To execute, you can also press enter.")
print()
if input() not in ["", "y", "yes"]:
print("Ok, not executing the code.")
return []
print("Executing the code...")
print()
print(
colored(
"Note: If it does not work as expected, consider running the code"
+ " in another way than above.",
"green",
)
)
print()
print("You can press ctrl+c *once* to stop the execution.")
print()
p = subprocess.Popen("bash run.sh", shell=True, cwd=dbs.workspace.path)
try:
p.wait()
except KeyboardInterrupt:
print()
print("Stopping execution.")
print("Execution stopped.")
p.kill()
print()
return []
def gen_entrypoint(ai: AI, dbs: DBs) -> List[dict]:
messages = ai.start(
system=(
"You will get information about a codebase that is currently on disk in "
"the current folder.\n"
"From this you will answer with code blocks that includes all the necessary "
"unix terminal commands to "
"a) install dependencies "
"b) run all necessary parts of the codebase (in parallel if necessary).\n"
"Do not install globally. Do not use sudo.\n"
"Do not explain the code, just give the commands.\n"
"Do not use placeholders, use example values (like . for a folder argument) "
"if necessary.\n"
),
user="Information about the codebase:\n\n" + dbs.workspace["all_output.txt"],
step_name=curr_fn(),
)
print()
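# The model's reply contains fenced code blocks; the regex below captures each block's
# body so the shell commands can be joined together and written to run.sh.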
regex = r"```\S*\n(.+?)```"
matches = re.finditer(regex, messages[-1].content.strip(), re.DOTALL)
dbs.workspace["run.sh"] = "\n".join(match.group(1) for match in matches)
return messages
def use_feedback(ai: AI, dbs: DBs):
messages = [
ai.fsystem(setup_sys_prompt(dbs)),
ai.fuser(f"Instructions: {dbs.input['prompt']}"),
ai.fassistant(
dbs.workspace["all_output.txt"]
), # reload previously generated code
]
if dbs.input["feedback"]:
messages = ai.next(messages, dbs.input["feedback"], step_name=curr_fn())
to_files(messages[-1].content.strip(), dbs.workspace)
return messages
else:
print(
"No feedback was found in the input folder. Please create a file "
+ "called 'feedback' in the same folder as the prompt file."
)
exit(1)
def set_improve_filelist(ai: AI, dbs: DBs):
"""Sets the file list for files to work with in existing code mode."""
ask_for_files(dbs.input) # stores files as full paths.
return []
def assert_files_ready(ai: AI, dbs: DBs):
"""Checks that the required files are present for headless
improve code execution."""
assert (
"file_list.txt" in dbs.input
), "For auto_mode file_list.txt need to be in your project folder."
assert "prompt" in dbs.input, "For auto_mode a prompt file must exist."
return []
def get_improve_prompt(ai: AI, dbs: DBs):
"""
Asks the user what they would like to fix.
"""
if not dbs.input.get("prompt"):
dbs.input["prompt"] = input(
"\nWhat do you need to improve with the selected files?\n"
)
confirm_str = "\n".join(
[
"-----------------------------",
"The following files will be used in the improvement process:",
f"{FILE_LIST_NAME}:",
str(dbs.input["file_list.txt"]),
"",
"The inserted prompt is the following:",
f"'{dbs.input['prompt']}'",
"-----------------------------",
"",
"You can change these files in your project before proceeding.",
"",
"Press enter to proceed with modifications.",
"",
]
)
input(confirm_str)
return []
def improve_existing_code(ai: AI, dbs: DBs):
"""
After the file list and prompt have been acquired, this function is called
to send the formatted prompt to the LLM.
"""
files_info = get_code_strings(dbs.input) # this only has file names not paths
messages = [
ai.fsystem(setup_sys_prompt_existing_code(dbs)),
]
# Add files as input
for file_name, file_str in files_info.items():
code_input = format_file_to_input(file_name, file_str)
messages.append(ai.fuser(f"{code_input}"))
messages.append(ai.fuser(f"Request: {dbs.input['prompt']}"))
messages = ai.next(messages, step_name=curr_fn())
overwrite_files(messages[-1].content.strip(), dbs)
return messages
def fix_code(ai: AI, dbs: DBs):
messages = AI.deserialize_messages(dbs.logs[gen_code_after_unit_tests.__name__])
code_output = messages[-1].content.strip()
messages = [
ai.fsystem(setup_sys_prompt(dbs)),
ai.fuser(f"Instructions: {dbs.input['prompt']}"),
ai.fuser(code_output),
ai.fsystem(dbs.preprompts["fix_code"]),
]
messages = ai.next(
messages, "Please fix any errors in the code above.", step_name=curr_fn()
)
to_files(messages[-1].content.strip(), dbs.workspace)
return messages
def human_review(ai: AI, dbs: DBs):
"""Collects and stores human review of the code"""
review = human_review_input()
if review is not None:
dbs.memory["review"] = review.to_json() # type: ignore
return []
class Config(str, Enum):
DEFAULT = "default"
BENCHMARK = "benchmark"
SIMPLE = "simple"
TDD = "tdd"
TDD_PLUS = "tdd+"
CLARIFY = "clarify"
RESPEC = "respec"
EXECUTE_ONLY = "execute_only"
EVALUATE = "evaluate"
USE_FEEDBACK = "use_feedback"
IMPROVE_CODE = "improve_code"
EVAL_IMPROVE_CODE = "eval_improve_code"
EVAL_NEW_CODE = "eval_new_code"
# Define the steps to run for different configs
STEPS = {
Config.DEFAULT: [
clarify,
gen_clarified_code,
gen_entrypoint,
execute_entrypoint,
human_review,
],
Config.BENCHMARK: [
simple_gen,
gen_entrypoint,
],
Config.SIMPLE: [
simple_gen,
gen_entrypoint,
execute_entrypoint,
],
Config.TDD: [
gen_spec,
gen_unit_tests,
gen_code_after_unit_tests,
gen_entrypoint,
execute_entrypoint,
human_review,
],
Config.TDD_PLUS: [
gen_spec,
gen_unit_tests,
gen_code_after_unit_tests,
fix_code,
gen_entrypoint,
execute_entrypoint,
human_review,
],
Config.CLARIFY: [
clarify,
gen_clarified_code,
gen_entrypoint,
execute_entrypoint,
human_review,
],
Config.RESPEC: [
gen_spec,
respec,
gen_unit_tests,
gen_code_after_unit_tests,
fix_code,
gen_entrypoint,
execute_entrypoint,
human_review,
],
Config.USE_FEEDBACK: [use_feedback, gen_entrypoint, execute_entrypoint, human_review],
Config.EXECUTE_ONLY: [execute_entrypoint],
Config.EVALUATE: [execute_entrypoint, human_review],
Config.IMPROVE_CODE: [
set_improve_filelist,
get_improve_prompt,
improve_existing_code,
],
Config.EVAL_IMPROVE_CODE: [assert_files_ready, improve_existing_code],
Config.EVAL_NEW_CODE: [simple_gen],
}
# Future steps that can be added:
# run_tests_and_fix_files
# execute_entrypoint_and_fix_files_if_it_results_in_error
| [] |
2024-01-10 | tchoumi313/secu_log_pharmacie_management | pharmacy~DoctorViews.py | from django.shortcuts import render,redirect
from django.contrib.auth import authenticate,login,logout
from django.contrib import messages
from django.contrib.auth.forms import UserCreationForm
from .decorators import *
from django.db.models import ExpressionWrapper, Q, BooleanField
from django.utils.timezone import now
from .forms import *
from .models import *
from django.http import JsonResponse
from openai import OpenAI
from django.conf import settings
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
from fpdf import FPDF
# Assuming Django settings are properly configured with OPENAI_API_KEY
medicaments_disponibles = [
"Paracรฉtamol", "Ibuprofรจne", "Amoxicilline", "Metformine",
"Atorvastatine", "Lisinopril", "Amlodipine", "Omeprazole"
# Add other medications as needed
]
def create_prompt(nom, prenom, age, sexe, poids, temperature, antecedents_medicaux, symptomes, medicaments_disponibles, maladies_symptomes):
liste_medicaments = ', '.join(medicaments_disponibles)
# Format the list of diseases and their symptoms
formatted_maladies_symptomes = "\n".join([f"-- ('{maladie}', '{symptomes}')" for maladie, symptomes in maladies_symptomes])
prompt = f"""
Crรฉez une ordonnance mรฉdicale dรฉtaillรฉe et structurรฉe pour un patient avec les informations suivantes :
- Nom : {nom}
- Prรฉnom : {prenom}
- รge : {age}
- Sexe : {sexe}
- Poids : {poids} kg
- Tempรฉrature : {temperature} ยฐC
- Symptรดmes : {symptomes}
- Antรฉcรฉdents mรฉdicaux : {antecedents_medicaux}
- Liste des mรฉdicaments disponibles en pharmacie : {liste_medicaments}
Liste des maladies et de leurs symptรดmes possibles dans notre application :
{formatted_maladies_symptomes}
L'ordonnance doit inclure :
- la potentiel maladie dont le patient souffre(ceci parmis la liste des maladies fournies)
- Les mรฉdicaments appropriรฉs parmi la liste fournie en tenant compte des symptรดmes, des antรฉcรฉdents (contenant des allergies) et des informations du patient
- La posologie et la durรฉe du traitement
- Les recommandations spรฉcifiques pour le patient
NB:
Aucune reponse insensรฉ, si besoin prรฉcise que tu n'as pas de proposition a faire.
je ne veux aucune reponse hor contexte
Toujours repondre en franรงais
Ne pas include la liste claire des information venant de la base de donnees.
Si tu ne sais pas qoui dire, il faut juste repondre par Aucune decision ร proposer.
Veuillez รฉgalement inclure une mention ร la fin de l'ordonnance indiquant que celle-ci doit รชtre revue et approuvรฉe par un mรฉdecin avant utilisation.
Veuillez รฉgalement ne pas prescrire ce qu'il n'a pas dans la liste des medicaments.
"""
return prompt
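# Usage sketch (illustrative only -- every value below is invented, not taken from the
# application's database):
# prompt_text = create_prompt(
#     nom="Doe", prenom="Jane", age=34, sexe="F", poids=62, temperature=38.2,
#     antecedents_medicaux="allergie à la pénicilline", symptomes="fièvre, toux",
#     medicaments_disponibles=["Paracétamol", "Ibuprofène"],
#     maladies_symptomes=[("Grippe", "fièvre, toux, courbatures")],
# )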
#@csrf_exempt # Only for demonstration purposes. CSRF protection should be enabled in a real project.
#@require_POST
"""
def get_maladies_symptomes():
maladies_symptomes = []
maladies = Maladie.objects.all()
for maladie in maladies:
symptomes = Symptome.objects.filter(maladies_associees__pk=maladie.pk)
symptomes_list = ', '.join([symptome.nom_symptome for symptome in symptomes])
maladies_symptomes.append((maladie.nom_maladie, symptomes_list))
return maladies_symptomes
"""
def get_maladies_symptomes():
maladies_symptomes = []
maladies = Maladie.objects.all()
for maladie in maladies:
symptomes = Symptome.objects.filter(correspondance__pk=maladie.pk)
symptomes_list = ', '.join([symptome.nom_symptome for symptome in symptomes])
maladies_symptomes.append((maladie.nom_maladie, symptomes_list))
print("maladies:")
liste =[]
for cor in Correspondance.objects.all():
print(cor.symptome.nom_symptome)
print(cor.maladie.nom_maladie)
liste.append(f'{cor.symptome.nom_symptome}:{cor.maladie.nom_maladie}')
print(maladies_symptomes)
return maladies_symptomes
def generate_openai_response(request, patient, prescription):
print("Entering generate_openai_response function")
# Get patient information from the patient and prescription parameters
nom = patient.first_name
prenom = patient.last_name
age = patient.age
sexe = patient.gender
poids = prescription.poids
temperature = prescription.temperature
antecedents_medicaux = prescription.antecedents_medicaux
symptomes = prescription.symptoms
print("Before create_pdf_and_get_openai_response")
# Fetch the list of diseases and symptoms from the database
maladies_symptomes = get_maladies_symptomes()
# Create PDF and get OpenAI response
pdf_filename, openai_response = create_pdf_and_get_openai_response(
nom, prenom, age, sexe, poids, temperature, antecedents_medicaux, symptomes, maladies_symptomes
)
print("After create_pdf_and_get_openai_response")
# You can do further processing or send the response to the frontend as needed
return {'pdf_filename': pdf_filename, 'openai_response': openai_response}
def create_pdf_and_get_openai_response(nom, prenom, age, sexe, poids, temperature, antecedents_medicaux, symptomes,maladies_symptomes):
pdf = FPDF()
pdf.add_page()
pdf.set_font("Arial", size=12)
# En-tête
pdf.set_font("Arial", 'B', 16)
pdf.cell(200, 10, "e-Ordonnance Mรฉdicale", ln=True, align='C')
# Corps du document
stocks = Stock.objects.all().order_by("-id")
# Filter expired stocks
expired_stocks = stocks.annotate(
expired=ExpressionWrapper(Q(valid_to__lt=now()), output_field=BooleanField())
).filter(expired=True)
# Filter non-expired stocks
non_expired_stocks = stocks.annotate(
expired=ExpressionWrapper(Q(valid_to__lt=now()), output_field=BooleanField())
).filter(expired=False)
# Extract medication names from both expired and non-expired stocks
medicament_disponibles_expired = [stock.drug_name for stock in expired_stocks]
medicament_disponibles_non_expired = [stock.drug_name for stock in non_expired_stocks]
ordonnance_info = create_prompt(nom, prenom, age, sexe, poids, temperature, antecedents_medicaux, symptomes,
medicament_disponibles_non_expired, maladies_symptomes)
pdf.set_font("Arial", size=12)
pdf.multi_cell(0, 10, ordonnance_info)
# Note de bas de page
pdf.set_font("Arial", 'I', 10)
pdf.cell(0, 10, "Note: Cette ordonnance doit รชtre revue et approuvรฉe par un mรฉdecin.",
ln=True, align='C')
filename = f"{nom}_{prenom}_ordonnance.pdf"
pdf.output(filename)
# Get OpenAI response using the generated prompt
client = OpenAI(api_key=settings.OPENAI_API_KEY)
response = client.completions.create(
model="text-davinci-002",
prompt=ordonnance_info,
max_tokens=700
)
print("")
print(response.choices[0].text)
print("")
return filename, response.choices[0].text
def doctorHome(request):
prescip = Consultation.objects.all().count()
context={
"Prescription_total":prescip
}
return render(request,'doctor_templates/doctor_home.html',context)
def doctorProfile(request):
customuser=CustomUser.objects.get(id=request.user.id)
staff=Doctor.objects.get(admin=customuser.id)
form=DoctorForm()
if request.method == 'POST':
first_name=request.POST.get('first_name')
last_name=request.POST.get('last_name')
customuser=CustomUser.objects.get(id=request.user.id)
customuser.first_name=first_name
customuser.last_name=last_name
customuser.save()
staff=Doctor.objects.get(admin=customuser.id)
form =DoctorForm(request.POST,request.FILES,instance=staff)
staff.save()
if form.is_valid():
form.save()
context={
"form":form,
"staff":staff,
"user":customuser
}
return render(request,'doctor_templates/doctor_profile.html',context)
def managePatients(request):
patients=Patients.objects.all()
context={
"patients":patients,
}
return render(request,'doctor_templates/manage_patients.html',context)
def addConsultation(request, pk):
patient = Patients.objects.get(id=pk)
form = ConsultationForm(initial={'patient_id': patient})
if request.method == 'POST':
try:
form = ConsultationForm(request.POST)
if form.is_valid():
prescription = form.save()
print(prescription)
messages.success(request, 'Consultation added successfully')
print("here")
# Call the OpenAI function and pass the necessary parameters
openai_response = generate_openai_response(request, patient, prescription)
print("after")
# Redirect to the prescription result page with the prescription id
return render(request, 'doctor_templates/prescription_result.html', {'openai_response': openai_response})
except Exception as e:
messages.error(request, 'Consultation Not Added')
print(f'Exception: {str(e)}')
return redirect('manage_patient_doctor')
context = {
"form": form
}
return render(request, 'doctor_templates/prescribe_form.html', context)
def patient_personalDetails(request,pk):
patient=Patients.objects.get(id=pk)
prescrip=patient.prescription_set.all()
context={
"patient":patient,
"prescription":prescrip
}
return render(request,'doctor_templates/patient_personalRecords.html',context)
def deletePrescription(request,pk):
prescribe=Consultation.objects.get(id=pk)
if request.method == 'POST':
try:
prescribe.delete()
messages.success(request,'Consultation Deleted successfully')
return redirect('manage_precrip_doctor')
except Exception as e:
messages.error(request,'Consultation Not Deleted successfully')
print(f'Exception: {str(e)}')
return redirect('manage_precrip_doctor')
context={
"patient":prescribe
}
return render(request,'doctor_templates/sure_delete.html',context)
def managePrescription(request):
precrip=Consultation.objects.all()
patient = Patients.objects.all()
context={
"prescrips":precrip,
"patient":patient
}
return render(request,'doctor_templates/manage_prescription.html' ,context)
def editPrescription(request,pk):
prescribe=Consultation.objects.get(id=pk)
form=ConsultationForm(instance=prescribe)
if request.method == 'POST':
form=ConsultationForm(request.POST, instance=prescribe)
try:
if form.is_valid():
form.save()
messages.success(request,'Consultation Updated successfully')
return redirect('manage_precrip_doctor')
except:
messages.error(request,' Error!! Consultation Not Updated')
return redirect('manage_precrip_doctor')
context={
"patient":prescribe,
"form":form
}
return render(request,'doctor_templates/edit_prescription.html',context)
| [] |
2024-01-10 | onerachel/revolve2 | jlo~lea_cppn_array_openaies~ESoptimizer.py | import math
from random import Random
from typing import List, Tuple
import numpy as np
import numpy.typing as npt
from pyrr import Quaternion, Vector3
from revolve2.actor_controllers.cpg import CpgNetworkStructure
from sqlalchemy.ext.asyncio import AsyncEngine
from sqlalchemy.ext.asyncio.session import AsyncSession
from revolve2.actor_controller import ActorController
from revolve2.core.modular_robot import Body, ModularRobot
from revolve2.core.modular_robot.brains import (
BrainCpgNetworkStatic,
make_cpg_network_structure_neighbour,
)
from revolve2.core.optimization import ProcessIdGen
from revolve2.core.optimization.ea.openai_es import OpenaiESOptimizer
from revolve2.core.physics.actor import Actor
from revolve2.core.physics.running import (
ActorControl,
ActorState,
Batch,
Environment,
PosedActor,
Runner,
)
from revolve2.runners.isaacgym import LocalRunner
class ESOptimizer(OpenaiESOptimizer):
_body: Body
_actor: Actor
_dof_ids: List[int]
_cpg_network_structure: CpgNetworkStructure
_runner: Runner
_controllers: List[ActorController]
_simulation_time: int
_sampling_frequency: float
_control_frequency: float
_num_generations: int
async def ainit_new(
# type: ignore # TODO for now ignoring mypy complaint about LSP problem, override parent's ainit
self,
database: AsyncEngine,
session: AsyncSession,
process_id: int,
process_id_gen: ProcessIdGen,
rng: Random,
population_size: int,
sigma: float,
learning_rate: float,
robot_body: Body,
initial_mean: npt.NDArray[np.float_],
simulation_time: int,
sampling_frequency: float,
control_frequency: float,
num_generations: int,
cpg_structure: CpgNetworkStructure,
) -> None:
self._actor, self._dof_ids = robot_body.to_actor()
self._cpg_network_structure = cpg_structure
self._body = robot_body # ???
# nprng = np.random.Generator(
# np.random.PCG64(rng.randint(0, 2**63))
# ) # rng is currently not numpy, but this would be very convenient. do this until that is resolved.
# initial_mean = nprng.standard_normal(self._cpg_network_structure.num_params)
await super().ainit_new(
database=database,
session=session,
process_id=process_id,
process_id_gen=process_id_gen,
rng=rng,
population_size=population_size,
sigma=sigma,
learning_rate=learning_rate,
initial_mean=initial_mean,
)
self._init_runner()
self._simulation_time = simulation_time
self._sampling_frequency = sampling_frequency
self._control_frequency = control_frequency
self._num_generations = num_generations
async def ainit_from_database( # type: ignore # see comment at ainit_new
self,
database: AsyncEngine,
session: AsyncSession,
process_id: int,
process_id_gen: ProcessIdGen,
rng: Random,
robot_body: Body,
simulation_time: int,
sampling_frequency: float,
control_frequency: float,
num_generations: int,
cpg_structure: CpgNetworkStructure,
) -> bool:
if not await super().ainit_from_database(
database=database,
session=session,
process_id=process_id,
process_id_gen=process_id_gen,
rng=rng,
):
return False
self._body = robot_body # ???
self._actor, self._dof_ids = robot_body.to_actor()
self._cpg_network_structure = cpg_structure
self._init_runner()
self._simulation_time = simulation_time
self._sampling_frequency = sampling_frequency
self._control_frequency = control_frequency
self._num_generations = num_generations
return True
def _init_runner(self) -> None:
self._runner = LocalRunner(LocalRunner.SimParams(), headless=True)
async def _evaluate_population(
self,
database: AsyncEngine,
process_id: int,
process_id_gen: ProcessIdGen,
population: npt.NDArray[np.float_],
) -> npt.NDArray[np.float_]:
batch = Batch(
simulation_time=self._simulation_time,
sampling_frequency=self._sampling_frequency,
control_frequency=self._control_frequency,
control=self._control,
)
self._controllers = []
for params in population:
initial_state = self._cpg_network_structure.make_uniform_state(
0.5 * math.pi / 2.0
)
weight_matrix = self._cpg_network_structure.make_weight_matrix_from_params(params)
dof_ranges = self._cpg_network_structure.make_uniform_dof_ranges(1.0)
brain = BrainCpgNetworkStatic(
initial_state,
self._cpg_network_structure.num_cpgs,
weight_matrix,
dof_ranges,
)
controller = brain.make_controller(self._body, self._dof_ids)
bounding_box = self._actor.calc_aabb()
self._controllers.append(controller)
env = Environment()
env.actors.append(
PosedActor(
self._actor,
Vector3(
[
0.0,
0.0,
bounding_box.size.z / 2.0 - bounding_box.offset.z,
]
),
Quaternion(),
[0.0 for _ in controller.get_dof_targets()],
)
)
batch.environments.append(env)
states = await self._runner.run_batch(batch)
return np.array(
[
self._calculate_fitness(
states[0].envs[i].actor_states[0],
states[-1].envs[i].actor_states[0],
)
for i in range(len(population))
]
)
def _control(self, dt: float, control: ActorControl) -> None:
for control_i, controller in enumerate(self._controllers):
controller.step(dt)
control.set_dof_targets(control_i, 0, controller.get_dof_targets())
@staticmethod
def _calculate_fitness(begin_state: ActorState, end_state: ActorState) -> float:
# TODO simulation can continue slightly passed the defined sim time.
# distance traveled on the xy plane
return math.sqrt(
(begin_state.position[0] - end_state.position[0]) ** 2
+ ((begin_state.position[1] - end_state.position[1]) ** 2)
)
def _must_do_next_gen(self) -> bool:
return self.generation_number != self._num_generations
| [] |
2024-01-10 | onerachel/revolve2 | jlo~ea_cppn_cppn_revde~optimizer.py | import math
from random import Random
from typing import List, Tuple
import numpy as np
import numpy.typing as npt
from pyrr import Quaternion, Vector3
from revolve2.actor_controllers.cpg import CpgNetworkStructure
from sqlalchemy.ext.asyncio import AsyncEngine
from sqlalchemy.ext.asyncio.session import AsyncSession
from revolve2.actor_controller import ActorController
from revolve2.core.modular_robot import Body, ModularRobot
from revolve2.core.modular_robot.brains import (
BrainCpgNetworkStatic,
make_cpg_network_structure_neighbour,
)
from revolve2.core.optimization import ProcessIdGen
from revolve2.core.optimization.ea.openai_es import OpenaiESOptimizer
from revolve2.core.optimization.ea.de import DEOptimizer
from revolve2.core.physics.actor import Actor
from revolve2.core.physics.running import (
ActorControl,
ActorState,
Batch,
Environment,
PosedActor,
Runner,
)
from revolve2.runners.isaacgym import LocalRunner
class Optimizer(DEOptimizer):
_body: Body
_actor: Actor
_dof_ids: List[int]
_cpg_network_structure: CpgNetworkStructure
_runner: Runner
_controllers: List[ActorController]
_simulation_time: int
_sampling_frequency: float
_control_frequency: float
_num_generations: int
async def ainit_new( # type: ignore # TODO for now ignoring mypy complaint about LSP problem, override parent's ainit
self,
database: AsyncEngine,
session: AsyncSession,
process_id: int,
process_id_gen: ProcessIdGen,
rng: Random,
population_size: int,
sigma: float,
learning_rate: float,
robot_body: Body,
simulation_time: int,
sampling_frequency: float,
control_frequency: float,
num_generations: int,
) -> None:
self._body = robot_body
self._init_actor_and_cpg_network_structure()
nprng = np.random.Generator(
np.random.PCG64(rng.randint(0, 2**63))
) # rng is currently not numpy, but this would be very convenient. do this until that is resolved.
initial_mean = nprng.standard_normal(self._cpg_network_structure.num_params)
await super().ainit_new(
database=database,
session=session,
process_id=process_id,
process_id_gen=process_id_gen,
rng=rng,
population_size=population_size,
sigma=sigma,
learning_rate=learning_rate,
initial_mean=initial_mean,
)
self._init_runner()
self._simulation_time = simulation_time
self._sampling_frequency = sampling_frequency
self._control_frequency = control_frequency
self._num_generations = num_generations
async def ainit_from_database( # type: ignore # see comment at ainit_new
self,
database: AsyncEngine,
session: AsyncSession,
process_id: int,
process_id_gen: ProcessIdGen,
rng: Random,
robot_body: Body,
simulation_time: int,
sampling_frequency: float,
control_frequency: float,
num_generations: int,
) -> bool:
if not await super().ainit_from_database(
database=database,
session=session,
process_id=process_id,
process_id_gen=process_id_gen,
rng=rng,
):
return False
self._body = robot_body
self._init_actor_and_cpg_network_structure()
self._init_runner()
self._simulation_time = simulation_time
self._sampling_frequency = sampling_frequency
self._control_frequency = control_frequency
self._num_generations = num_generations
return True
def _init_actor_and_cpg_network_structure(self) -> None:
self._actor, self._dof_ids = self._body.to_actor()
active_hinges_unsorted = self._body.find_active_hinges()
active_hinge_map = {
active_hinge.id: active_hinge for active_hinge in active_hinges_unsorted
}
active_hinges = [active_hinge_map[id] for id in self._dof_ids]
self._cpg_network_structure = make_cpg_network_structure_neighbour(
active_hinges
)
def _init_runner(self) -> None:
self._runner = LocalRunner(LocalRunner.SimParams(), headless=True)
async def _evaluate_population(
self,
database: AsyncEngine,
process_id: int,
process_id_gen: ProcessIdGen,
population: npt.NDArray[np.float_],
) -> npt.NDArray[np.float_]:
batch = Batch(
simulation_time=self._simulation_time,
sampling_frequency=self._sampling_frequency,
control_frequency=self._control_frequency,
control=self._control,
)
self._controllers = []
for params in population:
initial_state = self._cpg_network_structure.make_uniform_state(
0.5 * math.pi / 2.0
)
weight_matrix = self._cpg_network_structure.make_weight_matrix_from_params(
params
)
dof_ranges = self._cpg_network_structure.make_uniform_dof_ranges(1.0)
brain = BrainCpgNetworkStatic(
initial_state,
self._cpg_network_structure.num_cpgs,
weight_matrix,
dof_ranges,
)
controller = brain.make_controller(self._body, self._dof_ids)
bounding_box = self._actor.calc_aabb()
self._controllers.append(controller)
env = Environment()
env.actors.append(
PosedActor(
self._actor,
Vector3(
[
0.0,
0.0,
bounding_box.size.z / 2.0 - bounding_box.offset.z,
]
),
Quaternion(),
[0.0 for _ in controller.get_dof_targets()],
)
)
batch.environments.append(env)
states = await self._runner.run_batch(batch)
return np.array(
[
self._calculate_fitness(
states[0].envs[i].actor_states[0],
states[-1].envs[i].actor_states[0],
)
for i in range(len(population))
]
)
def _control(self, dt: float, control: ActorControl) -> None:
for control_i, controller in enumerate(self._controllers):
controller.step(dt)
control.set_dof_targets(control_i, 0, controller.get_dof_targets())
@staticmethod
def _calculate_fitness(begin_state: ActorState, end_state: ActorState) -> float:
# TODO simulation can continue slightly passed the defined sim time.
# distance traveled on the xy plane
return math.sqrt(
(begin_state.position[0] - end_state.position[0]) ** 2
+ ((begin_state.position[1] - end_state.position[1]) ** 2)
)
def _must_do_next_gen(self) -> bool:
return self.generation_number != self._num_generations
| [] |
2024-01-10 | onerachel/revolve2 | examples~de~optimizer.py | import math
from random import Random
from typing import List, Tuple
import numpy as np
import numpy.typing as npt
from pyrr import Quaternion, Vector3
from revolve2.actor_controllers.cpg import CpgNetworkStructure
from sqlalchemy.ext.asyncio import AsyncEngine
from sqlalchemy.ext.asyncio.session import AsyncSession
from revolve2.actor_controller import ActorController
from revolve2.core.modular_robot import Body, ModularRobot
from revolve2.core.modular_robot.brains import (
BrainCpgNetworkStatic,
make_cpg_network_structure_neighbour,
)
from revolve2.core.optimization import ProcessIdGen
from revolve2.core.optimization.ea.openai_es import OpenaiESOptimizer
from revolve2.core.physics.actor import Actor
from revolve2.core.physics.running import (
ActorControl,
ActorState,
Batch,
Environment,
PosedActor,
Runner,
)
from revolve2.runners.isaacgym import LocalRunner
class Optimizer(OpenaiESOptimizer):
_body: Body
_actor: Actor
_dof_ids: List[int]
_cpg_network_structure: CpgNetworkStructure
_runner: Runner
_controllers: List[ActorController]
_simulation_time: int
_sampling_frequency: float
_control_frequency: float
_num_generations: int
async def ainit_new( # type: ignore # TODO for now ignoring mypy complaint about LSP problem, override parent's ainit
self,
database: AsyncEngine,
session: AsyncSession,
process_id: int,
process_id_gen: ProcessIdGen,
rng: Random,
population_size: int,
sigma: float,
learning_rate: float,
robot_body: Body,
simulation_time: int,
sampling_frequency: float,
control_frequency: float,
num_generations: int,
) -> None:
self._body = robot_body
self._init_actor_and_cpg_network_structure()
nprng = np.random.Generator(
np.random.PCG64(rng.randint(0, 2**63))
) # rng is currently not numpy, but this would be very convenient. do this until that is resolved.
initial_mean = nprng.standard_normal(self._cpg_network_structure.num_params)
await super().ainit_new(
database=database,
session=session,
process_id=process_id,
process_id_gen=process_id_gen,
rng=rng,
population_size=population_size,
sigma=sigma,
learning_rate=learning_rate,
initial_mean=initial_mean,
)
self._init_runner()
self._simulation_time = simulation_time
self._sampling_frequency = sampling_frequency
self._control_frequency = control_frequency
self._num_generations = num_generations
async def ainit_from_database( # type: ignore # see comment at ainit_new
self,
database: AsyncEngine,
session: AsyncSession,
process_id: int,
process_id_gen: ProcessIdGen,
rng: Random,
robot_body: Body,
simulation_time: int,
sampling_frequency: float,
control_frequency: float,
num_generations: int,
) -> bool:
if not await super().ainit_from_database(
database=database,
session=session,
process_id=process_id,
process_id_gen=process_id_gen,
rng=rng,
):
return False
self._body = robot_body
self._init_actor_and_cpg_network_structure()
self._init_runner()
self._simulation_time = simulation_time
self._sampling_frequency = sampling_frequency
self._control_frequency = control_frequency
self._num_generations = num_generations
return True
def _init_actor_and_cpg_network_structure(self) -> None:
self._actor, self._dof_ids = self._body.to_actor()
active_hinges_unsorted = self._body.find_active_hinges()
active_hinge_map = {
active_hinge.id: active_hinge for active_hinge in active_hinges_unsorted
}
active_hinges = [active_hinge_map[id] for id in self._dof_ids]
self._cpg_network_structure = make_cpg_network_structure_neighbour(
active_hinges
)
def _init_runner(self) -> None:
self._runner = LocalRunner(LocalRunner.SimParams(), headless=True)
async def _evaluate_population(
self,
database: AsyncEngine,
process_id: int,
process_id_gen: ProcessIdGen,
population: npt.NDArray[np.float_],
) -> npt.NDArray[np.float_]:
batch = Batch(
simulation_time=self._simulation_time,
sampling_frequency=self._sampling_frequency,
control_frequency=self._control_frequency,
control=self._control,
)
self._controllers = []
for params in population:
initial_state = self._cpg_network_structure.make_uniform_state(
0.5 * math.pi / 2.0
)
weight_matrix = self._cpg_network_structure.make_weight_matrix_from_params(
params
)
dof_ranges = self._cpg_network_structure.make_uniform_dof_ranges(1.0)
brain = BrainCpgNetworkStatic(
initial_state,
self._cpg_network_structure.num_cpgs,
weight_matrix,
dof_ranges,
)
controller = brain.make_controller(self._body, self._dof_ids)
bounding_box = self._actor.calc_aabb()
self._controllers.append(controller)
env = Environment()
env.actors.append(
PosedActor(
self._actor,
Vector3(
[
0.0,
0.0,
bounding_box.size.z / 2.0 - bounding_box.offset.z,
]
),
Quaternion(),
[0.0 for _ in controller.get_dof_targets()],
)
)
batch.environments.append(env)
states = await self._runner.run_batch(batch)
return np.array(
[
self._calculate_fitness(
states[0].envs[i].actor_states[0],
states[-1].envs[i].actor_states[0],
)
for i in range(len(population))
]
)
def _control(self, dt: float, control: ActorControl) -> None:
for control_i, controller in enumerate(self._controllers):
controller.step(dt)
control.set_dof_targets(control_i, 0, controller.get_dof_targets())
@staticmethod
def _calculate_fitness(begin_state: ActorState, end_state: ActorState) -> float:
# TODO simulation can continue slightly passed the defined sim time.
# distance traveled on the xy plane
return math.sqrt(
(begin_state.position[0] - end_state.position[0]) ** 2
+ ((begin_state.position[1] - end_state.position[1]) ** 2)
)
def _must_do_next_gen(self) -> bool:
return self.generation_number != self._num_generations
| [] |
2024-01-10 | onerachel/revolve2 | jlo~ea_fixedbody_cppn_openaies~optimizer.py | """Optimizer for finding a good modular robot brain using direct encoding of the CPG brain weights, OpenAI ES algoriothm, and simulation using mujoco."""
import math
from random import Random
from typing import List
import numpy as np
import numpy.typing as npt
from pyrr import Quaternion, Vector3
from revolve2.actor_controller import ActorController
from revolve2.actor_controllers.cpg import CpgNetworkStructure
from revolve2.core.modular_robot import Body
from revolve2.core.modular_robot.brains import (
BrainCpgNetworkStatic,
make_cpg_network_structure_neighbour,
)
from revolve2.core.optimization import ProcessIdGen
from revolve2.core.optimization.ea.openai_es import OpenaiESOptimizer
from revolve2.core.physics.actor import Actor
from revolve2.core.physics.running import (
ActorControl,
ActorState,
Batch,
Environment,
PosedActor,
Runner,
)
from revolve2.runners.isaacgym import LocalRunner
# from revolve2.runners.mujoco import LocalRunner
from sqlalchemy.ext.asyncio import AsyncEngine
from sqlalchemy.ext.asyncio.session import AsyncSession
class Optimizer(OpenaiESOptimizer):
"""
Optimizer for the problem.
Uses the generic EA optimizer as a base.
"""
_body: Body
_actor: Actor
_dof_ids: List[int]
_cpg_network_structure: CpgNetworkStructure
_runner: Runner
_controllers: List[ActorController]
_simulation_time: int
_sampling_frequency: float
_control_frequency: float
_num_generations: int
async def ainit_new( # type: ignore # TODO for now ignoring mypy complaint about LSP problem, override parent's ainit
self,
database: AsyncEngine,
session: AsyncSession,
process_id: int,
process_id_gen: ProcessIdGen,
rng: Random,
population_size: int,
sigma: float,
learning_rate: float,
robot_body: Body,
simulation_time: int,
sampling_frequency: float,
control_frequency: float,
num_generations: int,
) -> None:
"""
Initialize this class async.
Called when creating an instance using `new`.
:param database: Database to use for this optimizer.
:param session: Session to use when saving data to the database during initialization.
:param process_id: Unique identifier in the complete program, specifically made for this optimizer.
:param process_id_gen: Can be used to create more unique identifiers.
:param rng: Random number generator.
:param population_size: Population size for the OpenAI ES algorithm.
:param sigma: Standard deviation for the OpenAI ES algorithm.
:param learning_rate: Directional vector gain for OpenAI ES algorithm.
:param robot_body: The body to optimize the brain for.
:param simulation_time: Time in second to simulate the robots for.
:param sampling_frequency: Sampling frequency for the simulation. See `Batch` class from physics running.
:param control_frequency: Control frequency for the simulation. See `Batch` class from physics running.
:param num_generations: Number of generations to run the optimizer for.
"""
self._body = robot_body
self._init_actor_and_cpg_network_structure()
nprng = np.random.Generator(
np.random.PCG64(rng.randint(0, 2**63))
) # rng is currently not numpy, but this would be very convenient. do this until that is resolved.
initial_mean = nprng.standard_normal(
self._cpg_network_structure.num_connections
)
await super().ainit_new(
database=database,
session=session,
process_id=process_id,
process_id_gen=process_id_gen,
rng=rng,
population_size=population_size,
sigma=sigma,
learning_rate=learning_rate,
initial_mean=initial_mean,
)
self._init_runner()
self._simulation_time = simulation_time
self._sampling_frequency = sampling_frequency
self._control_frequency = control_frequency
self._num_generations = num_generations
async def ainit_from_database( # type: ignore # see comment at ainit_new
self,
database: AsyncEngine,
session: AsyncSession,
process_id: int,
process_id_gen: ProcessIdGen,
rng: Random,
robot_body: Body,
simulation_time: int,
sampling_frequency: float,
control_frequency: float,
num_generations: int,
) -> bool:
"""
Try to initialize this class async from a database.
Called when creating an instance using `from_database`.
:param database: Database to use for this optimizer.
:param session: Session to use when loading and saving data to the database during initialization.
:param process_id: Unique identifier in the complete program, specifically made for this optimizer.
:param process_id_gen: Can be used to create more unique identifiers.
:param rng: Random number generator.
:param robot_body: The body to optimize the brain for.
:param simulation_time: Time in second to simulate the robots for.
:param sampling_frequency: Sampling frequency for the simulation. See `Batch` class from physics running.
:param control_frequency: Control frequency for the simulation. See `Batch` class from physics running.
:param num_generations: Number of generations to run the optimizer for.
:returns: True if this complete object could be deserialized from the database.
"""
if not await super().ainit_from_database(
database=database,
session=session,
process_id=process_id,
process_id_gen=process_id_gen,
rng=rng,
):
return False
self._body = robot_body
self._init_actor_and_cpg_network_structure()
self._init_runner()
self._simulation_time = simulation_time
self._sampling_frequency = sampling_frequency
self._control_frequency = control_frequency
self._num_generations = num_generations
return True
def _init_actor_and_cpg_network_structure(self) -> None:
self._actor, self._dof_ids = self._body.to_actor()
active_hinges_unsorted = self._body.find_active_hinges()
active_hinge_map = {
active_hinge.id: active_hinge for active_hinge in active_hinges_unsorted
}
active_hinges = [active_hinge_map[id] for id in self._dof_ids]
self._cpg_network_structure = make_cpg_network_structure_neighbour(
active_hinges
)
def _init_runner(self) -> None:
self._runner = LocalRunner(LocalRunner.SimParams(), headless=True) #isaacgym
#self._runner = LocalRunner(headless=True) #mujoco
async def _evaluate_population(
self,
database: AsyncEngine,
process_id: int,
process_id_gen: ProcessIdGen,
population: npt.NDArray[np.float_],
) -> npt.NDArray[np.float_]:
batch = Batch(
simulation_time=self._simulation_time,
sampling_frequency=self._sampling_frequency,
control_frequency=self._control_frequency,
control=self._control,
)
self._controllers = []
for params in population:
initial_state = self._cpg_network_structure.make_uniform_state(
0.5 * math.pi / 2.0
)
weight_matrix = (
self._cpg_network_structure.make_connection_weights_matrix_from_params(
params
)
)
dof_ranges = self._cpg_network_structure.make_uniform_dof_ranges(1.0)
brain = BrainCpgNetworkStatic(
initial_state,
self._cpg_network_structure.num_cpgs,
weight_matrix,
dof_ranges,
)
controller = brain.make_controller(self._body, self._dof_ids)
bounding_box = self._actor.calc_aabb()
self._controllers.append(controller)
env = Environment()
env.actors.append(
PosedActor(
self._actor,
Vector3(
[
0.0,
0.0,
bounding_box.size.z / 2.0 - bounding_box.offset.z,
]
),
Quaternion(),
[0.0 for _ in controller.get_dof_targets()],
)
)
batch.environments.append(env)
batch_results = await self._runner.run_batch(batch)
return np.array(
[
self._calculate_fitness(
environment_result.environment_states[0].actor_states[0],
environment_result.environment_states[-1].actor_states[0],
)
for environment_result in batch_results.environment_results
]
)
def _control(
self, environment_index: int, dt: float, control: ActorControl
) -> None:
controller = self._controllers[environment_index]
controller.step(dt)
control.set_dof_targets(0, controller.get_dof_targets())
@staticmethod
def _calculate_fitness(begin_state: ActorState, end_state: ActorState) -> float:
# TODO simulation can continue slightly passed the defined sim time.
# distance traveled on the xy plane
return math.sqrt(
(begin_state.position[0] - end_state.position[0]) ** 2
+ ((begin_state.position[1] - end_state.position[1]) ** 2)
)
def _must_do_next_gen(self) -> bool:
return self.generation_number != self._num_generations
| [] |
2024-01-10 | onerachel/revolve2 | jlo~ea_fixedbody_cppn_revde~optimizer.py |
import math
from random import Random
from typing import List
from black import schedule_formatting
import numpy as np
import numpy.typing as npt
from pyrr import Quaternion, Vector3
from revolve2.actor_controller import ActorController
from revolve2.actor_controllers.cpg import CpgNetworkStructure
from revolve2.core.modular_robot import Body
from revolve2.core.modular_robot.brains import (
BrainCpgNetworkStatic,
make_cpg_network_structure_neighbour,
)
from revolve2.core.optimization import ProcessIdGen
from revolve2.core.optimization.ea.openai_es import OpenaiESOptimizer
from revde import RevDEOptimizer
from revolve2.core.physics.actor import Actor
from revolve2.core.physics.running import (
ActorControl,
ActorState,
Batch,
Environment,
PosedActor,
Runner,
)
from revolve2.runners.isaacgym import LocalRunner
from sqlalchemy.ext.asyncio import AsyncEngine
from sqlalchemy.ext.asyncio.session import AsyncSession
class Optimizer(RevDEOptimizer):
"""
Optimizer for the problem.
Uses the generic EA optimizer as a base.
"""
_body: Body
_actor: Actor
_dof_ids: List[int]
_cpg_network_structure: CpgNetworkStructure
_runner: Runner
_controllers: List[ActorController]
_simulation_time: int
_sampling_frequency: float
_control_frequency: float
_num_generations: int
async def ainit_new( # type: ignore # TODO for now ignoring mypy complaint about LSP problem, override parent's ainit
self,
database: AsyncEngine,
session: AsyncSession,
process_id: int,
process_id_gen: ProcessIdGen,
rng: Random,
population_size: int,
robot_body: Body,
simulation_time: int,
sampling_frequency: float,
control_frequency: float,
num_generations: int,
scaling: float,
cross_prob: float,
) -> None:
"""
Initialize this class async.
Called when creating an instance using `new`.
:param database: Database to use for this optimizer.
:param session: Session to use when saving data to the database during initialization.
:param process_id: Unique identifier in the complete program, specifically made for this optimizer.
:param process_id_gen: Can be used to create more unique identifiers.
:param rng: Random number generator.
:param population_size: Population size for the RevDE algorithm.
:param scaling: Scaling factor for the RevDE algorithm.
:param cross_prob: Crossover probability for the RevDE algorithm.
:param robot_body: The body to optimize the brain for.
:param simulation_time: Time in second to simulate the robots for.
:param sampling_frequency: Sampling frequency for the simulation. See `Batch` class from physics running.
:param control_frequency: Control frequency for the simulation. See `Batch` class from physics running.
:param num_generations: Number of generations to run the optimizer for.
"""
self._body = robot_body
self._init_actor_and_cpg_network_structure()
nprng = np.random.Generator(
np.random.PCG64(rng.randint(0, 2**63))
) # rng is currently not numpy, but this would be very convenient. do this until that is resolved.
initial_population = nprng.standard_normal((population_size, self._cpg_network_structure.num_connections))
await super().ainit_new(
database=database,
session=session,
process_id=process_id,
process_id_gen=process_id_gen,
rng=rng,
population_size=population_size,
initial_population=initial_population,
scaling=scaling,
cross_prob=cross_prob,
)
self._init_runner()
self._simulation_time = simulation_time
self._sampling_frequency = sampling_frequency
self._control_frequency = control_frequency
self._num_generations = num_generations
async def ainit_from_database( # type: ignore # see comment at ainit_new
self,
database: AsyncEngine,
session: AsyncSession,
process_id: int,
process_id_gen: ProcessIdGen,
rng: Random,
robot_body: Body,
simulation_time: int,
sampling_frequency: float,
control_frequency: float,
num_generations: int,
) -> bool:
"""
Try to initialize this class async from a database.
Called when creating an instance using `from_database`.
:param database: Database to use for this optimizer.
:param session: Session to use when loading and saving data to the database during initialization.
:param process_id: Unique identifier in the complete program, specifically made for this optimizer.
:param process_id_gen: Can be used to create more unique identifiers.
:param rng: Random number generator.
:param robot_body: The body to optimize the brain for.
:param simulation_time: Time in second to simulate the robots for.
:param sampling_frequency: Sampling frequency for the simulation. See `Batch` class from physics running.
:param control_frequency: Control frequency for the simulation. See `Batch` class from physics running.
:param num_generations: Number of generations to run the optimizer for.
:returns: True if this complete object could be deserialized from the database.
"""
if not await super().ainit_from_database(
database=database,
session=session,
process_id=process_id,
process_id_gen=process_id_gen,
rng=rng,
):
return False
self._body = robot_body
self._init_actor_and_cpg_network_structure()
self._init_runner()
self._simulation_time = simulation_time
self._sampling_frequency = sampling_frequency
self._control_frequency = control_frequency
self._num_generations = num_generations
return True
def _init_actor_and_cpg_network_structure(self) -> None:
self._actor, self._dof_ids = self._body.to_actor()
active_hinges_unsorted = self._body.find_active_hinges()
active_hinge_map = {
active_hinge.id: active_hinge for active_hinge in active_hinges_unsorted
}
active_hinges = [active_hinge_map[id] for id in self._dof_ids]
self._cpg_network_structure = make_cpg_network_structure_neighbour(
active_hinges
)
def _init_runner(self) -> None:
self._runner = LocalRunner(LocalRunner.SimParams(), headless=True)
async def _evaluate_population(
self,
database: AsyncEngine,
process_id: int,
process_id_gen: ProcessIdGen,
population: npt.NDArray[np.float_],
) -> npt.NDArray[np.float_]:
batch = Batch(
simulation_time=self._simulation_time,
sampling_frequency=self._sampling_frequency,
control_frequency=self._control_frequency,
control=self._control,
)
self._controllers = []
for params in population:
initial_state = self._cpg_network_structure.make_uniform_state(
0.5 * math.pi / 2.0
)
weight_matrix = (
self._cpg_network_structure.make_connection_weights_matrix_from_params(
params
)
)
dof_ranges = self._cpg_network_structure.make_uniform_dof_ranges(1.0)
brain = BrainCpgNetworkStatic(
initial_state,
self._cpg_network_structure.num_cpgs,
weight_matrix,
dof_ranges,
)
controller = brain.make_controller(self._body, self._dof_ids)
bounding_box = self._actor.calc_aabb()
self._controllers.append(controller)
env = Environment()
env.actors.append(
PosedActor(
self._actor,
Vector3(
[
0.0,
0.0,
bounding_box.size.z / 2.0 - bounding_box.offset.z,
]
),
Quaternion(),
[0.0 for _ in controller.get_dof_targets()],
)
)
batch.environments.append(env)
batch_results = await self._runner.run_batch(batch)
return np.array(
[
self._calculate_fitness(
environment_result.environment_states[0].actor_states[0],
environment_result.environment_states[-1].actor_states[0],
)
for environment_result in batch_results.environment_results
]
)
def _control(
self, environment_index: int, dt: float, control: ActorControl
) -> None:
controller = self._controllers[environment_index]
controller.step(dt)
control.set_dof_targets(0, controller.get_dof_targets())
@staticmethod
def _calculate_fitness(begin_state: ActorState, end_state: ActorState) -> float:
# TODO simulation can continue slightly passed the defined sim time.
# distance traveled on the xy plane
return math.sqrt(
(begin_state.position[0] - end_state.position[0]) ** 2
+ ((begin_state.position[1] - end_state.position[1]) ** 2)
)
def _must_do_next_gen(self) -> bool:
return self.generation_number != self._num_generations
| [] |
2024-01-10 | FelipeCampos14/entregaveis-modulo8 | Ponderada4~ollama.py | from langchain.llms import Ollama
from langchain.schema import AIMessage, HumanMessage
import gradio as gr
import requests
import json
def llm_post(prompt):
url = "http://localhost:11434/api/generate"
post = {"model":"securityAdvisor","prompt":prompt,"stream":False}
response = requests.post(url, json=post)
# Check the status code of the response
if response.status_code == 200:
print("POST request succeeded!")
response_dict = vars(response)
response_dict['_content'] = json.loads(response_dict["_content"].decode('utf-8'))
return response_dict['_content']['response']
else:
print("POST request failed. Status code:", response.status_code)
with gr.Blocks() as demo:
chatbot = gr.Chatbot()
msg = gr.Textbox()
def respond(message, chat_history):
answer = llm_post(message)
chat_history.append((message, answer))
return "", chat_history
msg.submit(respond, [msg, chatbot], [msg, chatbot])
demo.launch() | [] |
2024-01-10 | aman313/qa4ml | src~operations.py | import csv
import random
from typing import List, Iterable, Union, Dict, Callable
import openai
from src.constants import openai_key, openai_engine
openai.api_key = openai_key
from src.models import Prompt, FoundationModelAPIWrapper, PromptBasedCaseDetectionCriterion, DataPoint, \
TextClassificationDataPoint, Subset
class Operation():
def validate_inputs(self):
raise NotImplementedError
def __call__(self, *args, **kwargs):
raise NotImplementedError
class Sampler(Operation):
pass
class RandomSampler(Sampler):
def __init__(self, sampling_ratio:Union[int,float],seed:int=42):
self._sampling_ratio = sampling_ratio
self.sampler_seeded = random.Random(seed)
def __call__(self, iter:Iterable ):
sampled = []
for elem in iter:
sample = self.sampler_seeded.random()
if sample<self._sampling_ratio:
sampled.append(elem)
return sampled
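# Usage sketch (hypothetical values): keep roughly 10% of a sequence, reproducibly
# thanks to the seeded RNG.
#   sampler = RandomSampler(sampling_ratio=0.1, seed=7)
#   subset = sampler(range(1000))  # expected length is about 100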
class ConditionSampler(Sampler):
def __init__(self, matching_function:Callable, max_samples = -1):
self._matching_function = matching_function
self._max_samples = max_samples
def __call__(self, iter:Iterable):
sampled = []
for elem in iter:
if self._matching_function(elem) and (self._max_samples ==-1 or (self._max_samples>-1 and len(sampled) < self._max_samples)):
sampled.append(elem)
return sampled
class HTTPOperation(Operation):
def _get(self):
pass
def __call__(self, *args, **kwargs):
pass
class CaseDetectionCriteriaCreator(Operation):
pass
class PromptBasedCaseDetectionCriteriaCreator(CaseDetectionCriteriaCreator):
def __init__(self, prompt:Prompt):
self._prompt = prompt
def __call__(self)->PromptBasedCaseDetectionCriterion:
return PromptBasedCaseDetectionCriterion(self._prompt)
class CriterionApplier(Operation):
pass
class FoundationModelCriterionApplier(CriterionApplier):
def __init__(self, api_wrapper:FoundationModelAPIWrapper, criterion:PromptBasedCaseDetectionCriterion):
self._api_wrapper = api_wrapper
self._criterion = criterion
def __call__(self, data_points:List[TextClassificationDataPoint]):
for data_point in data_points:
augmented_prompt_text = self._criterion._prompt.augment_text(data_point)
engine = openai_engine
kwargs = self._api_wrapper._api_details
response = openai.Completion.create(engine=engine,
prompt=augmented_prompt_text,
**kwargs
)
yield response.choices[0].text
class SubsetCreator(Operation):
pass
class SubsetCreatorFromCriteriaCreator(SubsetCreator):
def __init__(self, criteria_creator:CaseDetectionCriteriaCreator):
self._criteria_creator = criteria_creator
class PromptBasedSubsetCreatorFromCriteriaCreator(SubsetCreatorFromCriteriaCreator):
def __init__(self, prompt_criteria_creator:PromptBasedCaseDetectionCriteriaCreator,
foundation_model_api_wrapper:FoundationModelAPIWrapper):
super().__init__(prompt_criteria_creator)
self._criterion = prompt_criteria_creator()
self._foundation_model_api_wrapper = foundation_model_api_wrapper
def __call__(self, data_points:List[DataPoint]):
applier = FoundationModelCriterionApplier(self._foundation_model_api_wrapper,self._criterion)
prompted_classes = list(applier(data_points))
prompted_class_dict = {}
for i in range(len(prompted_classes)):
try:
prompted_class_dict[prompted_classes[i]].append(data_points[i])
except KeyError:
prompted_class_dict[prompted_classes[i]] = [data_points[i]]
subsets = []
for cls,data in prompted_class_dict.items():
subsets.append(Subset(cls,data))
return subsets
class Summarizer(Operation):
pass
class SubsetCountSummarizer(Summarizer):
def __init__(self,verbose=False):
self._verbose=verbose
def __call__(self, subsets:List[Subset]):
if self._verbose:
for x in subsets:
print('\n')
print(x._name)
for i in range(len(x._data_points)):
d = x._data_points[i]
print(str(i) + ': ' + str(d))
return {x._name:len(x._data_points) for x in subsets}
class DatasetReader(Operation):
def __call__(self, dataset_location:str)->Iterable[DataPoint]:
raise NotImplementedError
class TextClassificationDatasetReader(DatasetReader):
def __call__(self, dataset_location:str)->Iterable[TextClassificationDataPoint]:
raise NotImplementedError
class ToxicCommentDatasetReader(TextClassificationDatasetReader):
def __call__(self, dataset_location:str)->Iterable[TextClassificationDataPoint]:
raise NotImplementedError
class DisasterTweetDatasetReader(TextClassificationDatasetReader):
def __call__(self, dataset_location):
with open(dataset_location) as dl:
reader = csv.DictReader(dl)
for row in reader:
text = row['text']
label = row['target']
metadata = {'location':row['location'],'keyword':row['keyword']}
yield TextClassificationDataPoint(text=text,label=label,metadata=metadata) | [
"{}"
] |
2024-01-10 | HarshavardhanK/LLM-Experiments | starter.py | from llama_index import VectorStoreIndex, SimpleDirectoryReader
from llama_index.embeddings import OpenAIEmbedding
from langchain.embeddings import OllamaEmbeddings
from llama_index import ServiceContext, set_global_service_context
from langchain.llms import Ollama
llm = Ollama(model="llama2")
embed_model = OllamaEmbeddings(base_url="http://localhost:11434", model="llama2")
embed_model_open_ai = OpenAIEmbedding()
service_context = ServiceContext.from_defaults(embed_model=embed_model_open_ai, llm=llm)
# optionally set a global service context
set_global_service_context(service_context)
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine(service_context=service_context)
#response = query_engine.query("What did the shopkeeper ask Harsha in Manali? Can you answer the shopkeeper's question?")
response = query_engine.query("What is The rational agent approach?")
print(response)
import logging
import sys
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
#With Llama2 Embedding:
'''In Kirchenbauer's paper on "A Watermarking Algorithm for Large Language Models," the parameters of gamma and delta play a crucial role in the proposed watermarking algorithm.
Gamma (γ) represents the step size or stretching factor, which determines how much the watermark is stretched or compressed during embedding. A larger value of γ results in a stronger watermark, but it also increases the risk of detection by an attacker. On the other hand, a smaller value of γ leads to a weaker watermark, but it reduces the risk of detection.
Delta (δ) represents the threshold or decision point, which determines the level of similarity between the embedded watermark and the cover text required for detection. A larger value of δ results in a higher detection probability, but it also increases the risk of false positives. On the other hand, a smaller value of δ leads to a lower detection probability, but it reduces the risk of false positives.''' | [] |
2024-01-10 | nuass/modelscope | modelscope~models~cv~image_probing_model~backbone.py | # The implementation is adopted from OpenAI-CLIP,
# made publicly available under the MIT License at https://github.com/openai/CLIP
import math
import sys
from collections import OrderedDict
from functools import reduce
from operator import mul
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PIL import Image
from torchvision import models
from .utils import convert_weights, load_pretrained
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed
# after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool,
# and the subsequent convolution has stride 1
self.downsample = nn.Sequential(
OrderedDict([('-1', nn.AvgPool2d(stride)),
('0',
nn.Conv2d(
inplanes,
planes * self.expansion,
1,
stride=1,
bias=False)),
('1', nn.BatchNorm2d(planes * self.expansion))]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self,
spacial_dim: int,
embed_dim: int,
num_heads: int,
output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(
torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
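        # x: (N, C, H, W) -> flatten the spatial grid into a token sequence of shape
        # (H*W, N, C), prepend the mean token, add positional embeddings, then
        # attention-pool everything into a single embedding per image (returned as x[0]).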
x = x.reshape(x.shape[0], x.shape[1],
x.shape[2] * x.shape[3]).permute(2, 0, 1)
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)
x = x + self.positional_embedding[:, None, :].to(x.dtype)
x, _ = F.multi_head_attention_forward(
query=x,
key=x,
value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat(
[self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False)
return x[0]
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
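    # Sigmoid approximation of GELU used by CLIP: GELU(x) ~= x * sigmoid(1.702 * x).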
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self,
d_model: int,
n_head: int,
attn_mask: torch.Tensor = None):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(
OrderedDict([('c_fc', nn.Linear(d_model, d_model * 4)),
('gelu', QuickGELU()),
('c_proj', nn.Linear(d_model * 4, d_model))]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(
dtype=x.dtype,
device=x.device) if self.attn_mask is not None else None
return self.attn(
x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, x: torch.Tensor, idx):
features = {}
x_norm = self.ln_1(x)
features['layer_{}_pre_attn'.format(idx)] = x_norm.permute(1, 0, 2)
attn = self.attention(x_norm)
features['layer_{}_attn'.format(idx)] = attn.permute(1, 0, 2)
x = x + attn
mlp = self.mlp(self.ln_2(x))
features['layer_{}_mlp'.format(idx)] = mlp.permute(1, 0, 2)
x = x + mlp
return x, features
class Transformer(nn.Module):
def __init__(self,
width: int,
layers: int,
heads: int,
attn_mask: torch.Tensor = None):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.ModuleList()
for i in range(layers):
block = ResidualAttentionBlock(width, heads, attn_mask)
self.resblocks.append(block)
def forward(self, x: torch.Tensor):
features = {}
for idx, block in enumerate(self.resblocks):
x, block_feats = block(x, idx)
features.update(block_feats)
return x, features
class VisualTransformer(nn.Module):
def __init__(self, input_resolution: int, patch_size: int, width: int,
layers: int, heads: int, output_dim: int):
super().__init__()
print(input_resolution, patch_size, width, layers, heads, output_dim)
self.input_resolution = input_resolution
self.output_dim = output_dim
self.conv1 = nn.Conv2d(
in_channels=3,
out_channels=width,
kernel_size=patch_size,
stride=patch_size,
bias=False)
scale = width**-0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn(
(input_resolution // patch_size)**2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def forward(self, x: torch.Tensor, return_all=True):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1],
-1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
zeros = torch.zeros(
x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device)
# shape = [*, grid ** 2 + 1, width]
x = torch.cat([self.class_embedding.to(x.dtype) + zeros, x], dim=1)
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x, features = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if return_all:
features['pre_logits'] = x
return features
if self.proj is not None:
x = x @ self.proj
return x
class CLIPNet(nn.Module):
def __init__(self, arch_name, pretrained, **kwargs):
super(CLIPNet, self).__init__()
if arch_name == 'CLIP_ViTB32':
self.clip = VisualTransformer(
input_resolution=224,
patch_size=32,
width=768,
layers=12,
heads=12,
output_dim=512)
elif arch_name in ('CLIP_ViTB16', 'CLIP_ViTB16_FP16'):
self.clip = VisualTransformer(
input_resolution=224,
patch_size=16,
width=768,
layers=12,
heads=12,
output_dim=512)
elif arch_name in ('CLIP_ViTL14', 'CLIP_ViTL14_FP16'):
self.clip = VisualTransformer(
input_resolution=224,
patch_size=14,
width=1024,
layers=24,
heads=16,
output_dim=768)
else:
raise KeyError(f'Unsupported arch_name for CLIP, {arch_name}')
def forward(self, input_data):
output = self.clip(input_data)
return output
def CLIP(arch_name='CLIP_RN50',
use_pretrain=False,
load_from='',
state_dict=None,
**kwargs):
model = CLIPNet(arch_name=arch_name, pretrained=None, **kwargs)
if use_pretrain:
if arch_name.endswith('FP16'):
convert_weights(model.clip)
load_pretrained(model.clip, state_dict, load_from)
return model
class ProbingModel(torch.nn.Module):
def __init__(self, feat_size, num_classes):
super(ProbingModel, self).__init__()
self.linear = torch.nn.Linear(feat_size, num_classes)
def forward(self, x):
return self.linear(x)
| [] |
2024-01-10 | uqarni/reposite-demo2 | reminderwrapper.py | import openai
import json
def followup(yesno):
if yesno == 'yes':
return 'y'
if yesno == 'no':
return 'n'
else:
return 'n'
########
functions=[
{
"name": "followup",
"description": "Analyze the conversation provided by the user and determine if a followup is warranted",
"parameters": {
"type": "object",
"properties": {
"yesno": {
"type": "string",
"enum": ["yes", "no"],
"description": "yes if a follow up is warranted, and no if it is not",
}
},
"required": ["yesno"],
},
}
]
# Step 1, send model the user query and what functions it has access to
def run_conversation(bot_messages):
prompt = '''
You work in the sales department for Reposite, a travel agency and experience supplier marketplace.
Your job is to analyze the conversation between our sales agent (the Assistant) and the potential customer (the User) and determine if a follow-up is warranted.
A follow-up is NOT warranted if:
(1) the user has indicated that they are not interested or are unhappy in some way. For example, they have said that they are not interested in the product or do not want to be contacted.
(2) the user has indicated that they already purchased the Reposite membership.
Otherwise, a follow-up is warranted.
If a follow-up is warranted, execute the followup() function with 'yes' as the input. If a follow-up is not warranted, execute the followup() function with "no" as the input.
'''
all_messages = [{'role': 'system', 'content': prompt}]
#for iterate through redis database of all the conversations thus far:
all_messages.append({"role": "user", "content": str(bot_messages)})
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo-16k",
messages=all_messages,
functions=functions,
function_call= {"name": "followup"}
)
message = response["choices"][0]["message"]
# Step 2, check if the model wants to call a function
if message.get("function_call"):
function_name = message["function_call"]["name"]
return json.loads(message["function_call"]["arguments"])["yesno"]
else:
return 'error'
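# Illustrative usage sketch (hypothetical, not part of the original module); assumes an
# OpenAI key is available via the OPENAI_API_KEY environment variable.
if __name__ == "__main__":
    import os
    openai.api_key = os.environ.get("OPENAI_API_KEY")
    _demo_history = [
        {"role": "assistant", "content": "Hi! Are you interested in a Reposite membership?"},
        {"role": "user", "content": "No thanks, please stop contacting me."},
    ]
    print(run_conversation(_demo_history))  # expected to print 'no' (no follow-up warranted)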
| [
"\n You work in the sales department for Reposite, a travel agency and experience supplier marketplace.\n Your job is to analyze the conversation between our sales agent (the Assistant) and the potential customer (the User) and determine if a follow-up is warranted. \n\n A follow-up is NOT warranted if:\n (1) the user has indicated that they are not interested or are unhappy in some way. For example, they have said that they are not interested in the product or do not want to be contacted.\n (2) the user has indicated that they already purchased the Reposite membership.\n \n\n Otherwise, a follow-up is warranted. \n\n If a follow-up is warranted, execute the followup() function with 'yes' as the input. If a follow-up is not warranted, execute the followup() function with \"no\" as the input.\n "
] |
2024-01-10 | uqarni/reposite-demo2 | functions2.py | import openai
import os
import re
import random
from datetime import datetime, timedelta
import random
import time
#similarity search
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
import pandas as pd
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
def find_txt_examples(query, k=8):
loader = TextLoader("examples.txt")
documents = loader.load()
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
docs = text_splitter.split_documents(documents)
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(docs, embeddings)
docs = db.similarity_search(query, k=k)
examples = ""
for doc in docs:
examples += '\n\n' + doc.page_content
return examples
def find_examples(query, type, k=8):
if type == 'taylor_RAG':
full_file = 'RAG_examples/taylor.csv'
col1 = 'RAG_examples/taylorcol1.csv'
elif type == 'taylorNMQR_RAG':
full_file = 'RAG_examples/taylorNMQR.csv'
col1 = 'RAG_examples/taylorNMQRcol1.csv'
else:
full_file = 'RAG_examples/taylorNMQR.csv'
col1 = 'RAG_examples/taylorNMQRcol1.csv'
loader = CSVLoader(file_path=col1)
data = loader.load()
embeddings = OpenAIEmbeddings()
db = FAISS.from_documents(data, embeddings)
examples = ''
docs = db.similarity_search(query, k)
df = pd.read_csv(full_file)
i = 1
for doc in docs:
input_text = doc.page_content[14:]
try:
output = df.loc[df['User Message'] == input_text, 'Assistant Message'].iloc[0]
except:
print('found error for input')
try:
examples += f'Example {i}: \n\nLead Email: {input_text} \n\nTaylor Response: {output} \n\n'
except:
continue
i += 1
return examples
def my_function(og, permuted):
try:
output = find_examples(permuted, k = 10)
if og in output:
return 'yes'
else:
return 'no'
except:
print('error')
print('\n\n')
return 'error'
# Read CSV
def find_in_examples_script():
df = pd.read_csv('oct12comparison.csv')
# Apply function to each row and store result in a new column
df['Output'] = df.apply(lambda row: my_function(row['Assistant Reference Message'], row['Modified user message']), axis=1)
# Write DataFrame back to CSV
df.to_csv('oct12comparison_modified.csv', index=False)
#generate openai response; returns messages with openai response
def ideator(messages, lead_dict_info, bot_used):
print('message length: ' + str(len(messages)))
prompt = messages[0]['content']
messages = messages[1:]
new_message = messages[-1]['content']
#perform similarity search
examples = find_examples(new_message, bot_used, k=4)
examples = examples.format(**lead_dict_info)
prompt = prompt + examples
print('inbound message: ' + str(messages[-1]))
print('prompt' + prompt)
print('\n\n')
prompt = {'role': 'system', 'content': prompt}
messages.insert(0,prompt)
for i in range(5):
try:
key = os.environ.get("OPENAI_API_KEY")
openai.api_key = key
result = openai.ChatCompletion.create(
model="gpt-4",
messages= messages,
max_tokens = 500,
temperature = 0
)
response = result["choices"][0]["message"]["content"]
response = response.replace('\n','<br>')
print('response:')
print(response)
print('\n\n')
break
except Exception as e:
error_message = f"Attempt {i + 1} failed: {e}"
print(error_message)
            if i < 4: # we don't want to wait after the last try
                time.sleep(5) # wait for 5 seconds before the next attempt
    else:
        # every attempt failed: raise here rather than falling through to an undefined `response` below
        raise RuntimeError(f"OpenAI request failed after 5 attempts: {error_message}")
def split_sms(message):
import re
# Use regular expressions to split the string at ., !, or ? followed by a space or newline
sentences = re.split('(?<=[.!?]) (?=\\S)|(?<=[.!?])\n', message.strip())
# Strip leading and trailing whitespace from each sentence
sentences = [sentence.strip() for sentence in sentences if sentence.strip()]
# Compute the cumulative length of all sentences
cum_length = [0]
for sentence in sentences:
cum_length.append(cum_length[-1] + len(sentence))
total_length = cum_length[-1]
# Find the splitting point
split_point = next(i for i, cum_len in enumerate(cum_length) if cum_len >= total_length / 2)
# Split the sentences into two parts at the splitting point
part1 = sentences[:split_point]
part2 = sentences[split_point:]
# Join the sentences in each part back into strings and exclude any part that is empty
strings = []
if part1:
strings.append(" ".join(part1))
if part2:
strings.append(" ".join(part2))
return strings
split_response = [response]
count = len(split_response)
for section in split_response:
section = {
"role": "assistant",
"content": section
}
messages.append(section)
return messages, count
def initial_text_info(selection=None):
dictionary = {
'NMQR Received': '''
Hey {lead_first_name} -
I just saw you got a group reservation request through Reposite from {reseller_org_name}!
Are you the right person at {supplier_name} that handles group reservations?
Cheers,
Taylor
''',
'New QR':'''
Hey {lead_first_name} -
I saw that your Reposite profile just sparked some new interest! A planner {reseller_org_name}, just sent you a new quote request - they're looking for {category} suppliers in {destination}.
Based on the details, do you feel like this lead is relevant for {supplier_name}?
Cheers,
Taylor
''',
'NTM $500 Membership - Token Change':'''
Hey {lead_first_name} -
I saw that you just used tokens to discover new group planners. It's great to see you taking active steps to expand your connections!
Are there certain types of planners that you're targeting (corporate, student groups, international groups, luxury, etc.)?
Cheers,
Taylor
''',
'NTM $500 Membership - Quote Hotlist':'''
Hey {lead_first_name} -
I noticed that your conversation with {reseller_org_name} is off to a good start - congrats (though I don't want to jinx it)!
Are you open to receiving more quotes and group leads from other planners?
Cheers,
Taylor
''',
'NTM $500 Membership - Booking Received': '''
Hey {lead_first_name} -
Congrats on your recent booking with {reseller_org_name}! Was everything up to your expectations?
Best,
Taylor
'''
}
if selection is None:
return list(dictionary.keys())
return dictionary[selection]
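# Illustrative check (hypothetical, not part of the original module): list the available
# templates, then render one of them with sample lead fields.
if __name__ == "__main__":
    print(initial_text_info())
    print(initial_text_info('New QR').format(
        lead_first_name='John', reseller_org_name='Smith Co', category='travel',
        destination='Honolulu', supplier_name='Acme Trading Co'))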
| [
"content",
"PLACEHOLDERPLACEHOLDER",
"{'role': 'system', 'content': PLACEHOLDER}"
] |
2024-01-10 | uqarni/reposite-demo2 | playground.py | from functions import find_examples, ideator, initial_text_info
import openai
import pandas
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
import pandas as pd
from playground2 import generate_response
from datetime import datetime
import time
#open permutation_test in pandas
df = pd.read_csv('permutation_test.csv')
df['RAG 50 Response'] = df['RAG 50 Response'].astype(object)
# Create the output CSV and write the header
output_path = 'permutation_test_modified.csv'
df.iloc[0:0].to_csv(output_path, index=False)
# Processing
now = datetime.now()
print('start time: ' + str(now))
for index, row in df.iterrows():
if pd.notna(row['User Message Reworded']):
processed_value = generate_response(row['User Message Reworded'])
print(processed_value)
df.at[index, 'RAG 50 Response'] = processed_value
time.sleep(10)
# Append the updated row to CSV
df.iloc[index:index+1].to_csv(output_path, mode='a', header=False, index=False)
now = datetime.now()
print('end time: ' + str(now))
| [] |
2024-01-10 | uqarni/reposite-demo2 | playground2.py |
from functions import find_examples
import os
from datetime import datetime
from supabase import create_client, Client
import openai
#connect to supabase database
urL: str = os.environ.get("SUPABASE_URL")
key: str = os.environ.get("SUPABASE_KEY")
supabase: Client = create_client(urL, key)
now = datetime.now()
now = now.strftime("%Y-%m-%d %H:%M:%S")
def generate_response(input_message):
#variables for system prompt
info = {
'bot_name': 'Taylor',
'membership_link': 'https://www.reposite.io/membership-overview-1',
'email': '[email protected]',
'supplier_name': 'Acme Trading Co',
'lead_first_name': 'John',
'lead_last_name': 'Doe',
'nmqr_count': '10',
'nmqrurl': 'nmqrurl.com',
'reseller_org_name': 'Smith Co',
'category': 'travel',
'date': 'June 20, 2023',
'current_date': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'destination': 'Honolulu',
'group_size': '50',
'trip_dates': 'August 10, 2023 to August 20, 2023'
}
initial_text = '''
Hey {lead_first_name} -
I saw that your Reposite profile just sparked some new interest! A planner from {reseller_org_name} just sent you a new quote request - they're looking for {category} suppliers in {destination}.
Based on the details, do you feel like this lead is relevant for {supplier_name}?
Cheers,
Taylor
'''
initial_text = initial_text.format(**info)
data, count = supabase.table("bots_dev").select("*").eq("id", "taylor").execute()
bot_info = data[1][0]
system_prompt = bot_info['system_prompt']
#extract examples
examples = find_examples(input_message, k = 6)
print(examples)
print('\n\n')
system_prompt = system_prompt + '\n\n' + examples
system_prompt = system_prompt.format(**info)
system_prompt = {'role': 'system', 'content': system_prompt}
initial_text = {'role': 'assistant', 'content': initial_text}
user_response = {"role": "user", "content": input_message}
messages = []
messages.append(system_prompt)
messages.append(initial_text)
messages.append(user_response)
response = openai.ChatCompletion.create(
messages = messages,
model = 'gpt-4',
max_tokens = 400
)
response = response["choices"][0]["message"]["content"]
return response
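# Illustrative call (hypothetical, not part of the original module); assumes SUPABASE_URL,
# SUPABASE_KEY and an OpenAI key are configured and that the "bots_dev" table has a "taylor" row.
if __name__ == "__main__":
    print(generate_response("Thanks Taylor, yes - this lead looks relevant for us."))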
| [
"\n Hey {lead_first_name} -\n\nI saw that your Reposite profile just sparked some new interest! A planner from {reseller_org_name} just sent you a new quote request - they're looking for {category} suppliers in {destination}.\n\nBased on the details, do you feel like this lead is relevant for {supplier_name}?\n\nCheers,\nTaylor\n",
"system_prompt63c2fee8-5627-4c60-9612-1c36baef52f3\n\nPLACEHOLDER\n\nPLACEHOLDER",
"{'role': 'system', 'content': 'system_promptb4ceca05-dddf-44be-a401-7e4214fa1a83\\n\\nPLACEHOLDER'}",
"system_prompt",
"system_promptab6320c5-cf5b-42be-9583-ab01ee39a283\n\nPLACEHOLDER"
] |
2024-01-10 | BlueBug12/custom_stable_baselines | stable_baselines3~common~policies.py | """Policies: abstract base class and concrete implementations."""
import collections
import copy
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import numpy as np
import torch as th
import torch_geometric as thg
from torch import nn
from stable_baselines3.common.distributions import (
BernoulliDistribution,
CategoricalDistribution,
DiagGaussianDistribution,
Distribution,
MultiCategoricalDistribution,
StateDependentNoiseDistribution,
make_proba_distribution,
)
from stable_baselines3.common.preprocessing import get_action_dim, is_image_space, maybe_transpose, preprocess_obs
from stable_baselines3.common.torch_layers import (
GraphFeaturesExtractor,
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
MlpExtractor,
NatureCNN,
create_mlp,
)
from stable_baselines3.common.type_aliases import Schedule
from stable_baselines3.common.utils import get_device, is_vectorized_observation, obs_as_tensor
class BaseModel(nn.Module, ABC):
"""
The base model object: makes predictions in response to observations.
In the case of policies, the prediction is an action. In the case of critics, it is the
estimated value of the observation.
:param observation_space: The observation space of the environment
:param action_space: The action space of the environment
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
features_extractor: Optional[nn.Module] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__()
if optimizer_kwargs is None:
optimizer_kwargs = {}
if features_extractor_kwargs is None:
features_extractor_kwargs = {}
self.observation_space = observation_space
self.action_space = action_space
self.features_extractor = features_extractor
self.normalize_images = normalize_images
self.optimizer_class = optimizer_class
self.optimizer_kwargs = optimizer_kwargs
self.optimizer = None # type: Optional[th.optim.Optimizer]
self.features_extractor_class = features_extractor_class
self.features_extractor_kwargs = features_extractor_kwargs
@abstractmethod
def forward(self, *args, **kwargs):
pass
def _update_features_extractor(
self,
net_kwargs: Dict[str, Any],
features_extractor: Optional[BaseFeaturesExtractor] = None,
) -> Dict[str, Any]:
"""
Update the network keyword arguments and create a new features extractor object if needed.
If a ``features_extractor`` object is passed, then it will be shared.
:param net_kwargs: the base network keyword arguments, without the ones
related to features extractor
:param features_extractor: a features extractor object.
If None, a new object will be created.
:return: The updated keyword arguments
"""
net_kwargs = net_kwargs.copy()
if features_extractor is None:
# The features extractor is not shared, create a new one
features_extractor = self.make_features_extractor()
net_kwargs.update(dict(features_extractor=features_extractor, features_dim=features_extractor.features_dim))
return net_kwargs
def make_features_extractor(self) -> BaseFeaturesExtractor:
"""Helper method to create a features extractor."""
return self.features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
def extract_features(self, obs: th.Tensor) -> th.Tensor:
"""
Preprocess the observation if needed and extract features.
:param obs:
:return:
"""
assert self.features_extractor is not None, "No features extractor was set"
preprocessed_obs = preprocess_obs(obs, self.observation_space, normalize_images=self.normalize_images)
return self.features_extractor(preprocessed_obs)
def _get_constructor_parameters(self) -> Dict[str, Any]:
"""
Get data that need to be saved in order to re-create the model when loading it from disk.
        :return: The dictionary to pass as kwargs to the constructor when reconstructing this model.
"""
return dict(
observation_space=self.observation_space,
action_space=self.action_space,
# Passed to the constructor by child class
# squash_output=self.squash_output,
# features_extractor=self.features_extractor
normalize_images=self.normalize_images,
)
@property
def device(self) -> th.device:
"""Infer which device this policy lives on by inspecting its parameters.
If it has no parameters, the 'cpu' device is used as a fallback.
:return:"""
for param in self.parameters():
return param.device
return get_device("cpu")
def save(self, path: str) -> None:
"""
Save model to a given location.
:param path:
"""
th.save({"state_dict": self.state_dict(), "data": self._get_constructor_parameters()}, path)
@classmethod
def load(cls, path: str, device: Union[th.device, str] = "auto") -> "BaseModel":
"""
Load model from path.
:param path:
:param device: Device on which the policy should be loaded.
:return:
"""
device = get_device(device)
saved_variables = th.load(path, map_location=device)
# Allow to load policy saved with older version of SB3
if "sde_net_arch" in saved_variables["data"]:
warnings.warn(
"sde_net_arch is deprecated, please downgrade to SB3 v1.2.0 if you need such parameter.",
DeprecationWarning,
)
del saved_variables["data"]["sde_net_arch"]
# Create policy object
model = cls(**saved_variables["data"]) # pytype: disable=not-instantiable
# Load weights
model.load_state_dict(saved_variables["state_dict"])
model.to(device)
return model
def load_from_vector(self, vector: np.ndarray) -> None:
"""
Load parameters from a 1D vector.
:param vector:
"""
th.nn.utils.vector_to_parameters(th.FloatTensor(vector).to(self.device), self.parameters())
def parameters_to_vector(self) -> np.ndarray:
"""
Convert the parameters to a 1D vector.
:return:
"""
return th.nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy()
def set_training_mode(self, mode: bool) -> None:
"""
Put the policy in either training or evaluation mode.
This affects certain modules, such as batch normalisation and dropout.
:param mode: if true, set to training mode, else set to evaluation mode
"""
self.train(mode)
def obs_to_tensor(self, observation: Union[np.ndarray, Dict[str, np.ndarray]]) -> Tuple[th.Tensor, bool]:
"""
Convert an input observation to a PyTorch tensor that can be fed to a model.
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:return: The observation as PyTorch tensor
and whether the observation is vectorized or not
"""
vectorized_env = False
if isinstance(observation, dict):
# need to copy the dict as the dict in VecFrameStack will become a torch tensor
observation = copy.deepcopy(observation)
for key, obs in observation.items():
obs_space = self.observation_space.spaces[key]
if is_image_space(obs_space):
obs_ = maybe_transpose(obs, obs_space)
else:
obs_ = np.array(obs)
vectorized_env = vectorized_env or is_vectorized_observation(obs_, obs_space)
# Add batch dimension if needed
observation[key] = obs_.reshape((-1,) + self.observation_space[key].shape)
elif is_image_space(self.observation_space):
# Handle the different cases for images
# as PyTorch use channel first format
observation = maybe_transpose(observation, self.observation_space)
else:
observation = np.array(observation)
if not isinstance(observation, dict):
# Dict obs need to be handled separately
vectorized_env = is_vectorized_observation(observation, self.observation_space)
# Add batch dimension if needed
observation = observation.reshape((-1,) + self.observation_space.shape)
observation = obs_as_tensor(observation, self.device)
return observation, vectorized_env
class BasePolicy(BaseModel):
"""The base policy object.
Parameters are mostly the same as `BaseModel`; additions are documented below.
:param args: positional arguments passed through to `BaseModel`.
:param kwargs: keyword arguments passed through to `BaseModel`.
:param squash_output: For continuous actions, whether the output is squashed
or not using a ``tanh()`` function.
"""
def __init__(self, *args, squash_output: bool = False, **kwargs):
super().__init__(*args, **kwargs)
self._squash_output = squash_output
@staticmethod
def _dummy_schedule(progress_remaining: float) -> float:
"""(float) Useful for pickling policy."""
del progress_remaining
return 0.0
@property
def squash_output(self) -> bool:
"""(bool) Getter for squash_output."""
return self._squash_output
@staticmethod
def init_weights(module: nn.Module, gain: float = 1) -> None:
"""
Orthogonal initialization (used in PPO and A2C)
"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
nn.init.orthogonal_(module.weight, gain=gain)
if module.bias is not None:
module.bias.data.fill_(0.0)
@abstractmethod
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
By default provides a dummy implementation -- not all BasePolicy classes
implement this, e.g. if they are a Critic in an Actor-Critic method.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
def predict(
self,
observation: Union[np.ndarray, Dict[str, np.ndarray]],
state: Optional[Tuple[np.ndarray, ...]] = None,
episode_start: Optional[np.ndarray] = None,
deterministic: bool = False,
) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:
"""
Get the policy action from an observation (and optional hidden state).
Includes sugar-coating to handle different observations (e.g. normalizing images).
:param observation: the input observation
:param state: The last hidden states (can be None, used in recurrent policies)
:param episode_start: The last masks (can be None, used in recurrent policies)
this correspond to beginning of episodes,
where the hidden states of the RNN must be reset.
:param deterministic: Whether or not to return deterministic actions.
:return: the model's action and the next hidden state
(used in recurrent policies)
"""
# TODO (GH/1): add support for RNN policies
# if state is None:
# state = self.initial_state
# if episode_start is None:
# episode_start = [False for _ in range(self.n_envs)]
# Switch to eval mode (this affects batch norm / dropout)
self.set_training_mode(False)
observation, vectorized_env = self.obs_to_tensor(observation)
if isinstance(observation, thg.data.data.Data):
observation = thg.data.Batch.from_data_list([observation])
with th.no_grad():
actions = self._predict(observation, deterministic=deterministic)
# Convert to numpy, and reshape to the original action shape
actions = actions.cpu().numpy().reshape((-1,) + self.action_space.shape)
if isinstance(self.action_space, gym.spaces.Box):
if self.squash_output:
# Rescale to proper domain when using squashing
actions = self.unscale_action(actions)
else:
# Actions could be on arbitrary scale, so clip the actions to avoid
# out of bound error (e.g. if sampling from a Gaussian distribution)
actions = np.clip(actions, self.action_space.low, self.action_space.high)
# Remove batch dimension if needed
if not vectorized_env:
actions = actions.squeeze(axis=0)
return actions, state
def scale_action(self, action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [low, high] to [-1, 1]
(no need for symmetric action space)
:param action: Action to scale
:return: Scaled action
"""
low, high = self.action_space.low, self.action_space.high
return 2.0 * ((action - low) / (high - low)) - 1.0
def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:
"""
Rescale the action from [-1, 1] to [low, high]
(no need for symmetric action space)
:param scaled_action: Action to un-scale
"""
low, high = self.action_space.low, self.action_space.high
return low + (0.5 * (scaled_action + 1.0) * (high - low))
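        # Worked example: for a Box with low = -2 and high = 2,
        # scale_action(1.0) = 2 * ((1.0 - (-2)) / 4) - 1 = 0.5 and
        # unscale_action(0.5) = -2 + 0.5 * (0.5 + 1.0) * 4 = 1.0,
        # so the two transforms are exact inverses of each other.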
class ActorCriticPolicy(BasePolicy):
"""
Policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
if optimizer_kwargs is None:
optimizer_kwargs = {}
# Small values to avoid NaN in Adam optimizer
if optimizer_class == th.optim.Adam:
optimizer_kwargs["eps"] = 1e-5
super().__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
squash_output=squash_output,
)
# Default network architecture, from stable-baselines
if net_arch is None:
if features_extractor_class == NatureCNN:
net_arch = []
else:
net_arch = [dict(pi=[64, 64], vf=[64, 64])]
self.net_arch = net_arch
self.activation_fn = activation_fn
self.ortho_init = ortho_init
self.features_extractor = features_extractor_class(self.observation_space, **self.features_extractor_kwargs)
self.features_dim = self.features_extractor.features_dim
self.normalize_images = normalize_images
self.log_std_init = log_std_init
dist_kwargs = None
# Keyword arguments for gSDE distribution
if use_sde:
dist_kwargs = {
"full_std": full_std,
"squash_output": squash_output,
"use_expln": use_expln,
"learn_features": False,
}
if sde_net_arch is not None:
warnings.warn("sde_net_arch is deprecated and will be removed in SB3 v2.4.0.", DeprecationWarning)
self.use_sde = use_sde
self.dist_kwargs = dist_kwargs
# Action distribution
self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs)
self._build(lr_schedule)
def _get_constructor_parameters(self) -> Dict[str, Any]:
data = super()._get_constructor_parameters()
default_none_kwargs = self.dist_kwargs or collections.defaultdict(lambda: None)
data.update(
dict(
net_arch=self.net_arch,
activation_fn=self.activation_fn,
use_sde=self.use_sde,
log_std_init=self.log_std_init,
squash_output=default_none_kwargs["squash_output"],
full_std=default_none_kwargs["full_std"],
use_expln=default_none_kwargs["use_expln"],
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
ortho_init=self.ortho_init,
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
def reset_noise(self, n_envs: int = 1) -> None:
"""
Sample new weights for the exploration matrix.
:param n_envs:
"""
assert isinstance(self.action_dist, StateDependentNoiseDistribution), "reset_noise() is only available when using gSDE"
self.action_dist.sample_weights(self.log_std, batch_size=n_envs)
def _build_mlp_extractor(self) -> None:
"""
Create the policy and value networks.
Part of the layers can be shared.
"""
# Note: If net_arch is None and some features extractor is used,
# net_arch here is an empty list and mlp_extractor does not
# really contain any layers (acts like an identity module).
self.mlp_extractor = MlpExtractor(
self.features_dim,
net_arch=self.net_arch,
activation_fn=self.activation_fn,
device=self.device,
)
def _build(self, lr_schedule: Schedule) -> None:
"""
Create the networks and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self._build_mlp_extractor()
latent_dim_pi = self.mlp_extractor.latent_dim_pi
if isinstance(self.action_dist, DiagGaussianDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, latent_sde_dim=latent_dim_pi, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, (CategoricalDistribution, MultiCategoricalDistribution, BernoulliDistribution)):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
else:
raise NotImplementedError(f"Unsupported distribution '{self.action_dist}'.")
self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)
# Init weights: use orthogonal initialization
# with small initial weight for the output
if self.ortho_init:
# TODO: check for features_extractor
# Values from stable-baselines.
# features_extractor/mlp values are
# originally from openai/baselines (default gains/init_scales).
module_gains = {
self.features_extractor: np.sqrt(2),
self.mlp_extractor: np.sqrt(2),
self.action_net: 0.01,
self.value_net: 1,
}
for module, gain in module_gains.items():
module.apply(partial(self.init_weights, gain=gain))
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def forward(self, obs: th.Tensor, deterministic: bool = False) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation
:param deterministic: Whether to sample or use deterministic actions
:return: action, value and log probability of the action
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
actions = actions.reshape((-1,) + self.action_space.shape)
return actions, values, log_prob
def _get_action_dist_from_latent(self, latent_pi: th.Tensor) -> Distribution:
"""
Retrieve action distribution given the latent codes.
:param latent_pi: Latent code for the actor
:return: Action distribution
"""
mean_actions = self.action_net(latent_pi)
if isinstance(self.action_dist, DiagGaussianDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std)
elif isinstance(self.action_dist, CategoricalDistribution):
# Here mean_actions are the logits before the softmax
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, MultiCategoricalDistribution):
# Here mean_actions are the flattened logits
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, BernoulliDistribution):
# Here mean_actions are the logits (before rounding to get the binary actions)
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_pi)
else:
raise ValueError("Invalid action distribution")
def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
return self.get_distribution(observation).get_actions(deterministic=deterministic)
def evaluate_actions(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Evaluate actions according to the current policy,
given the observations.
:param obs:
:param actions:
:return: estimated value, log likelihood of taking those actions
and entropy of the action distribution.
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
distribution = self._get_action_dist_from_latent(latent_pi)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
return values, log_prob, distribution.entropy()
def get_distribution(self, obs: th.Tensor) -> Distribution:
"""
Get the current policy distribution given the observations.
:param obs:
:return: the action distribution.
"""
features = self.extract_features(obs)
latent_pi = self.mlp_extractor.forward_actor(features)
return self._get_action_dist_from_latent(latent_pi)
def predict_values(self, obs: th.Tensor) -> th.Tensor:
"""
Get the estimated values according to the current policy given the observations.
:param obs:
:return: the estimated values.
"""
features = self.extract_features(obs)
latent_vf = self.mlp_extractor.forward_critic(features)
return self.value_net(latent_vf)
class ActorCriticCnnPolicy(ActorCriticPolicy):
"""
CNN policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
sde_net_arch,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class MultiInputActorCriticPolicy(ActorCriticPolicy):
"""
    MultiInputActorCriticPolicy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space (Tuple)
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param sde_net_arch: Network architecture for extracting features
when using gSDE. If None, the latent features from the policy will be used.
Pass an empty list to use the states as features.
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Uses the CombinedExtractor
:param features_extractor_kwargs: Keyword arguments
to pass to the feature extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Dict,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
sde_net_arch,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
class ContinuousCritic(BaseModel):
"""
Critic network(s) for DDPG/SAC/TD3.
It represents the action-state value function (Q-value function).
Compared to A2C/PPO critics, this one represents the Q-value
and takes the continuous action as input. It is concatenated with the state
and then fed to the network which outputs a single value: Q(s, a).
For more recent algorithms like SAC/TD3, multiple networks
are created to give different estimates.
By default, it creates two critic networks used to reduce overestimation
thanks to clipped Q-learning (cf TD3 paper).
    :param observation_space: Observation space
:param action_space: Action space
:param net_arch: Network architecture
:param features_extractor: Network to extract features
(a CNN when using images, a nn.Flatten() layer otherwise)
:param features_dim: Number of features
:param activation_fn: Activation function
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param n_critics: Number of critic networks to create.
:param share_features_extractor: Whether the features extractor is shared or not
between the actor and the critic (this saves computation time)
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
net_arch: List[int],
features_extractor: nn.Module,
features_dim: int,
activation_fn: Type[nn.Module] = nn.ReLU,
normalize_images: bool = True,
n_critics: int = 2,
share_features_extractor: bool = True,
):
super().__init__(
observation_space,
action_space,
features_extractor=features_extractor,
normalize_images=normalize_images,
)
action_dim = get_action_dim(self.action_space)
self.share_features_extractor = share_features_extractor
self.n_critics = n_critics
self.q_networks = []
for idx in range(n_critics):
q_net = create_mlp(features_dim + action_dim, 1, net_arch, activation_fn)
q_net = nn.Sequential(*q_net)
self.add_module(f"qf{idx}", q_net)
self.q_networks.append(q_net)
def forward(self, obs: th.Tensor, actions: th.Tensor) -> Tuple[th.Tensor, ...]:
# Learn the features extractor using the policy loss only
# when the features_extractor is shared with the actor
with th.set_grad_enabled(not self.share_features_extractor):
features = self.extract_features(obs)
qvalue_input = th.cat([features, actions], dim=1)
return tuple(q_net(qvalue_input) for q_net in self.q_networks)
def q1_forward(self, obs: th.Tensor, actions: th.Tensor) -> th.Tensor:
"""
Only predict the Q-value using the first network.
This allows to reduce computation when all the estimates are not needed
(e.g. when updating the policy in TD3).
"""
with th.no_grad():
features = self.extract_features(obs)
return self.q_networks[0](th.cat([features, actions], dim=1))
class GNNActorCriticPolicy(ActorCriticPolicy):
def __init__(
self,
observation_space: gym.spaces.Graph,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[Union[int, Dict[str, List[int]]]]] = None,
activation_fn: Type[th.nn.Module] = th.nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
sde_net_arch: Optional[List[int]] = None,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: Type[BaseFeaturesExtractor] = GraphFeaturesExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super().__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
ortho_init,
use_sde,
log_std_init,
full_std,
sde_net_arch,
use_expln,
squash_output,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
def obs_to_tensor(self, observation: gym.spaces.GraphInstance):
if isinstance(observation, list):
vectorized_env = True
else:
vectorized_env = False
if vectorized_env:
torch_obs = list()
for obs in observation:
x = th.tensor(obs.nodes).float()
#edge_index = th.tensor(obs.edge_links, dtype=th.long).t().contiguous().view(2, -1)
edge_index = th.tensor(obs.edge_links, dtype=th.long)
torch_obs.append(thg.data.Data(x=x, edge_index=edge_index))
if len(torch_obs) == 1:
torch_obs = torch_obs[0]
else:
x = th.tensor(observation.nodes).float()
#edge_index = th.tensor(observation.edge_links, dtype=th.long).t().contiguous().view(2, -1)
edge_index = th.tensor(observation.edge_links, dtype=th.long)
torch_obs = thg.data.Data(x=x, edge_index=edge_index)
return torch_obs, vectorized_env
def forward(self, obs: thg.data.Data,
deterministic: bool = False):
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
actions = actions.reshape((-1,) + self.action_space.shape)
return actions, values, log_prob
def _predict(self, observation: thg.data.Data,
deterministic: bool = False) -> th.Tensor:
observation.to(self.device)
return self.get_distribution(observation).get_actions(deterministic=deterministic)
def evaluate_actions(self, obs: thg.data.Data,
actions: th.Tensor):
features = self.extract_features(obs)
latent_pi, latent_vf = self.mlp_extractor(features)
distribution = self._get_action_dist_from_latent(latent_pi)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
return values, log_prob, distribution.entropy()
def get_distribution(self, obs: thg.data.Data):
features = self.extract_features(obs)
latent_pi = self.mlp_extractor.forward_actor(features)
return self._get_action_dist_from_latent(latent_pi)
def predict_values(self, obs: thg.data.Data):
features = self.extract_features(obs)
latent_vf = self.mlp_extractor.forward_critic(features)
return self.value_net(latent_vf) | [] |
2024-01-10 | athiyaman-m/AI-Chatbot-with-Voice-Assistant | worker.py | import openai
import requests
openai.api_key = "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"
def speech_to_text(audio_binary):
    # Set up the Watson Speech-to-Text HTTP API URL
base_url = "http://speech-to-text.192k8io9ig4v.svc.cluster.local"
api_url = base_url+'/speech-to-text/api/v1/recognize'
    # Set up the parameters for our HTTP request
params = {
'model': 'en-US_Multimedia',
}
# Set up the body of our HTTP request
body = audio_binary
    # Send an HTTP POST request
response = requests.post(api_url, params=params, data=audio_binary).json()
# Parse the response to get our transcribed text
text = 'null'
while bool(response.get('results')):
print('speech to text response:', response)
text = response.get('results').pop().get('alternatives').pop().get('transcript')
print('recognised text: ', text)
return text
def text_to_speech(text, voice=""):
    # Set up the Watson Text-to-Speech HTTP API URL
base_url = "http://text-to-speech.192k8io9ig4v.svc.cluster.local"
api_url = base_url + '/text-to-speech/api/v1/synthesize?output=output_text.wav'
# Adding voice parameter in api_url if the user has selected a preferred voice
if voice != "" and voice != "default":
api_url += "&voice=" + voice
# Set the headers for our HTTP request
headers = {
'Accept': 'audio/wav',
'Content-Type': 'application/json',
}
# Set the body of our HTTP request
json_data = {
'text': text,
}
    # Send an HTTP POST request to the Watson Text-to-Speech service
response = requests.post(api_url, headers=headers, json=json_data)
print('text to speech response:', response)
return response.content
def openai_process_message(user_message):
    # Set the prompt for the OpenAI API
prompt = "\"Act like a personal assistant. You can respond to questions, translate sentences, summarize news, and give recommendations. " + user_message + "\""
    # Call the OpenAI API to process our prompt
openai_response = openai.Completion.create(model="text-davinci-003", prompt=prompt,max_tokens=4000)
print("openai response:", openai_response)
# Parse the response to get the response text for our prompt
response_text = openai_response.choices[0].text
return response_text
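# --- Editor's note: hedged usage sketch, not part of the original module. ---
# The three helpers above are normally chained by the assistant's web routes:
# transcribe the uploaded audio, send the transcript to OpenAI, then synthesize
# the reply. The sketch below exercises only the OpenAI step, since the Watson
# endpoints require the in-cluster services (and a valid OpenAI key) to be
# reachable.
if __name__ == "__main__":
    demo_reply = openai_process_message("What is the capital of France?")
    print("assistant reply:", demo_reply.strip())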
| [
"\"Act like a personal assistant. You can respond to questions, translate sentences, summarize news, and give recommendations. PLACEHOLDER\""
] |
2024-01-10 | JackQin007/Safety-GOPS | quadrotor~env~vectorized_env~vec_env.py | '''Adapted from OpenAI Baselines.
See also:
* https://github.com/openai/baselines/blob/master/baselines/common/vec_env/vec_env.py
* https://github.com/DLR-RM/stable-baselines3/blob/master/stable_baselines3/common/vec_env/base_vec_env.py
'''
from abc import ABC, abstractmethod
from quadrotor.env.vectorized_env.vec_env_utils import tile_images
class VecEnv(ABC):
'''An abstract asynchronous, vectorized environment.
Used to batch data from multiple copies of an environment, so that each observation becomes a
batch of observations, and expected action is a batch of actions to be applied per-environment.
'''
closed = False
viewer = None
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self,
num_envs,
observation_space,
action_space
):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
@abstractmethod
def reset(self):
'''Reset all the environments and return an array of observations, or a dict of observation arrays.
If step_async is still doing work, that work will be cancelled and step_wait() should not
be called until step_async() is invoked again.
'''
pass
@abstractmethod
def step_async(self,
actions
):
'''Tell all the environments to start taking a step with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is already pending.
'''
pass
@abstractmethod
def step_wait(self):
'''Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a dict of arrays of observations.
- rews: an array of rewards
- dones: an array of 'episode done' booleans
- infos: a sequence of info objects
'''
pass
def close_extras(self):
'''Clean up the extra resources. Only runs when not self.closed.'''
pass
def close(self):
if self.closed:
return
if self.viewer is not None:
self.viewer.close()
self.close_extras()
self.closed = True
def step(self,
actions
):
'''Step the environments synchronously.'''
self.step_async(actions)
return self.step_wait()
def render(self,
mode='human'
):
'''Display environment via a viewer.'''
imgs = self.get_images()
bigimg = tile_images(imgs)
if mode == 'human':
self.get_viewer().imshow(bigimg)
return self.get_viewer().isopen
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
def get_images(self):
'''Return RGB images from each environment.'''
raise NotImplementedError
@property
def unwrapped(self):
if isinstance(self, VecEnvWrapper):
return self.venv.unwrapped
else:
return self
def get_viewer(self):
if self.viewer is None:
from gymnasium.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
return self.viewer
@abstractmethod
def get_attr(self, attr_name, indices=None):
'''Return attribute from vectorized environment.'''
pass
@abstractmethod
def set_attr(self, attr_name, values, indices=None):
'''Set attribute inside vectorized environments.'''
pass
@abstractmethod
def env_method(self,
method_name,
method_args=None,
method_kwargs=None,
indices=None):
'''Call instance methods of vectorized environments.'''
raise NotImplementedError()
def _get_indices(self,
indices
):
'''Convert a flexibly-typed reference to environment indices to an implied list of indices.'''
if indices is None:
indices = range(self.num_envs)
elif isinstance(indices, int):
indices = [indices]
return indices
class VecEnvWrapper(VecEnv):
'''An environment wrapper that applies to an entire batch of environments at once.'''
def __init__(self,
venv,
observation_space=None,
action_space=None
):
self.venv = venv
super().__init__(num_envs=venv.num_envs,
observation_space=observation_space or venv.observation_space,
action_space=action_space or venv.action_space)
def step_async(self, actions):
self.venv.step_async(actions)
@abstractmethod
def reset(self):
pass
@abstractmethod
def step_wait(self):
pass
def close(self):
return self.venv.close()
def render(self, mode='human'):
return self.venv.render(mode=mode)
def get_images(self):
return self.venv.get_images()
def __getattr__(self,
name
):
if name.startswith('_'):
raise AttributeError(
f'attempted to get missing private attribute \'{name}\'')
return getattr(self.venv, name)
def get_attr(self,
attr_name,
indices=None
):
return self.venv.get_attr(attr_name, indices)
def set_attr(self,
attr_name,
values,
indices=None
):
return self.venv.set_attr(attr_name, values, indices)
def env_method(self,
method_name,
method_args=None,
method_kwargs=None,
indices=None):
return self.venv.env_method(method_name,
method_args=method_args,
method_kwargs=method_kwargs,
indices=indices)
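# --- Editor's note: hedged example, not part of the original module. ---
# A minimal sketch of how VecEnvWrapper is meant to be subclassed: only reset()
# and step_wait() must be implemented, everything else is forwarded to the
# wrapped vectorized environment. The 0.1 reward scale is an arbitrary
# illustration value.
class VecRewardScale(VecEnvWrapper):
    '''Example wrapper that rescales the batched rewards by a constant factor.'''
    def __init__(self, venv, scale=0.1):
        super().__init__(venv)
        self.scale = scale
    def reset(self):
        return self.venv.reset()
    def step_wait(self):
        obs, rews, dones, infos = self.venv.step_wait()
        return obs, rews * self.scale, dones, infos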
| [] |
2024-01-10 | danteGPT/langserve | langserve~client.py | from __future__ import annotations
import asyncio
import weakref
from concurrent.futures import ThreadPoolExecutor
from typing import (
Any,
AsyncIterator,
Iterator,
List,
Optional,
Sequence,
Union,
)
from urllib.parse import urljoin
import httpx
from langchain.callbacks.tracers.log_stream import RunLog, RunLogPatch
from langchain.load.dump import dumpd
from langchain.schema.runnable import Runnable
from langchain.schema.runnable.config import (
RunnableConfig,
ensure_config,
get_async_callback_manager_for_config,
get_callback_manager_for_config,
)
from langchain.schema.runnable.utils import Input, Output
from langserve.serialization import simple_dumpd, simple_loads
def _without_callbacks(config: Optional[RunnableConfig]) -> RunnableConfig:
"""Evict callbacks from the config since those are definitely not supported."""
_config = config or {}
return {k: v for k, v in _config.items() if k != "callbacks"}
def _raise_for_status(response: httpx.Response) -> None:
"""Re-raise with a more informative message.
Args:
response: The response to check
Raises:
httpx.HTTPStatusError: If the response is not 2xx, appending the response
text to the message
"""
try:
response.raise_for_status()
except httpx.HTTPStatusError as e:
message = str(e)
# Append the response text if it exists, as it may contain more information
# Especially useful when the user's request is malformed
if e.response.text:
message += f" for {e.response.text}"
raise httpx.HTTPStatusError(
message=message,
request=e.request,
response=e.response,
)
def _is_async() -> bool:
"""Return True if we are in an async context."""
try:
asyncio.get_running_loop()
except RuntimeError:
return False
else:
return True
def _close_clients(sync_client: httpx.Client, async_client: httpx.AsyncClient) -> None:
"""Close the async and sync clients.
_close_clients should not be a bound method since it is called by a weakref
finalizer.
Args:
sync_client: The sync client to close
async_client: The async client to close
"""
sync_client.close()
if _is_async():
# Use a ThreadPoolExecutor to run async_client_close in a separate thread
with ThreadPoolExecutor(max_workers=1) as executor:
# Submit the async_client_close coroutine to the thread pool
future = executor.submit(asyncio.run, async_client.aclose())
future.result()
else:
asyncio.run(async_client.aclose())
class RemoteRunnable(Runnable[Input, Output]):
"""A RemoteRunnable is a runnable that is executed on a remote server.
This client implements the majority of the runnable interface.
The following features are not supported:
- `batch` with `return_exceptions=True` since we do not support exception
translation from the server.
- Callbacks via the `config` argument as serialization of callbacks is not
supported.
"""
def __init__(
self,
url: str,
*,
timeout: Optional[float] = None,
) -> None:
"""Initialize the client.
Args:
url: The url of the server
timeout: The timeout for requests
"""
self.url = url
self.sync_client = httpx.Client(base_url=url, timeout=timeout)
self.async_client = httpx.AsyncClient(base_url=url, timeout=timeout)
# Register cleanup handler once RemoteRunnable is garbage collected
weakref.finalize(self, _close_clients, self.sync_client, self.async_client)
def _invoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
"""Invoke the runnable with the given input and config."""
response = self.sync_client.post(
"/invoke",
json={
"input": simple_dumpd(input),
"config": _without_callbacks(config),
"kwargs": kwargs,
},
)
_raise_for_status(response)
return simple_loads(response.text)["output"]
def invoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
if kwargs:
raise NotImplementedError("kwargs not implemented yet.")
return self._call_with_config(self._invoke, input, config=config)
async def _ainvoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
response = await self.async_client.post(
"/invoke",
json={
"input": simple_dumpd(input),
"config": _without_callbacks(config),
"kwargs": kwargs,
},
)
_raise_for_status(response)
return simple_loads(response.text)["output"]
async def ainvoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Output:
if kwargs:
raise NotImplementedError("kwargs not implemented yet.")
return await self._acall_with_config(self._ainvoke, input, config)
def _batch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
if not inputs:
return []
if return_exceptions:
raise NotImplementedError(
"return_exceptions is not supported for remote clients"
)
if isinstance(config, list):
_config = [_without_callbacks(c) for c in config]
else:
_config = _without_callbacks(config)
response = self.sync_client.post(
"/batch",
json={
"inputs": simple_dumpd(inputs),
"config": _config,
"kwargs": kwargs,
},
)
_raise_for_status(response)
return simple_loads(response.text)["output"]
def batch(
self,
inputs: List[Input],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> List[Output]:
if kwargs:
raise NotImplementedError("kwargs not implemented yet.")
return self._batch_with_config(self._batch, inputs, config)
async def _abatch(
self,
inputs: List[Input],
config: Optional[Union[RunnableConfig, List[RunnableConfig]]] = None,
*,
return_exceptions: bool = False,
**kwargs: Optional[Any],
) -> List[Output]:
"""Batch invoke the runnable."""
if not inputs:
return []
if return_exceptions:
raise NotImplementedError(
"return_exceptions is not supported for remote clients"
)
if isinstance(config, list):
_config = [_without_callbacks(c) for c in config]
else:
_config = _without_callbacks(config)
response = await self.async_client.post(
"/batch",
json={
"inputs": simple_dumpd(inputs),
"config": _config,
"kwargs": kwargs,
},
)
_raise_for_status(response)
return simple_loads(response.text)["output"]
async def abatch(
self,
inputs: List[Input],
config: Optional[RunnableConfig] = None,
*,
return_exceptions: bool = False,
**kwargs: Any,
) -> List[Output]:
"""Batch invoke the runnable."""
if kwargs:
raise NotImplementedError("kwargs not implemented yet.")
if not inputs:
return []
return await self._abatch_with_config(self._abatch, inputs, config)
def stream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> Iterator[Output]:
"""Stream invoke the runnable."""
config = ensure_config(config)
callback_manager = get_callback_manager_for_config(config)
final_output: Optional[Output] = None
run_manager = callback_manager.on_chain_start(
dumpd(self),
simple_dumpd(input),
name=config.get("run_name"),
)
data = {
"input": simple_dumpd(input),
"config": _without_callbacks(config),
"kwargs": kwargs,
}
endpoint = urljoin(self.url, "stream")
try:
from httpx_sse import connect_sse
except ImportError:
raise ImportError(
"Missing `httpx_sse` dependency to use the stream method. "
"Install via `pip install httpx_sse`'"
)
try:
with connect_sse(
self.sync_client, "POST", endpoint, json=data
) as event_source:
for sse in event_source.iter_sse():
if sse.event == "data":
chunk = simple_loads(sse.data)
yield chunk
if final_output:
final_output += chunk
else:
final_output = chunk
elif sse.event == "end":
break
else:
raise NotImplementedError(f"Unknown event {sse.event}")
except BaseException as e:
run_manager.on_chain_error(e)
raise
else:
run_manager.on_chain_end(final_output)
async def astream(
self,
input: Input,
config: Optional[RunnableConfig] = None,
**kwargs: Optional[Any],
) -> AsyncIterator[Output]:
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
final_output: Optional[Output] = None
run_manager = await callback_manager.on_chain_start(
dumpd(self),
simple_dumpd(input),
name=config.get("run_name"),
)
data = {
"input": simple_dumpd(input),
"config": _without_callbacks(config),
"kwargs": kwargs,
}
endpoint = urljoin(self.url, "stream")
try:
from httpx_sse import aconnect_sse
except ImportError:
raise ImportError("You must install `httpx_sse` to use the stream method.")
try:
async with aconnect_sse(
self.async_client, "POST", endpoint, json=data
) as event_source:
async for sse in event_source.aiter_sse():
if sse.event == "data":
chunk = simple_loads(sse.data)
yield chunk
if final_output:
final_output += chunk
else:
final_output = chunk
elif sse.event == "end":
break
else:
raise NotImplementedError(f"Unknown event {sse.event}")
except BaseException as e:
await run_manager.on_chain_error(e)
raise
else:
await run_manager.on_chain_end(final_output)
async def astream_log(
self,
input: Input,
config: Optional[RunnableConfig] = None,
*,
diff: bool = False,
include_names: Optional[Sequence[str]] = None,
include_types: Optional[Sequence[str]] = None,
include_tags: Optional[Sequence[str]] = None,
exclude_names: Optional[Sequence[str]] = None,
exclude_types: Optional[Sequence[str]] = None,
exclude_tags: Optional[Sequence[str]] = None,
**kwargs: Optional[Any],
) -> Union[AsyncIterator[RunLogPatch], AsyncIterator[RunLog]]:
"""Stream all output from a runnable, as reported to the callback system.
This includes all inner runs of LLMs, Retrievers, Tools, etc.
Output is streamed as Log objects, which include a list of
jsonpatch ops that describe how the state of the run has changed in each
step, and the final state of the run.
The jsonpatch ops can be applied in order to construct state.
"""
# Create a stream handler that will emit Log objects
config = ensure_config(config)
callback_manager = get_async_callback_manager_for_config(config)
final_output: Optional[Output] = None
run_manager = await callback_manager.on_chain_start(
dumpd(self),
simple_dumpd(input),
name=config.get("run_name"),
)
data = {
"input": simple_dumpd(input),
"config": _without_callbacks(config),
"kwargs": kwargs,
"diff": diff,
"include_names": include_names,
"include_types": include_types,
"include_tags": include_tags,
"exclude_names": exclude_names,
"exclude_types": exclude_types,
"exclude_tags": exclude_tags,
}
endpoint = urljoin(self.url, "stream_log")
try:
from httpx_sse import aconnect_sse
except ImportError:
raise ImportError("You must install `httpx_sse` to use the stream method.")
try:
async with aconnect_sse(
self.async_client, "POST", endpoint, json=data
) as event_source:
async for sse in event_source.aiter_sse():
if sse.event == "data":
data = simple_loads(sse.data)
if diff:
chunk = RunLogPatch(*data["ops"])
else:
chunk = RunLog(*data["ops"], state=data["state"])
yield chunk
if diff:
if final_output:
final_output += chunk
else:
final_output = chunk
else:
final_output = chunk
elif sse.event == "end":
break
else:
raise NotImplementedError(f"Unknown event {sse.event}")
except BaseException as e:
await run_manager.on_chain_error(e)
raise
else:
await run_manager.on_chain_end(final_output)
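# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Typical client-side use, assuming a LangServe server is running locally and
# serves a runnable under the hypothetical path /my_chain (adjust the URL, the
# inputs and the httpx_sse extra to your deployment).
if __name__ == "__main__":
    demo_chain = RemoteRunnable("http://localhost:8000/my_chain")
    print(demo_chain.invoke({"topic": "cats"}))
    print(demo_chain.batch([{"topic": "cats"}, {"topic": "dogs"}]))
    for demo_chunk in demo_chain.stream({"topic": "cats"}):  # needs httpx_sse
        print(demo_chunk)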
| [] |
2024-01-10 | danteGPT/langserve | langserve~server.py | """FastAPI integration for langchain runnables.
This code contains integration for langchain runnables with FastAPI.
The main entry point is the `add_routes` function which adds the routes to an existing
FastAPI app or APIRouter.
"""
from inspect import isclass
from typing import (
Any,
AsyncIterator,
Dict,
Literal,
Mapping,
Sequence,
Type,
Union,
)
from fastapi import Request
from langchain.callbacks.tracers.log_stream import RunLog, RunLogPatch
from langchain.load.serializable import Serializable
from langchain.schema.runnable import Runnable
from typing_extensions import Annotated
from langserve.version import __version__
try:
    from pydantic.v1 import BaseModel, Field, create_model
except ImportError:
from pydantic import BaseModel, Field, create_model
from langserve.serialization import simple_dumpd, simple_dumps
from langserve.validation import (
create_batch_request_model,
create_batch_response_model,
create_invoke_request_model,
create_invoke_response_model,
create_stream_log_request_model,
create_stream_request_model,
)
try:
from fastapi import APIRouter, FastAPI
except ImportError:
# [server] extra not installed
APIRouter = FastAPI = Any
def _unpack_config(d: Union[BaseModel, Mapping], keys: Sequence[str]) -> Dict[str, Any]:
"""Project the given keys from the given dict."""
_d = d.dict() if isinstance(d, BaseModel) else d
return {k: _d[k] for k in keys if k in _d}
def _unpack_input(validated_model: BaseModel) -> Any:
"""Unpack the decoded input from the validated model."""
if hasattr(validated_model, "__root__"):
model = validated_model.__root__
else:
model = validated_model
if isinstance(model, BaseModel) and not isinstance(model, Serializable):
# If the model is a pydantic model, but not a Serializable, then
# it was created by the server as part of validation and isn't expected
# to be accepted by the runnables as input as a pydantic model,
# instead we need to convert it into a corresponding python dict.
return model.dict()
return model
# This is a global registry of models to avoid creating the same model
# multiple times.
# Duplicated model names break fastapi's openapi generation.
_MODEL_REGISTRY = {}
def _resolve_model(type_: Union[Type, BaseModel], default_name: str) -> Type[BaseModel]:
"""Resolve the input type to a BaseModel."""
if isclass(type_) and issubclass(type_, BaseModel):
model = type_
else:
model = create_model(default_name, __root__=(type_, ...))
hash_ = model.schema_json()
if hash_ not in _MODEL_REGISTRY:
_MODEL_REGISTRY[hash_] = model
return _MODEL_REGISTRY[hash_]
def _add_namespace_to_model(namespace: str, model: Type[BaseModel]) -> Type[BaseModel]:
"""Prefix the name of the given model with the given namespace.
Code is used to help avoid name collisions when hosting multiple runnables
that may use the same underlying models.
Args:
namespace: The namespace to use for the model.
model: The model to create a unique name for.
Returns:
A new model with name prepended with the given namespace.
"""
class Config:
arbitrary_types_allowed = True
model_with_unique_name = create_model(
f"{namespace}{model.__name__}",
config=Config,
**{
name: (
field.annotation,
Field(
field.default,
title=name,
description=field.field_info.description,
),
)
for name, field in model.__fields__.items()
},
)
model_with_unique_name.update_forward_refs()
return model_with_unique_name
def _add_tracing_info_to_metadata(config: Dict[str, Any], request: Request) -> None:
"""Add information useful for tracing and debugging purposes.
Args:
config: The config to expand with tracing information.
request: The request to use for expanding the metadata.
"""
metadata = config["metadata"] if "metadata" in config else {}
info = {
"__useragent": request.headers.get("user-agent"),
"__langserve_version": __version__,
}
metadata.update(info)
config["metadata"] = metadata
# PUBLIC API
def add_routes(
app: Union[FastAPI, APIRouter],
runnable: Runnable,
*,
path: str = "",
input_type: Union[Type, Literal["auto"], BaseModel] = "auto",
output_type: Union[Type, Literal["auto"], BaseModel] = "auto",
config_keys: Sequence[str] = (),
) -> None:
"""Register the routes on the given FastAPI app or APIRouter.
The following routes are added per runnable under the specified `path`:
* /invoke - for invoking a runnable with a single input
* /batch - for invoking a runnable with multiple inputs
* /stream - for streaming the output of a runnable
* /stream_log - for streaming intermediate outputs for a runnable
* /input_schema - for returning the input schema of the runnable
* /output_schema - for returning the output schema of the runnable
* /config_schema - for returning the config schema of the runnable
Args:
app: The FastAPI app or APIRouter to which routes should be added.
runnable: The runnable to wrap, must not be stateful.
path: A path to prepend to all routes.
input_type: type to use for input validation.
Default is "auto" which will use the InputType of the runnable.
User is free to provide a custom type annotation.
output_type: type to use for output validation.
Default is "auto" which will use the OutputType of the runnable.
User is free to provide a custom type annotation.
config_keys: list of config keys that will be accepted, by default
no config keys are accepted.
"""
try:
from sse_starlette import EventSourceResponse
except ImportError:
raise ImportError(
"sse_starlette must be installed to implement the stream and "
"stream_log endpoints. "
"Use `pip install sse_starlette` to install."
)
input_type_ = _resolve_model(
runnable.input_schema if input_type == "auto" else input_type, "Input"
)
output_type_ = _resolve_model(
runnable.output_schema if output_type == "auto" else output_type, "Output"
)
namespace = path or ""
model_namespace = path.strip("/").replace("/", "_")
config = _add_namespace_to_model(
model_namespace, runnable.config_schema(include=config_keys)
)
InvokeRequest = create_invoke_request_model(model_namespace, input_type_, config)
BatchRequest = create_batch_request_model(model_namespace, input_type_, config)
StreamRequest = create_stream_request_model(model_namespace, input_type_, config)
StreamLogRequest = create_stream_log_request_model(
model_namespace, input_type_, config
)
# Generate the response models
InvokeResponse = create_invoke_response_model(model_namespace, output_type_)
BatchResponse = create_batch_response_model(model_namespace, output_type_)
@app.post(
f"{namespace}/invoke",
response_model=InvokeResponse,
)
async def invoke(
invoke_request: Annotated[InvokeRequest, InvokeRequest],
request: Request,
) -> InvokeResponse:
"""Invoke the runnable with the given input and config."""
# Request is first validated using InvokeRequest which takes into account
# config_keys as well as input_type.
config = _unpack_config(invoke_request.config, config_keys)
_add_tracing_info_to_metadata(config, request)
output = await runnable.ainvoke(
_unpack_input(invoke_request.input), config=config
)
return InvokeResponse(output=simple_dumpd(output))
#
@app.post(f"{namespace}/batch", response_model=BatchResponse)
async def batch(
batch_request: Annotated[BatchRequest, BatchRequest],
request: Request,
) -> BatchResponse:
"""Invoke the runnable with the given inputs and config."""
if isinstance(batch_request.config, list):
config = [
_unpack_config(config, config_keys) for config in batch_request.config
]
for c in config:
_add_tracing_info_to_metadata(c, request)
else:
config = _unpack_config(batch_request.config, config_keys)
_add_tracing_info_to_metadata(config, request)
inputs = [_unpack_input(input_) for input_ in batch_request.inputs]
output = await runnable.abatch(inputs, config=config)
return BatchResponse(output=simple_dumpd(output))
@app.post(f"{namespace}/stream")
async def stream(
stream_request: Annotated[StreamRequest, StreamRequest],
request: Request,
) -> EventSourceResponse:
"""Invoke the runnable stream the output.
This endpoint allows to stream the output of the runnable.
The endpoint uses a server sent event stream to stream the output.
https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events
Important: Set the "text/event-stream" media type for request headers if
not using an existing SDK.
This endpoint uses two different types of events:
* data - for streaming the output of the runnable
{
"event": "data",
"data": {
...
}
}
* end - for signaling the end of the stream.
This helps the client to know when to stop listening for events and
know that the streaming has ended successfully.
{
"event": "end",
}
"""
# Request is first validated using InvokeRequest which takes into account
# config_keys as well as input_type.
# After validation, the input is loaded using LangChain's load function.
input_ = _unpack_input(stream_request.input)
config = _unpack_config(stream_request.config, config_keys)
_add_tracing_info_to_metadata(config, request)
async def _stream() -> AsyncIterator[dict]:
"""Stream the output of the runnable."""
async for chunk in runnable.astream(
input_,
config=config,
):
yield {"data": simple_dumps(chunk), "event": "data"}
yield {"event": "end"}
return EventSourceResponse(_stream())
@app.post(f"{namespace}/stream_log")
async def stream_log(
stream_log_request: Annotated[StreamLogRequest, StreamLogRequest],
request: Request,
) -> EventSourceResponse:
"""Invoke the runnable stream_log the output.
This endpoint allows to stream the output of the runnable, including
the output of all intermediate steps.
The endpoint uses a server sent event stream to stream the output.
https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events
Important: Set the "text/event-stream" media type for request headers if
not using an existing SDK.
This endpoint uses two different types of events:
* data - for streaming the output of the runnable
{
"event": "data",
"data": {
...
}
}
* end - for signaling the end of the stream.
This helps the client to know when to stop listening for events and
know that the streaming has ended successfully.
{
"event": "end",
}
"""
# Request is first validated using InvokeRequest which takes into account
# config_keys as well as input_type.
# After validation, the input is loaded using LangChain's load function.
input_ = _unpack_input(stream_log_request.input)
config = _unpack_config(stream_log_request.config, config_keys)
_add_tracing_info_to_metadata(config, request)
async def _stream_log() -> AsyncIterator[dict]:
"""Stream the output of the runnable."""
async for chunk in runnable.astream_log(
input_,
config=config,
diff=stream_log_request.diff,
include_names=stream_log_request.include_names,
include_types=stream_log_request.include_types,
include_tags=stream_log_request.include_tags,
exclude_names=stream_log_request.exclude_names,
exclude_types=stream_log_request.exclude_types,
exclude_tags=stream_log_request.exclude_tags,
):
if stream_log_request.diff: # Run log patch
if not isinstance(chunk, RunLogPatch):
raise AssertionError(
f"Expected a RunLog instance got {type(chunk)}"
)
data = {
"ops": chunk.ops,
}
else:
# Then it's a run log
if not isinstance(chunk, RunLog):
raise AssertionError(
f"Expected a RunLog instance got {type(chunk)}"
)
data = {
"state": chunk.state,
"ops": chunk.ops,
}
# Temporary adapter
yield {
"data": simple_dumps(data),
"event": "data",
}
yield {"event": "end"}
return EventSourceResponse(_stream_log())
@app.get(f"{namespace}/input_schema")
async def input_schema() -> Any:
"""Return the input schema of the runnable."""
return runnable.input_schema.schema()
@app.get(f"{namespace}/output_schema")
async def output_schema() -> Any:
"""Return the output schema of the runnable."""
return runnable.output_schema.schema()
@app.get(f"{namespace}/config_schema")
async def config_schema() -> Any:
"""Return the config schema of the runnable."""
return runnable.config_schema(include=config_keys).schema()
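# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Minimal wiring for the routes documented above: expose a trivial runnable so
# the generated /invoke, /batch, /stream and schema endpoints can be inspected
# at /docs. Assumes fastapi, uvicorn and sse_starlette are installed; the
# /add_one path and the lambda are illustration-only.
if __name__ == "__main__":
    import uvicorn
    from langchain.schema.runnable import RunnableLambda
    demo_app = FastAPI()
    add_routes(demo_app, RunnableLambda(lambda x: x + 1), path="/add_one")
    uvicorn.run(demo_app, host="localhost", port=8000)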
| [] |
2024-01-10 | danteGPT/langserve | examples~chain~server.py | #!/usr/bin/env python
"""Example LangChain server exposes a chain composed of a prompt and an LLM."""
from fastapi import FastAPI
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from typing_extensions import TypedDict
from langserve import add_routes
model = ChatOpenAI()
prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
chain = prompt | model
app = FastAPI(
title="LangChain Server",
version="1.0",
description="Spin up a simple api server using Langchain's Runnable interfaces",
)
# The input type is automatically inferred from the runnable
# interface; however, if you want to override it, you can do so
# by passing in the input_type argument to add_routes.
class ChainInput(TypedDict):
"""The input to the chain."""
topic: str
"""The topic of the joke."""
add_routes(app, chain, input_type=ChainInput)
# Alternatively, you can rely on langchain's type inference
# to infer the input type from the runnable interface.
# add_routes(app, chain)
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="localhost", port=8000)
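# --- Editor's note: hedged usage sketch, not part of the original example. ---
# Once the server is running, the chain can be called from another process via
# the LangServe client (or plain HTTP against /invoke, /batch and /stream):
#
# from langserve import RemoteRunnable
# joke_chain = RemoteRunnable("http://localhost:8000/")
# joke_chain.invoke({"topic": "parrots"})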
| [
"tell me a joke about {topic}"
] |
2024-01-10 | danteGPT/langserve | tests~unit_tests~test_server_client.py | """Test the server and client together."""
import asyncio
from asyncio import AbstractEventLoop
from contextlib import asynccontextmanager
from typing import List, Optional, Union
import httpx
import pytest
import pytest_asyncio
from fastapi import FastAPI
from fastapi.testclient import TestClient
from httpx import AsyncClient
from langchain.callbacks.tracers.log_stream import RunLog, RunLogPatch
from langchain.prompts import PromptTemplate
from langchain.schema.messages import HumanMessage, SystemMessage
from langchain.schema.runnable import RunnablePassthrough
from langchain.schema.runnable.base import RunnableLambda
from langchain.schema.runnable.utils import ConfigurableField
from pytest_mock import MockerFixture
from langserve.client import RemoteRunnable
from langserve.server import add_routes
from tests.unit_tests.utils import FakeListLLM
@pytest.fixture(scope="session")
def event_loop():
"""Create an instance of the default event loop for each test case."""
loop = asyncio.get_event_loop()
try:
yield loop
finally:
loop.close()
@pytest.fixture()
def app(event_loop: AbstractEventLoop) -> FastAPI:
"""A simple server that wraps a Runnable and exposes it as an API."""
async def add_one_or_passthrough(
x: Union[int, HumanMessage]
) -> Union[int, HumanMessage]:
"""Add one to int or passthrough."""
if isinstance(x, int):
return x + 1
else:
return x
runnable_lambda = RunnableLambda(func=add_one_or_passthrough)
app = FastAPI()
try:
add_routes(app, runnable_lambda)
yield app
finally:
del app
@pytest.fixture()
def client(app: FastAPI) -> RemoteRunnable:
"""Create a FastAPI app that exposes the Runnable as an API."""
remote_runnable_client = RemoteRunnable(url="http://localhost:9999")
sync_client = TestClient(app=app)
remote_runnable_client.sync_client = sync_client
yield remote_runnable_client
sync_client.close()
@asynccontextmanager
async def get_async_client(
server: FastAPI, path: Optional[str] = None
) -> RemoteRunnable:
"""Get an async client."""
url = "http://localhost:9999"
if path:
url += path
remote_runnable_client = RemoteRunnable(url=url)
async_client = AsyncClient(app=server, base_url=url)
remote_runnable_client.async_client = async_client
try:
yield remote_runnable_client
finally:
await async_client.aclose()
@pytest_asyncio.fixture()
async def async_client(app: FastAPI) -> RemoteRunnable:
"""Create a FastAPI app that exposes the Runnable as an API."""
async with get_async_client(app) as client:
yield client
def test_server(app: FastAPI) -> None:
"""Test the server directly via HTTP requests."""
sync_client = TestClient(app=app)
# Test invoke
response = sync_client.post("/invoke", json={"input": 1})
assert response.json() == {"output": 2}
# Test batch
response = sync_client.post("/batch", json={"inputs": [1]})
assert response.json() == {
"output": [2],
}
# Test schema
input_schema = sync_client.get("/input_schema").json()
assert isinstance(input_schema, dict)
assert input_schema["title"] == "RunnableLambdaInput"
output_schema = sync_client.get("/output_schema").json()
assert isinstance(output_schema, dict)
assert output_schema["title"] == "RunnableLambdaOutput"
output_schema = sync_client.get("/config_schema").json()
assert isinstance(output_schema, dict)
assert output_schema["title"] == "RunnableLambdaConfig"
# TODO(Team): Fix test. Issue with eventloops right now when using sync client
## Test stream
# response = sync_client.post("/stream", json={"input": 1})
# assert response.text == "event: data\r\ndata: 2\r\n\r\nevent: end\r\n\r\n"
@pytest.mark.asyncio
async def test_server_async(app: FastAPI) -> None:
"""Test the server directly via HTTP requests."""
async_client = AsyncClient(app=app, base_url="http://localhost:9999")
# Test invoke
response = await async_client.post("/invoke", json={"input": 1})
assert response.json() == {"output": 2}
# Test batch
response = await async_client.post("/batch", json={"inputs": [1]})
assert response.json() == {
"output": [2],
}
# Test stream
response = await async_client.post("/stream", json={"input": 1})
assert response.text == "event: data\r\ndata: 2\r\n\r\nevent: end\r\n\r\n"
def test_invoke(client: RemoteRunnable) -> None:
"""Test sync invoke."""
assert client.invoke(1) == 2
assert client.invoke(HumanMessage(content="hello")) == HumanMessage(content="hello")
# Test invocation with config
assert client.invoke(1, config={"tags": ["test"]}) == 2
def test_batch(client: RemoteRunnable) -> None:
"""Test sync batch."""
assert client.batch([]) == []
assert client.batch([1, 2, 3]) == [2, 3, 4]
assert client.batch([HumanMessage(content="hello")]) == [
HumanMessage(content="hello")
]
@pytest.mark.asyncio
async def test_ainvoke(async_client: RemoteRunnable) -> None:
"""Test async invoke."""
assert await async_client.ainvoke(1) == 2
assert await async_client.ainvoke(HumanMessage(content="hello")) == HumanMessage(
content="hello"
)
@pytest.mark.asyncio
async def test_abatch(async_client: RemoteRunnable) -> None:
"""Test async batch."""
assert await async_client.abatch([]) == []
assert await async_client.abatch([1, 2, 3]) == [2, 3, 4]
assert await async_client.abatch([HumanMessage(content="hello")]) == [
HumanMessage(content="hello")
]
# TODO(Team): Determine how to test
# Some issue with event loops
# def test_stream(client: RemoteRunnable) -> None:
# """Test stream."""
# assert list(client.stream(1)) == [2]
@pytest.mark.asyncio
async def test_astream(async_client: RemoteRunnable) -> None:
"""Test async stream."""
outputs = []
async for chunk in async_client.astream(1):
outputs.append(chunk)
assert outputs == [2]
outputs = []
data = HumanMessage(content="hello")
async for chunk in async_client.astream(data):
outputs.append(chunk)
assert outputs == [data]
@pytest.mark.asyncio
async def test_astream_log_no_diff(async_client: RemoteRunnable) -> None:
"""Test async stream."""
run_logs = []
async for chunk in async_client.astream_log(1, diff=False):
run_logs.append(chunk)
assert len(run_logs) == 3
op = run_logs[0].ops[0]
uuid = op["value"]["id"]
for run_log in run_logs:
assert isinstance(run_log, RunLog)
states = [run_log.state for run_log in run_logs]
assert states == [
{
"final_output": None,
"id": uuid,
"logs": {},
"streamed_output": [],
},
{
"final_output": {"output": 2},
"id": uuid,
"logs": {},
"streamed_output": [],
},
{
"final_output": {"output": 2},
"id": uuid,
"logs": {},
"streamed_output": [2],
},
]
# Check that we're picking up one extra op on each chunk
assert [len(run_log.ops) for run_log in run_logs] == [1, 2, 3]
@pytest.mark.asyncio
async def test_astream_log(async_client: RemoteRunnable) -> None:
"""Test async stream."""
run_log_patches = []
async for chunk in async_client.astream_log(1, diff=True):
run_log_patches.append(chunk)
op = run_log_patches[0].ops[0]
uuid = op["value"]["id"]
assert [run_log_patch.ops for run_log_patch in run_log_patches] == [
[
{
"op": "replace",
"path": "",
"value": {
"final_output": {"output": 2},
"id": uuid,
"logs": {},
"streamed_output": [],
},
}
],
[{"op": "replace", "path": "/final_output", "value": {"output": 2}}],
[{"op": "add", "path": "/streamed_output/-", "value": 2}],
]
def test_invoke_as_part_of_sequence(client: RemoteRunnable) -> None:
"""Test as part of sequence."""
runnable = client | RunnableLambda(func=lambda x: x + 1)
# without config
assert runnable.invoke(1) == 3
# with config
assert runnable.invoke(1, config={"tags": ["test"]}) == 3
# without config
assert runnable.batch([1, 2]) == [3, 4]
# with config
assert runnable.batch([1, 2], config={"tags": ["test"]}) == [3, 4]
# TODO(Team): Determine how to test some issues with event loops for testing
# set up
# without config
# assert list(runnable.stream([1, 2])) == [3, 4]
# # with config
# assert list(runnable.stream([1, 2], config={"tags": ["test"]})) == [3, 4]
@pytest.mark.asyncio
async def test_invoke_as_part_of_sequence_async(async_client: RemoteRunnable) -> None:
"""Test as part of a sequence.
This helps to verify that config is handled properly (e.g., callbacks are not
passed to the server, but other config is)
"""
runnable = async_client | RunnableLambda(
func=lambda x: x + 1 if isinstance(x, int) else x
).with_config({"run_name": "hello"})
# without config
assert await runnable.ainvoke(1) == 3
# with config
assert await runnable.ainvoke(1, config={"tags": ["test"]}) == 3
# without config
assert await runnable.abatch([1, 2]) == [3, 4]
# with config
assert await runnable.abatch([1, 2], config={"tags": ["test"]}) == [3, 4]
# Verify can pass many configs to batch
configs = [{"tags": ["test"]}, {"tags": ["test2"]}]
assert await runnable.abatch([1, 2], config=configs) == [3, 4]
    # Verify a ValueError is raised when the number of configs does not match
with pytest.raises(ValueError):
assert await runnable.abatch([1, 2], config=[configs[0]]) == [3, 4]
configs = [{"tags": ["test"]}, {"tags": ["test2"]}]
assert await runnable.abatch([1, 2], config=configs) == [3, 4]
configs = [
{"tags": ["test"]},
{"tags": ["test2"], "other": "test"},
]
assert await runnable.abatch([1, 2], config=configs) == [3, 4]
# Without config
assert [x async for x in runnable.astream(1)] == [3]
# With Config
assert [x async for x in runnable.astream(1, config={"tags": ["test"]})] == [3]
# With config and LC input data
assert [
x
async for x in runnable.astream(
HumanMessage(content="hello"), config={"tags": ["test"]}
)
] == [HumanMessage(content="hello")]
log_patches = [x async for x in runnable.astream_log(1)]
for log_patch in log_patches:
assert isinstance(log_patch, RunLogPatch)
# Only check the first entry (not validating implementation here)
first_op = log_patches[0].ops[0]
assert first_op["op"] == "replace"
assert first_op["path"] == ""
# Validate with HumanMessage
log_patches = [x async for x in runnable.astream_log(HumanMessage(content="hello"))]
for log_patch in log_patches:
assert isinstance(log_patch, RunLogPatch)
# Only check the first entry (not validating implementation here)
first_op = log_patches[0].ops[0]
assert first_op == {
"op": "replace",
"path": "",
"value": {
"final_output": None,
"id": first_op["value"]["id"],
"logs": {},
"streamed_output": [],
},
}
@pytest.mark.asyncio
async def test_multiple_runnables(event_loop: AbstractEventLoop) -> None:
"""Test serving multiple runnables."""
async def add_one(x: int) -> int:
"""Add one to simulate a valid function"""
return x + 1
async def mul_2(x: int) -> int:
"""Add one to simulate a valid function"""
return x * 2
app = FastAPI()
add_routes(app, RunnableLambda(add_one), path="/add_one")
add_routes(
app,
RunnableLambda(mul_2),
input_type=int,
path="/mul_2",
)
async with get_async_client(app, path="/add_one") as runnable:
async with get_async_client(app, path="/mul_2") as runnable2:
assert await runnable.ainvoke(1) == 2
assert await runnable2.ainvoke(4) == 8
composite_runnable = runnable | runnable2
assert await composite_runnable.ainvoke(3) == 8
# Invoke runnable (remote add_one), local add_one, remote mul_2
composite_runnable_2 = runnable | add_one | runnable2
assert await composite_runnable_2.ainvoke(3) == 10
@pytest.mark.asyncio
async def test_input_validation(
event_loop: AbstractEventLoop, mocker: MockerFixture
) -> None:
"""Test client side and server side exceptions."""
async def add_one(x: int) -> int:
"""Add one to simulate a valid function"""
return x + 1
server_runnable = RunnableLambda(func=add_one)
server_runnable2 = RunnableLambda(func=add_one)
app = FastAPI()
add_routes(
app,
server_runnable,
input_type=int,
path="/add_one",
)
add_routes(
app,
server_runnable2,
input_type=int,
path="/add_one_config",
config_keys=["tags", "run_name", "metadata"],
)
async with get_async_client(app, path="/add_one") as runnable:
# Verify that can be invoked with valid input
assert await runnable.ainvoke(1) == 2
# Verify that the following substring is present in the error message
with pytest.raises(httpx.HTTPError):
await runnable.ainvoke("hello")
with pytest.raises(httpx.HTTPError):
await runnable.abatch(["hello"])
config = {"tags": ["test"], "metadata": {"a": 5}}
invoke_spy_1 = mocker.spy(server_runnable, "ainvoke")
# Verify config is handled correctly
async with get_async_client(app, path="/add_one") as runnable1:
# Verify that can be invoked with valid input
# Config ignored for runnable1
assert await runnable1.ainvoke(1, config=config) == 2
# Config should be ignored but default debug information
# will still be added
config_seen = invoke_spy_1.call_args[1]["config"]
assert "metadata" in config_seen
assert "__useragent" in config_seen["metadata"]
assert "__langserve_version" in config_seen["metadata"]
invoke_spy_2 = mocker.spy(server_runnable2, "ainvoke")
async with get_async_client(app, path="/add_one_config") as runnable2:
# Config accepted for runnable2
assert await runnable2.ainvoke(1, config=config) == 2
# Config ignored
config_seen = invoke_spy_2.call_args[1]["config"]
assert config_seen["tags"] == ["test"]
assert config_seen["metadata"]["a"] == 5
assert "__useragent" in config_seen["metadata"]
assert "__langserve_version" in config_seen["metadata"]
@pytest.mark.asyncio
async def test_input_validation_with_lc_types(event_loop: AbstractEventLoop) -> None:
"""Test client side and server side exceptions."""
app = FastAPI()
# Test with langchain objects
add_routes(
app, RunnablePassthrough(), input_type=List[HumanMessage], config_keys=["tags"]
)
# Invoke request
async with get_async_client(app) as passthrough_runnable:
with pytest.raises(httpx.HTTPError):
await passthrough_runnable.ainvoke("Hello")
with pytest.raises(httpx.HTTPError):
await passthrough_runnable.ainvoke(["hello"])
with pytest.raises(httpx.HTTPError):
await passthrough_runnable.ainvoke(HumanMessage(content="h"))
with pytest.raises(httpx.HTTPError):
await passthrough_runnable.ainvoke([SystemMessage(content="hello")])
# Valid
result = await passthrough_runnable.ainvoke([HumanMessage(content="hello")])
# Valid
result = await passthrough_runnable.ainvoke(
[HumanMessage(content="hello")], config={"tags": ["test"]}
)
assert isinstance(result, list)
assert isinstance(result[0], HumanMessage)
# Batch request
async with get_async_client(app) as passthrough_runnable:
# invalid
with pytest.raises(httpx.HTTPError):
await passthrough_runnable.abatch("Hello")
with pytest.raises(httpx.HTTPError):
await passthrough_runnable.abatch(["hello"])
with pytest.raises(httpx.HTTPError):
await passthrough_runnable.abatch([[SystemMessage(content="hello")]])
# valid
result = await passthrough_runnable.abatch([[HumanMessage(content="hello")]])
assert isinstance(result, list)
assert isinstance(result[0], list)
assert isinstance(result[0][0], HumanMessage)
def test_client_close() -> None:
"""Test that the client can be automatically."""
runnable = RemoteRunnable(url="/dev/null", timeout=1)
sync_client = runnable.sync_client
async_client = runnable.async_client
assert async_client.is_closed is False
assert sync_client.is_closed is False
del runnable
assert sync_client.is_closed is True
assert async_client.is_closed is True
@pytest.mark.asyncio
async def test_async_client_close() -> None:
"""Test that the client can be automatically."""
runnable = RemoteRunnable(url="/dev/null", timeout=1)
sync_client = runnable.sync_client
async_client = runnable.async_client
assert async_client.is_closed is False
assert sync_client.is_closed is False
del runnable
assert sync_client.is_closed is True
assert async_client.is_closed is True
@pytest.mark.asyncio
async def test_openapi_docs_with_identical_runnables(
event_loop: AbstractEventLoop, mocker: MockerFixture
) -> None:
"""Test client side and server side exceptions."""
async def add_one(x: int) -> int:
"""Add one to simulate a valid function"""
return x + 1
server_runnable = RunnableLambda(func=add_one)
server_runnable2 = RunnableLambda(func=add_one)
app = FastAPI()
add_routes(
app,
server_runnable,
path="/a",
)
# Add another route that uses the same schema (inferred from runnable input schema)
add_routes(
app,
server_runnable2,
path="/b",
config_keys=["tags"],
)
async with AsyncClient(app=app, base_url="http://localhost:9999") as async_client:
response = await async_client.get("/openapi.json")
assert response.status_code == 200
@pytest.mark.asyncio
async def test_configurable_runnables(event_loop: AbstractEventLoop) -> None:
"""Add tests for using langchain's configurable runnables"""
template = PromptTemplate.from_template("say {name}").configurable_fields(
template=ConfigurableField(
id="template",
name="Template",
description="The template to use for the prompt",
)
)
llm = (
RunnablePassthrough() | RunnableLambda(lambda prompt: prompt.text)
).configurable_alternatives(
ConfigurableField(
id="llm",
name="LLM",
),
hardcoded_llm=FakeListLLM(responses=["hello Mr. Kitten!"]),
)
chain = template | llm
# Check server side
assert chain.invoke({"name": "cat"}) == "say cat"
app = FastAPI()
add_routes(app, chain, config_keys=["tags", "configurable"])
async with get_async_client(app) as remote_runnable:
# Test with hard-coded LLM
assert chain.invoke({"name": "cat"}) == "say cat"
# Test with different prompt
assert (
await remote_runnable.ainvoke(
{"name": "foo"},
{"configurable": {"template": "hear {name}"}, "tags": ["h"]},
)
== "hear foo"
)
# Test with alternative passthrough LLM
assert (
await remote_runnable.ainvoke(
{"name": "foo"},
{"configurable": {"llm": "hardcoded_llm"}, "tags": ["h"]},
)
== "hello Mr. Kitten!"
)
| [
"The template to use for the prompt",
"say {name}",
"h",
"hello"
] |
2024-01-10 | danteGPT/langserve | langserve~serialization.py | """Serialization module for Well Known LangChain objects.
Specialized JSON serialization for well known LangChain objects that
can be expected to be frequently transmitted between chains.
"""
import json
from typing import Any, Union
from langchain.prompts.base import StringPromptValue
from langchain.prompts.chat import ChatPromptValueConcrete
from langchain.schema.agent import AgentAction, AgentActionMessageLog, AgentFinish
from langchain.schema.document import Document
from langchain.schema.messages import (
AIMessage,
AIMessageChunk,
ChatMessage,
ChatMessageChunk,
FunctionMessage,
FunctionMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
)
try:
from pydantic.v1 import BaseModel, ValidationError
except ImportError:
from pydantic import BaseModel, ValidationError
class WellKnownLCObject(BaseModel):
"""A well known LangChain object.
A pydantic model that defines what constitutes a well known LangChain object.
All well-known objects are allowed to be serialized and de-serialized.
"""
__root__: Union[
Document,
HumanMessage,
SystemMessage,
ChatMessage,
FunctionMessage,
AIMessage,
HumanMessageChunk,
SystemMessageChunk,
ChatMessageChunk,
FunctionMessageChunk,
AIMessageChunk,
StringPromptValue,
ChatPromptValueConcrete,
AgentAction,
AgentFinish,
AgentActionMessageLog,
]
# Custom JSON Encoder
class _LangChainEncoder(json.JSONEncoder):
"""Custom JSON Encoder that can encode pydantic objects as well."""
def default(self, obj) -> Any:
if isinstance(obj, BaseModel):
return obj.dict()
return super().default(obj)
# Custom JSON Decoder
class _LangChainDecoder(json.JSONDecoder):
"""Custom JSON Decoder that handles well known LangChain objects."""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Initialize the LangChainDecoder."""
super().__init__(object_hook=self.decoder, *args, **kwargs)
def decoder(self, value) -> Any:
"""Decode the value."""
if isinstance(value, dict):
try:
obj = WellKnownLCObject.parse_obj(value)
return obj.__root__
except ValidationError:
return {key: self.decoder(v) for key, v in value.items()}
elif isinstance(value, list):
return [self.decoder(item) for item in value]
else:
return value
# PUBLIC API
def simple_dumpd(obj: Any) -> Any:
"""Convert the given object to a JSON serializable object."""
return json.loads(json.dumps(obj, cls=_LangChainEncoder))
def simple_dumps(obj: Any) -> str:
"""Dump the given object as a JSON string."""
return json.dumps(obj, cls=_LangChainEncoder)
def simple_loads(s: str) -> Any:
"""Load the given JSON string."""
return json.loads(s, cls=_LangChainDecoder)
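# --- Editor's note: hedged round-trip sketch, not part of the original module. ---
# A well known LangChain object survives dump/load: the dict emitted by the
# encoder is recognised by WellKnownLCObject on the way back in and revived as
# a HumanMessage, while ordinary dicts pass through unchanged.
if __name__ == "__main__":
    demo = [HumanMessage(content="hello"), {"score": 1}]
    revived = simple_loads(simple_dumps(demo))
    assert isinstance(revived[0], HumanMessage)
    assert revived[1] == {"score": 1}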
| [] |
2024-01-10 | danteGPT/langserve | examples~agent~server.py | #!/usr/bin/env python
"""Example LangChain server exposes a conversational retrieval chain."""
from fastapi import FastAPI
from langchain.agents import AgentExecutor, tool
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.tools.render import format_tool_to_openai_function
from langchain.vectorstores import FAISS
from pydantic import BaseModel
from langserve import add_routes
vectorstore = FAISS.from_texts(
["cats like fish", "dogs like sticks"], embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()
@tool
def get_eugene_thoughts(query: str) -> list:
"""Returns Eugene's thoughts on a topic."""
return retriever.get_relevant_documents(query)
tools = [get_eugene_thoughts]
prompt = ChatPromptTemplate.from_messages(
[
("system", "You are a helpful assistant."),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)
llm = ChatOpenAI()
llm_with_tools = llm.bind(functions=[format_tool_to_openai_function(t) for t in tools])
agent = (
{
"input": lambda x: x["input"],
"agent_scratchpad": lambda x: format_to_openai_functions(
x["intermediate_steps"]
),
}
| prompt
| llm_with_tools
| OpenAIFunctionsAgentOutputParser()
)
agent_executor = AgentExecutor(agent=agent, tools=tools)
app = FastAPI(
title="LangChain Server",
version="1.0",
description="Spin up a simple api server using Langchain's Runnable interfaces",
)
# We need to add these input/output schemas because the current AgentExecutor
# is lacking in schemas.
class Input(BaseModel):
input: str
class Output(BaseModel):
output: str
# Adds routes to the app for using the chain under:
# /invoke
# /batch
# /stream
add_routes(app, agent_executor, input_type=Input, output_type=Output)
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="localhost", port=8000)
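# --- Editor's note: hedged usage sketch, not part of the original example. ---
# With the server running, the agent can be queried through the generated
# endpoints; the payload shape follows the Input model declared above:
#
# from langserve import RemoteRunnable
# agent = RemoteRunnable("http://localhost:8000/")
# agent.invoke({"input": "What do cats like?"})  # -> {"output": "..."}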
| [
"agent_scratchpad",
"You are a helpful assistant.",
"Returns Eugene's thoughts on a topic.",
"{input}"
] |
2024-01-10 | tencentmusic/supersonic | chat~python~utils~text2vec.py | # -*- coding:utf-8 -*-
from typing import List
from chromadb.api.types import Documents, EmbeddingFunction, Embeddings
from langchain.embeddings import HuggingFaceEmbeddings
from config.config_parse import HF_TEXT2VEC_MODEL_NAME
hg_embedding = HuggingFaceEmbeddings(model_name=HF_TEXT2VEC_MODEL_NAME)
class Text2VecEmbeddingFunction(EmbeddingFunction):
def __call__(self, texts: Documents) -> Embeddings:
embeddings = hg_embedding.embed_documents(texts)
return embeddings
def get_embeddings(documents: List[str]) -> List[List[float]]:
embeddings = hg_embedding.embed_documents(documents)
return embeddings
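# Usage sketch (illustrative, not from this repo; assumes chromadb's standard client API):
#   import chromadb
#   client = chromadb.Client()
#   collection = client.create_collection(
#       name="docs", embedding_function=Text2VecEmbeddingFunction())
#   collection.add(documents=["some text"], ids=["doc-1"])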
| [] |
2024-01-10 | tencentmusic/supersonic | chat~python~instances~llm_instance.py | # -*- coding:utf-8 -*-
from langchain import llms
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from config.config_parse import LLM_PROVIDER_NAME, llm_config_dict
def get_llm_provider(llm_provider_name: str, llm_config_dict: dict):
if llm_provider_name in llms.type_to_cls_dict:
llm_provider = llms.type_to_cls_dict[llm_provider_name]
llm = llm_provider(**llm_config_dict)
return llm
else:
raise Exception("llm_provider_name is not supported: {}".format(llm_provider_name))
llm = get_llm_provider(LLM_PROVIDER_NAME, llm_config_dict) | [] |
2024-01-10 | tencentmusic/supersonic | chat~python~services~s2sql~sql_agent.py | import os
import sys
from typing import List, Union, Mapping, Any
from collections import Counter
import random
import asyncio
from enum import Enum
from langchain.llms.base import BaseLLM
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from instances.logging_instance import logger
from s2sql.constructor import FewShotPromptTemplate2
from s2sql.output_parser import schema_link_parse, combo_schema_link_parse, combo_sql_parse
from s2sql.auto_cot_run import transform_sql_example, transform_sql_example_autoCoT_run
class Text2DSLAgentBase(object):
def __init__(self, num_fewshots:int, num_examples:int, num_self_consistency:int,
sql_example_prompter:FewShotPromptTemplate2, llm: BaseLLM) -> None:
self.num_fewshots = num_fewshots
self.num_examples = num_examples
assert self.num_fewshots <= self.num_examples
self.num_self_consistency = num_self_consistency
self.llm = llm
self.sql_example_prompter = sql_example_prompter
def get_examples_candidates(self, question: str, filter_condition: Mapping[str, str], num_examples: int)->List[Mapping[str, str]]:
few_shot_example_meta_list = self.sql_example_prompter.retrieve_few_shot_example(question, num_examples, filter_condition)
if len(few_shot_example_meta_list) == num_examples:
return few_shot_example_meta_list
elif len(few_shot_example_meta_list) < num_examples:
logger.info(f"few_shot_example_meta_list size: {len(few_shot_example_meta_list)} < num_examples: {num_examples}")
existed_id_set = set([item['id'] for item in few_shot_example_meta_list])
extra_few_shot_example_meta_list = self.sql_example_prompter.retrieve_few_shot_example(query_text=question, retrieval_num=num_examples, filter_condition=None)
for item in extra_few_shot_example_meta_list:
if item['id'] not in existed_id_set:
few_shot_example_meta_list.append(item)
existed_id_set.add(item['id'])
if len(few_shot_example_meta_list) == num_examples:
break
logger.info(f"few_shot_example_meta_list size: {len(few_shot_example_meta_list)} = num_examples: {num_examples}")
return few_shot_example_meta_list
else:
logger.info(f"few_shot_example_meta_list size: {len(few_shot_example_meta_list)} > num_examples: {num_examples}")
few_shot_example_meta_list = few_shot_example_meta_list[:num_examples]
return few_shot_example_meta_list
def get_fewshot_example_combos(self, example_meta_list:List[Mapping[str, str]], num_fewshots:int)-> List[List[Mapping[str, str]]]:
fewshot_example_list = []
for i in range(0, self.num_self_consistency):
random.shuffle(example_meta_list)
fewshot_example_list.append(example_meta_list[:num_fewshots])
return fewshot_example_list
def self_consistency_vote(self, output_res_pool:List[str]):
output_res_counts = Counter(output_res_pool)
output_res_max = output_res_counts.most_common(1)[0][0]
total_output_num = len(output_res_pool)
vote_percentage = {k: (v/total_output_num) for k,v in output_res_counts.items()}
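        # Illustrative example (comment added; values are hypothetical):
        #   ["sql_a", "sql_a", "sql_b"] -> ("sql_a", {"sql_a": 2/3, "sql_b": 1/3})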
return output_res_max, vote_percentage
def schema_linking_list_str_unify(self, schema_linking_list: List[str])-> List[str]:
schema_linking_list_unify = []
for schema_linking_str in schema_linking_list:
schema_linking_str_unify = ','.join(sorted([item.strip() for item in schema_linking_str.strip('[]').split(',')]))
schema_linking_str_unify = f'[{schema_linking_str_unify}]'
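            # e.g. "[col_b, col_a]" -> "[col_a,col_b]" (hypothetical values), so the
            # self-consistency vote is insensitive to the order in which links are listed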
schema_linking_list_unify.append(schema_linking_str_unify)
return schema_linking_list_unify
class Text2DSLAgentAutoCoT(Text2DSLAgentBase):
def __init__(self, num_fewshots:int, num_examples:int, num_self_consistency:int,
sql_example_prompter:FewShotPromptTemplate2, llm: BaseLLM,
auto_cot_min_window_size: int, auto_cot_max_window_size: int):
super().__init__(num_fewshots, num_examples, num_self_consistency, sql_example_prompter, llm)
assert auto_cot_min_window_size <= auto_cot_max_window_size
self.auto_cot_min_window_size = auto_cot_min_window_size
self.auto_cot_max_window_size = auto_cot_max_window_size
def reload_setting(self, sql_example_ids: List[str], sql_example_units: List[Mapping[str,str]], num_examples:int, num_fewshots:int, num_self_consistency:int):
self.num_fewshots = num_fewshots
self.num_examples = num_examples
assert self.num_fewshots <= self.num_examples
self.num_self_consistency = num_self_consistency
assert self.num_self_consistency >= 1
new_sql_example_unit_list = transform_sql_example_autoCoT_run(sql_example_units, self.auto_cot_min_window_size, self.auto_cot_max_window_size)
self.sql_example_prompter.reload_few_shot_example(sql_example_ids, new_sql_example_unit_list)
def reload_setting_autoCoT(self, sql_example_ids: List[str], auto_cot_sql_example_units: List[Mapping[str,str]], num_examples:int, num_fewshots:int, num_self_consistency:int):
self.num_fewshots = num_fewshots
self.num_examples = num_examples
assert self.num_fewshots <= self.num_examples
self.num_self_consistency = num_self_consistency
assert self.num_self_consistency >= 1
self.sql_example_prompter.reload_few_shot_example(sql_example_ids, auto_cot_sql_example_units)
def add_examples(self, sql_example_ids: List[str], sql_example_units: List[Mapping[str,str]]):
new_sql_example_unit_list = transform_sql_example_autoCoT_run(sql_example_units, self.auto_cot_min_window_size, self.auto_cot_max_window_size)
self.sql_example_prompter.add_few_shot_example(sql_example_ids, new_sql_example_unit_list)
def update_examples(self, sql_example_ids: List[str], sql_example_units: List[Mapping[str,str]]):
new_sql_example_unit_list = transform_sql_example_autoCoT_run(sql_example_units, self.auto_cot_min_window_size, self.auto_cot_max_window_size)
self.sql_example_prompter.update_few_shot_example(sql_example_ids, new_sql_example_unit_list)
def delete_examples(self, sql_example_ids: List[str]):
self.sql_example_prompter.delete_few_shot_example(sql_example_ids)
def count_examples(self):
return self.sql_example_prompter.count_few_shot_example()
def get_examples(self, sql_example_ids: List[str]):
return self.sql_example_prompter.get_few_shot_example(sql_example_ids)
def generate_schema_linking_prompt(self, question: str, current_date:str, domain_name: str, fields_list: List[str],
prior_schema_links: Mapping[str,str], prior_exts:str, fewshot_example_list:List[Mapping[str, str]])-> str:
instruction = "# Find the schema_links for generating SQL queries for each question based on the database schema and Foreign keys."
schema_linking_example_keys = ["questionAugmented", "dbSchema", "generatedSchemaLinkingCoT"]
schema_linking_example_template = "{dbSchema}\nQ: {questionAugmented}\nA: {generatedSchemaLinkingCoT}"
schema_linking_fewshot_prompt = self.sql_example_prompter.make_few_shot_example_prompt(few_shot_template=schema_linking_example_template,
example_keys=schema_linking_example_keys,
few_shot_example_meta_list=fewshot_example_list)
question_augmented, db_schema, _ = transform_sql_example(question, current_date, domain_name, fields_list, prior_schema_links, prior_exts)
new_case_template = """{dbSchema}\nQ: {questionAugmented1}\nA: Letโs think step by step. In the question "{questionAugmented2}", we are asked:"""
new_case_prompt = new_case_template.format(dbSchema=db_schema, questionAugmented1=question_augmented, questionAugmented2=question_augmented)
schema_linking_prompt = instruction + '\n\n' + schema_linking_fewshot_prompt + '\n\n' + new_case_prompt
logger.info(f'schema_linking_prompt: {schema_linking_prompt}')
return schema_linking_prompt
def generate_schema_linking_prompt_pool(self, question: str, current_date:str, domain_name: str, fields_list: List[str],
prior_schema_links: Mapping[str,str], prior_exts:str, fewshot_example_list_pool:List[List[Mapping[str, str]]])-> List[str]:
schema_linking_prompt_pool = []
for fewshot_example_list in fewshot_example_list_pool:
schema_linking_prompt = self.generate_schema_linking_prompt(question, current_date, domain_name, fields_list, prior_schema_links, prior_exts, fewshot_example_list)
schema_linking_prompt_pool.append(schema_linking_prompt)
return schema_linking_prompt_pool
def generate_sql_prompt(self, question: str, domain_name: str,fields_list: List[str],
schema_link_str: str, current_date: str, prior_schema_links: Mapping[str,str], prior_exts:str,
fewshot_example_list:List[Mapping[str, str]])-> str:
instruction = "# Use the the schema links to generate the SQL queries for each of the questions."
sql_example_keys = ["questionAugmented", "dbSchema", "generatedSchemaLinkings", "sql"]
sql_example_template = "{dbSchema}\nQ: {questionAugmented}\nSchema_links: {generatedSchemaLinkings}\nSQL: {sql}"
sql_example_fewshot_prompt = self.sql_example_prompter.make_few_shot_example_prompt(few_shot_template=sql_example_template,
example_keys=sql_example_keys,
few_shot_example_meta_list=fewshot_example_list)
question_augmented, db_schema, _ = transform_sql_example(question, current_date, domain_name, fields_list, prior_schema_links, prior_exts)
new_case_template = "{dbSchema}\nQ: {questionAugmented}\nSchema_links: {schemaLinkings}\nSQL: "
new_case_prompt = new_case_template.format(dbSchema=db_schema, questionAugmented=question_augmented, schemaLinkings=schema_link_str)
sql_example_prompt = instruction + '\n\n' + sql_example_fewshot_prompt + '\n\n' + new_case_prompt
logger.info(f'sql_example_prompt: {sql_example_prompt}')
return sql_example_prompt
def generate_sql_prompt_pool(self, question: str, domain_name: str,fields_list: List[str],
schema_link_str_pool: List[str], current_date: str, prior_schema_links: Mapping[str,str], prior_exts:str,
fewshot_example_list_pool:List[List[Mapping[str, str]]])-> List[str]:
sql_prompt_pool = []
for schema_link_str, fewshot_example_list in zip(schema_link_str_pool, fewshot_example_list_pool):
sql_prompt = self.generate_sql_prompt(question, domain_name, fields_list, schema_link_str, current_date, prior_schema_links, prior_exts, fewshot_example_list)
sql_prompt_pool.append(sql_prompt)
return sql_prompt_pool
def generate_schema_linking_sql_prompt(self, question: str, current_date:str, domain_name: str, fields_list: List[str],
prior_schema_links: Mapping[str,str], prior_exts:str, fewshot_example_list:List[Mapping[str, str]]):
instruction = "# Find the schema_links for generating SQL queries for each question based on the database schema and Foreign keys. Then use the the schema links to generate the SQL queries for each of the questions."
example_keys = ["questionAugmented", "dbSchema", "generatedSchemaLinkingCoT","sql"]
example_template = "{dbSchema}\nQ: {questionAugmented}\nA: {generatedSchemaLinkingCoT}\nSQL: {sql}"
fewshot_prompt = self.sql_example_prompter.make_few_shot_example_prompt(few_shot_template=example_template,
example_keys=example_keys,
few_shot_example_meta_list=fewshot_example_list)
question_augmented, db_schema, _ = transform_sql_example(question, current_date, domain_name, fields_list, prior_schema_links, prior_exts)
new_case_template = """{dbSchema}\nQ: {questionAugmented1}\nA: Letโs think step by step. In the question "{questionAugmented2}", we are asked:"""
new_case_prompt = new_case_template.format(dbSchema=db_schema, questionAugmented1=question_augmented, questionAugmented2=question_augmented)
prompt = instruction + '\n\n' + fewshot_prompt + '\n\n' + new_case_prompt
logger.info(f'schema_linking_sql_prompt: {prompt}')
return prompt
def generate_schema_linking_sql_prompt_pool(self, question: str, current_date:str, domain_name: str, fields_list: List[str],
prior_schema_links: Mapping[str,str], prior_exts:str, fewshot_example_list_pool:List[List[Mapping[str, str]]])-> List[str]:
schema_linking_sql_prompt_pool = []
for fewshot_example_list in fewshot_example_list_pool:
schema_linking_sql_prompt = self.generate_schema_linking_sql_prompt(question, current_date, domain_name, fields_list, prior_schema_links, prior_exts, fewshot_example_list)
schema_linking_sql_prompt_pool.append(schema_linking_sql_prompt)
return schema_linking_sql_prompt_pool
async def async_query2sql(self, question: str, filter_condition: Mapping[str,str],
model_name: str, fields_list: List[str],
current_date: str, prior_schema_links: Mapping[str,str], prior_exts: str):
logger.info("question: {}".format(question))
logger.info("filter_condition: {}".format(filter_condition))
logger.info("model_name: {}".format(model_name))
logger.info("fields_list: {}".format(fields_list))
logger.info("current_date: {}".format(current_date))
logger.info("prior_schema_links: {}".format(prior_schema_links))
logger.info("prior_exts: {}".format(prior_exts))
fewshot_example_meta_list = self.get_examples_candidates(question, filter_condition, self.num_examples)
schema_linking_prompt = self.generate_schema_linking_prompt(question, current_date, model_name, fields_list, prior_schema_links, prior_exts, fewshot_example_meta_list)
logger.debug("schema_linking_prompt->{}".format(schema_linking_prompt))
schema_link_output = await self.llm._call_async(schema_linking_prompt)
logger.debug("schema_link_output->{}".format(schema_link_output))
schema_link_str = schema_link_parse(schema_link_output)
logger.debug("schema_link_str->{}".format(schema_link_str))
sql_prompt = self.generate_sql_prompt(question, model_name, fields_list, schema_link_str, current_date, prior_schema_links, prior_exts, fewshot_example_meta_list)
logger.debug("sql_prompt->{}".format(sql_prompt))
sql_output = await self.llm._call_async(sql_prompt)
resp = dict()
resp['question'] = question
resp['model'] = model_name
resp['fields'] = fields_list
resp['priorSchemaLinking'] = prior_schema_links
resp['priorExts'] = prior_exts
resp['currentDate'] = current_date
resp['prompt'] = [schema_linking_prompt+'\n\n'+sql_prompt]
resp['schemaLinkingOutput'] = schema_link_output
resp['schemaLinkStr'] = schema_link_str
resp['sqlOutput'] = sql_output
logger.info("resp: {}".format(resp))
return resp
async def async_query2sql_shortcut(self, question: str, filter_condition: Mapping[str,str],
model_name: str, fields_list: List[str],
current_date: str, prior_schema_links: Mapping[str,str], prior_exts: str):
logger.info("question: {}".format(question))
logger.info("filter_condition: {}".format(filter_condition))
logger.info("model_name: {}".format(model_name))
logger.info("fields_list: {}".format(fields_list))
logger.info("current_date: {}".format(current_date))
logger.info("prior_schema_links: {}".format(prior_schema_links))
logger.info("prior_exts: {}".format(prior_exts))
fewshot_example_meta_list = self.get_examples_candidates(question, filter_condition, self.num_examples)
schema_linking_sql_shortcut_prompt = self.generate_schema_linking_sql_prompt(question, current_date, model_name, fields_list, prior_schema_links, prior_exts, fewshot_example_meta_list)
logger.debug("schema_linking_sql_shortcut_prompt->{}".format(schema_linking_sql_shortcut_prompt))
schema_linking_sql_shortcut_output = await self.llm._call_async(schema_linking_sql_shortcut_prompt)
logger.debug("schema_linking_sql_shortcut_output->{}".format(schema_linking_sql_shortcut_output))
schema_linking_str = combo_schema_link_parse(schema_linking_sql_shortcut_output)
sql_str = combo_sql_parse(schema_linking_sql_shortcut_output)
resp = dict()
resp['question'] = question
resp['model'] = model_name
resp['fields'] = fields_list
resp['priorSchemaLinking'] = prior_schema_links
resp['priorExts'] = prior_exts
resp['currentDate'] = current_date
resp['prompt'] = [schema_linking_sql_shortcut_prompt]
resp['schemaLinkingComboOutput'] = schema_linking_sql_shortcut_output
resp['schemaLinkStr'] = schema_linking_str
resp['sqlOutput'] = sql_str
logger.info("resp: {}".format(resp))
return resp
async def generate_schema_linking_tasks(self, question: str, model_name: str, fields_list: List[str],
current_date: str, prior_schema_links: Mapping[str,str], prior_exts: str, fewshot_example_list_combo:List[List[Mapping[str, str]]]):
schema_linking_prompt_pool = self.generate_schema_linking_prompt_pool(question, current_date, model_name, fields_list, prior_schema_links, prior_exts, fewshot_example_list_combo)
logger.debug("schema_linking_prompt_pool->{}".format(schema_linking_prompt_pool))
schema_linking_output_pool = await asyncio.gather(*[self.llm._call_async(schema_linking_prompt) for schema_linking_prompt in schema_linking_prompt_pool])
logger.debug("schema_linking_output_pool->{}".format(schema_linking_output_pool))
schema_linking_str_pool = [schema_link_parse(schema_linking_output) for schema_linking_output in schema_linking_output_pool]
return schema_linking_str_pool, schema_linking_output_pool, schema_linking_prompt_pool
async def generate_sql_tasks(self, question: str, model_name: str, fields_list: List[str], schema_link_str_pool: List[str],
current_date: str, prior_schema_links: Mapping[str,str], prior_exts: str, fewshot_example_list_combo:List[List[Mapping[str, str]]]):
sql_prompt_pool = self.generate_sql_prompt_pool(question, model_name, fields_list, schema_link_str_pool, current_date, prior_schema_links, prior_exts, fewshot_example_list_combo)
logger.debug("sql_prompt_pool->{}".format(sql_prompt_pool))
sql_output_pool = await asyncio.gather(*[self.llm._call_async(sql_prompt) for sql_prompt in sql_prompt_pool])
logger.debug("sql_output_pool->{}".format(sql_output_pool))
return sql_output_pool, sql_prompt_pool
async def generate_schema_linking_sql_tasks(self, question: str, model_name: str, fields_list: List[str],
current_date: str, prior_schema_links: Mapping[str,str], prior_exts: str, fewshot_example_list_combo:List[List[Mapping[str, str]]]):
schema_linking_sql_prompt_pool = self.generate_schema_linking_sql_prompt_pool(question, current_date, model_name, fields_list, prior_schema_links, prior_exts, fewshot_example_list_combo)
schema_linking_sql_output_task_pool = [self.llm._call_async(schema_linking_sql_prompt) for schema_linking_sql_prompt in schema_linking_sql_prompt_pool]
schema_linking_sql_output_res_pool = await asyncio.gather(*schema_linking_sql_output_task_pool)
logger.debug("schema_linking_sql_output_res_pool->{}".format(schema_linking_sql_output_res_pool))
return schema_linking_sql_output_res_pool, schema_linking_sql_prompt_pool, schema_linking_sql_output_task_pool
async def tasks_run(self, question: str, filter_condition: Mapping[str,str],
model_name: str, fields_list: List[str],
current_date: str, prior_schema_links: Mapping[str,str], prior_exts: str):
logger.info("question: {}".format(question))
logger.info("filter_condition: {}".format(filter_condition))
logger.info("model_name: {}".format(model_name))
logger.info("fields_list: {}".format(fields_list))
logger.info("current_date: {}".format(current_date))
logger.info("prior_schema_links: {}".format(prior_schema_links))
logger.info("prior_exts: {}".format(prior_exts))
fewshot_example_meta_list = self.get_examples_candidates(question, filter_condition, self.num_examples)
fewshot_example_list_combo = self.get_fewshot_example_combos(fewshot_example_meta_list, self.num_fewshots)
schema_linking_candidate_list, _, schema_linking_prompt_list = await self.generate_schema_linking_tasks(question, model_name, fields_list, current_date, prior_schema_links, prior_exts, fewshot_example_list_combo)
logger.debug(f'schema_linking_candidate_list:{schema_linking_candidate_list}')
schema_linking_candidate_sorted_list = self.schema_linking_list_str_unify(schema_linking_candidate_list)
logger.debug(f'schema_linking_candidate_sorted_list:{schema_linking_candidate_sorted_list}')
schema_linking_output_max, schema_linking_output_vote_percentage = self.self_consistency_vote(schema_linking_candidate_sorted_list)
sql_output_candicates, sql_output_prompt_list = await self.generate_sql_tasks(question, model_name, fields_list, schema_linking_candidate_list, current_date, prior_schema_links, prior_exts, fewshot_example_list_combo)
logger.debug(f'sql_output_candicates:{sql_output_candicates}')
sql_output_max, sql_output_vote_percentage = self.self_consistency_vote(sql_output_candicates)
resp = dict()
resp['question'] = question
resp['model'] = model_name
resp['fields'] = fields_list
resp['priorSchemaLinking'] = prior_schema_links
resp['priorExts'] = prior_exts
resp['currentDate'] = current_date
resp['prompt'] = [schema_linking_prompt+'\n\n'+sql_prompt for schema_linking_prompt, sql_prompt in zip(schema_linking_prompt_list, sql_output_prompt_list)]
resp['schemaLinkStr'] = schema_linking_output_max
resp['schemaLinkingWeight'] = schema_linking_output_vote_percentage
resp['sqlOutput'] = sql_output_max
resp['sqlWeight'] = sql_output_vote_percentage
logger.info("resp: {}".format(resp))
return resp
async def tasks_run_shortcut(self, question: str, filter_condition: Mapping[str,str], model_name: str, fields_list: List[str],
current_date: str, prior_schema_links: Mapping[str,str], prior_exts: str):
logger.info("question: {}".format(question))
logger.info("filter_condition: {}".format(filter_condition))
logger.info("model_name: {}".format(model_name))
logger.info("fields_list: {}".format(fields_list))
logger.info("current_date: {}".format(current_date))
logger.info("prior_schema_links: {}".format(prior_schema_links))
logger.info("prior_exts: {}".format(prior_exts))
fewshot_example_meta_list = self.get_examples_candidates(question, filter_condition, self.num_examples)
fewshot_example_list_combo = self.get_fewshot_example_combos(fewshot_example_meta_list, self.num_fewshots)
schema_linking_sql_output_candidates, schema_linking_sql_prompt_list, _ = await self.generate_schema_linking_sql_tasks(question, model_name, fields_list, current_date, prior_schema_links, prior_exts, fewshot_example_list_combo)
logger.debug(f'schema_linking_sql_output_candidates:{schema_linking_sql_output_candidates}')
schema_linking_output_candidate_list = [combo_schema_link_parse(schema_linking_sql_output_candidate) for schema_linking_sql_output_candidate in schema_linking_sql_output_candidates]
logger.debug(f'schema_linking_sql_output_candidate_list:{schema_linking_output_candidate_list}')
schema_linking_output_candidate_sorted_list = self.schema_linking_list_str_unify(schema_linking_output_candidate_list)
schema_linking_output_max, schema_linking_output_vote_percentage = self.self_consistency_vote(schema_linking_output_candidate_sorted_list)
sql_output_candidate_list = [combo_sql_parse(schema_linking_sql_output_candidate) for schema_linking_sql_output_candidate in schema_linking_sql_output_candidates]
logger.debug(f'sql_output_candidate_list:{sql_output_candidate_list}')
sql_output_max, sql_output_vote_percentage = self.self_consistency_vote(sql_output_candidate_list)
resp = dict()
resp['question'] = question
resp['model'] = model_name
resp['fields'] = fields_list
resp['priorSchemaLinking'] = prior_schema_links
resp['priorExts'] = prior_exts
resp['currentDate'] = current_date
resp['prompt'] = schema_linking_sql_prompt_list
resp['schemaLinkStr'] = schema_linking_output_max
resp['schemaLinkingWeight'] = schema_linking_output_vote_percentage
resp['sqlOutput'] = sql_output_max
resp['sqlWeight'] = sql_output_vote_percentage
logger.info("resp: {}".format(resp))
return resp
class Text2DSLAgent(Text2DSLAgentBase):
def __init__(self, num_fewshots:int, num_examples:int, num_self_consistency:int,
sql_example_prompter:FewShotPromptTemplate2, llm: BaseLLM,) -> None:
super().__init__(num_fewshots, num_examples, num_self_consistency, sql_example_prompter, llm)
def reload_setting(self, sql_example_ids:List[str], sql_example_units: List[Mapping[str, str]], num_examples:int, num_fewshots:int, num_self_consistency:int):
self.num_fewshots = num_fewshots
self.num_examples = num_examples
assert self.num_fewshots <= self.num_examples
self.num_self_consistency = num_self_consistency
assert self.num_self_consistency >= 1
self.sql_example_prompter.reload_few_shot_example(sql_example_ids, sql_example_units)
def add_examples(self, sql_example_ids:List[str], sql_example_units: List[Mapping[str, str]]):
self.sql_example_prompter.add_few_shot_example(sql_example_ids, sql_example_units)
def update_examples(self, sql_example_ids:List[str], sql_example_units: List[Mapping[str, str]]):
self.sql_example_prompter.update_few_shot_example(sql_example_ids, sql_example_units)
def delete_examples(self, sql_example_ids:List[str]):
self.sql_example_prompter.delete_few_shot_example(sql_example_ids)
def get_examples(self, sql_example_ids: List[str]):
return self.sql_example_prompter.get_few_shot_example(sql_example_ids)
def count_examples(self):
return self.sql_example_prompter.count_few_shot_example()
def generate_schema_linking_prompt(self, question: str, domain_name: str, fields_list: List[str],
prior_schema_links: Mapping[str,str], fewshot_example_list:List[Mapping[str, str]])-> str:
prior_schema_links_str = '['+ ','.join(["""'{}'->{}""".format(k,v) for k,v in prior_schema_links.items()]) + ']'
instruction = "# ๆ นๆฎๆฐๆฎๅบ็่กจ็ปๆ,ๅ่ๅ
้ชไฟกๆฏ,ๆพๅบไธบๆฏไธช้ฎ้ข็ๆSQLๆฅ่ฏข่ฏญๅฅ็schema_links"
schema_linking_example_keys = ["tableName", "fieldsList", "priorSchemaLinks", "question", "analysis", "schemaLinks"]
schema_linking_example_template = "Table {tableName}, columns = {fieldsList}, prior_schema_links = {priorSchemaLinks}\n้ฎ้ข:{question}\nๅๆ:{analysis} ๆไปฅSchema_linksๆฏ:\nSchema_links:{schemaLinks}"
schema_linking_fewshot_prompt = self.sql_example_prompter.make_few_shot_example_prompt(few_shot_template=schema_linking_example_template,
example_keys=schema_linking_example_keys,
few_shot_example_meta_list=fewshot_example_list)
new_case_template = "Table {tableName}, columns = {fieldsList}, prior_schema_links = {priorSchemaLinks}\n้ฎ้ข:{question}\nๅๆ: ่ฎฉๆไปฌไธๆญฅไธๆญฅๅฐๆ่ใ"
new_case_prompt = new_case_template.format(tableName=domain_name, fieldsList=fields_list, priorSchemaLinks=prior_schema_links_str, question=question)
schema_linking_prompt = instruction + '\n\n' + schema_linking_fewshot_prompt + '\n\n' + new_case_prompt
return schema_linking_prompt
def generate_schema_linking_prompt_pool(self, question: str, domain_name: str, fields_list: List[str],
prior_schema_links: Mapping[str,str], fewshot_example_list_pool:List[List[Mapping[str, str]]])-> List[str]:
schema_linking_prompt_pool = []
for fewshot_example_list in fewshot_example_list_pool:
schema_linking_prompt = self.generate_schema_linking_prompt(question, domain_name, fields_list, prior_schema_links, fewshot_example_list)
schema_linking_prompt_pool.append(schema_linking_prompt)
return schema_linking_prompt_pool
def generate_sql_prompt(self, question: str, domain_name: str,
schema_link_str: str, data_date: str,
fewshot_example_list:List[Mapping[str, str]])-> str:
instruction = "# ๆ นๆฎschema_linksไธบๆฏไธช้ฎ้ข็ๆSQLๆฅ่ฏข่ฏญๅฅ"
sql_example_keys = ["question", "currentDate", "tableName", "schemaLinks", "sql"]
sql_example_template = "้ฎ้ข:{question}\nCurrent_date:{currentDate}\nTable {tableName}\nSchema_links:{schemaLinks}\nSQL:{sql}"
sql_example_fewshot_prompt = self.sql_example_prompter.make_few_shot_example_prompt(few_shot_template=sql_example_template,
example_keys=sql_example_keys,
few_shot_example_meta_list=fewshot_example_list)
new_case_template = "้ฎ้ข:{question}\nCurrent_date:{currentDate}\nTable {tableName}\nSchema_links:{schemaLinks}\nSQL:"
new_case_prompt = new_case_template.format(question=question, currentDate=data_date, tableName=domain_name, schemaLinks=schema_link_str)
sql_example_prompt = instruction + '\n\n' + sql_example_fewshot_prompt + '\n\n' + new_case_prompt
return sql_example_prompt
def generate_sql_prompt_pool(self, question: str, domain_name: str, data_date: str,
schema_link_str_pool: List[str], fewshot_example_list_pool:List[List[Mapping[str, str]]])-> List[str]:
sql_prompt_pool = []
for schema_link_str, fewshot_example_list in zip(schema_link_str_pool, fewshot_example_list_pool):
sql_prompt = self.generate_sql_prompt(question, domain_name, schema_link_str, data_date, fewshot_example_list)
sql_prompt_pool.append(sql_prompt)
return sql_prompt_pool
def generate_schema_linking_sql_prompt(self, question: str,
domain_name: str,
data_date : str,
fields_list: List[str],
prior_schema_links: Mapping[str,str],
fewshot_example_list:List[Mapping[str, str]]):
prior_schema_links_str = '['+ ','.join(["""'{}'->{}""".format(k,v) for k,v in prior_schema_links.items()]) + ']'
instruction = "# ๆ นๆฎๆฐๆฎๅบ็่กจ็ปๆ,ๅ่ๅ
้ชไฟกๆฏ,ๆพๅบไธบๆฏไธช้ฎ้ข็ๆSQLๆฅ่ฏข่ฏญๅฅ็schema_links,ๅๆ นๆฎschema_linksไธบๆฏไธช้ฎ้ข็ๆSQLๆฅ่ฏข่ฏญๅฅ"
example_keys = ["tableName", "fieldsList", "priorSchemaLinks", "currentDate", "question", "analysis", "schemaLinks", "sql"]
example_template = "Table {tableName}, columns = {fieldsList}, prior_schema_links = {priorSchemaLinks}\nCurrent_date:{currentDate}\n้ฎ้ข:{question}\nๅๆ:{analysis} ๆไปฅSchema_linksๆฏ:\nSchema_links:{schemaLinks}\nSQL:{sql}"
fewshot_prompt = self.sql_example_prompter.make_few_shot_example_prompt(few_shot_template=example_template,
example_keys=example_keys,
few_shot_example_meta_list=fewshot_example_list)
new_case_template = "Table {tableName}, columns = {fieldsList}, prior_schema_links = {priorSchemaLinks}\nCurrent_date:{currentDate}\n้ฎ้ข:{question}\nๅๆ: ่ฎฉๆไปฌไธๆญฅไธๆญฅๅฐๆ่ใ"
new_case_prompt = new_case_template.format(tableName=domain_name, fieldsList=fields_list, priorSchemaLinks=prior_schema_links_str, currentDate=data_date, question=question)
prompt = instruction + '\n\n' + fewshot_prompt + '\n\n' + new_case_prompt
return prompt
def generate_schema_linking_sql_prompt_pool(self, question: str, domain_name: str, fields_list: List[str], data_date: str,
prior_schema_links: Mapping[str,str], fewshot_example_list_pool:List[List[Mapping[str, str]]])-> List[str]:
schema_linking_sql_prompt_pool = []
for fewshot_example_list in fewshot_example_list_pool:
schema_linking_sql_prompt = self.generate_schema_linking_sql_prompt(question, domain_name, data_date, fields_list, prior_schema_links, fewshot_example_list)
schema_linking_sql_prompt_pool.append(schema_linking_sql_prompt)
return schema_linking_sql_prompt_pool
def self_consistency_vote(self, output_res_pool:List[str]):
output_res_counts = Counter(output_res_pool)
output_res_max = output_res_counts.most_common(1)[0][0]
total_output_num = len(output_res_pool)
vote_percentage = {k: (v/total_output_num) for k,v in output_res_counts.items()}
return output_res_max, vote_percentage
def schema_linking_list_str_unify(self, schema_linking_list: List[str])-> List[str]:
schema_linking_list_unify = []
for schema_linking_str in schema_linking_list:
schema_linking_str_unify = ','.join(sorted([item.strip() for item in schema_linking_str.strip('[]').split(',')]))
schema_linking_str_unify = f'[{schema_linking_str_unify}]'
schema_linking_list_unify.append(schema_linking_str_unify)
return schema_linking_list_unify
async def generate_schema_linking_tasks(self, question: str, domain_name: str,
fields_list: List[str], prior_schema_links: Mapping[str,str],
fewshot_example_list_combo:List[List[Mapping[str, str]]]):
schema_linking_prompt_pool = self.generate_schema_linking_prompt_pool(question, domain_name,
fields_list, prior_schema_links,
fewshot_example_list_combo)
schema_linking_output_task_pool = [self.llm._call_async(schema_linking_prompt) for schema_linking_prompt in schema_linking_prompt_pool]
schema_linking_output_pool = await asyncio.gather(*schema_linking_output_task_pool)
logger.debug(f'schema_linking_output_pool:{schema_linking_output_pool}')
schema_linking_str_pool = [schema_link_parse(schema_linking_output) for schema_linking_output in schema_linking_output_pool]
return schema_linking_str_pool
async def generate_sql_tasks(self, question: str, domain_name: str, data_date: str,
schema_link_str_pool: List[str], fewshot_example_list_combo:List[List[Mapping[str, str]]]):
        sql_prompt_pool = self.generate_sql_prompt_pool(question, domain_name, data_date, schema_link_str_pool, fewshot_example_list_combo)
sql_output_task_pool = [self.llm._call_async(sql_prompt) for sql_prompt in sql_prompt_pool]
sql_output_res_pool = await asyncio.gather(*sql_output_task_pool)
logger.debug(f'sql_output_res_pool:{sql_output_res_pool}')
return sql_output_res_pool
async def generate_schema_linking_sql_tasks(self, question: str, domain_name: str, fields_list: List[str], data_date: str,
prior_schema_links: Mapping[str,str], fewshot_example_list_combo:List[List[Mapping[str, str]]]):
schema_linking_sql_prompt_pool = self.generate_schema_linking_sql_prompt_pool(question, domain_name, fields_list, data_date, prior_schema_links, fewshot_example_list_combo)
schema_linking_sql_output_task_pool = [self.llm._call_async(schema_linking_sql_prompt) for schema_linking_sql_prompt in schema_linking_sql_prompt_pool]
schema_linking_sql_output_res_pool = await asyncio.gather(*schema_linking_sql_output_task_pool)
logger.debug(f'schema_linking_sql_output_res_pool:{schema_linking_sql_output_res_pool}')
return schema_linking_sql_output_res_pool
async def tasks_run(self, question: str, filter_condition: Mapping[str, str], domain_name: str, fields_list: List[str], prior_schema_links: Mapping[str,str], data_date: str, prior_exts: str):
logger.info("question: {}".format(question))
logger.info("domain_name: {}".format(domain_name))
logger.info("fields_list: {}".format(fields_list))
logger.info("current_date: {}".format(data_date))
logger.info("prior_schema_links: {}".format(prior_schema_links))
logger.info("prior_exts: {}".format(prior_exts))
if prior_exts != '':
question = question + ' ๅคๆณจ:'+prior_exts
logger.info("question_prior_exts: {}".format(question))
fewshot_example_meta_list = self.get_examples_candidates(question, filter_condition, self.num_examples)
fewshot_example_list_combo = self.get_fewshot_example_combos(fewshot_example_meta_list, self.num_fewshots)
schema_linking_candidate_list = await self.generate_schema_linking_tasks(question, domain_name, fields_list, prior_schema_links, fewshot_example_list_combo)
logger.debug(f'schema_linking_candidate_list:{schema_linking_candidate_list}')
schema_linking_candidate_sorted_list = self.schema_linking_list_str_unify(schema_linking_candidate_list)
logger.debug(f'schema_linking_candidate_sorted_list:{schema_linking_candidate_sorted_list}')
schema_linking_output_max, schema_linking_output_vote_percentage = self.self_consistency_vote(schema_linking_candidate_sorted_list)
sql_output_candicates = await self.generate_sql_tasks(question, domain_name, data_date, schema_linking_candidate_list,fewshot_example_list_combo)
logger.debug(f'sql_output_candicates:{sql_output_candicates}')
sql_output_max, sql_output_vote_percentage = self.self_consistency_vote(sql_output_candicates)
resp = dict()
resp['question'] = question
resp['model'] = domain_name
resp['fields'] = fields_list
resp['priorSchemaLinking'] = prior_schema_links
resp['dataDate'] = data_date
resp['schemaLinkStr'] = schema_linking_output_max
resp['schemaLinkingWeight'] = schema_linking_output_vote_percentage
resp['sqlOutput'] = sql_output_max
resp['sqlWeight'] = sql_output_vote_percentage
logger.info("resp: {}".format(resp))
return resp
async def tasks_run_shortcut(self, question: str, filter_condition: Mapping[str, str], domain_name: str, fields_list: List[str], prior_schema_links: Mapping[str,str], data_date: str, prior_exts: str):
logger.info("question: {}".format(question))
logger.info("domain_name: {}".format(domain_name))
logger.info("fields_list: {}".format(fields_list))
logger.info("current_date: {}".format(data_date))
logger.info("prior_schema_links: {}".format(prior_schema_links))
logger.info("prior_exts: {}".format(prior_exts))
if prior_exts != '':
question = question + ' ๅคๆณจ:'+prior_exts
logger.info("question_prior_exts: {}".format(question))
fewshot_example_meta_list = self.get_examples_candidates(question, filter_condition, self.num_examples)
fewshot_example_list_combo = self.get_fewshot_example_combos(fewshot_example_meta_list, self.num_fewshots)
schema_linking_sql_output_candidates = await self.generate_schema_linking_sql_tasks(question, domain_name, fields_list, data_date, prior_schema_links, fewshot_example_list_combo)
logger.debug(f'schema_linking_sql_output_candidates:{schema_linking_sql_output_candidates}')
schema_linking_output_candidate_list = [combo_schema_link_parse(schema_linking_sql_output_candidate) for schema_linking_sql_output_candidate in schema_linking_sql_output_candidates]
logger.debug(f'schema_linking_sql_output_candidate_list:{schema_linking_output_candidate_list}')
schema_linking_output_candidate_sorted_list = self.schema_linking_list_str_unify(schema_linking_output_candidate_list)
schema_linking_output_max, schema_linking_output_vote_percentage = self.self_consistency_vote(schema_linking_output_candidate_sorted_list)
sql_output_candidate_list = [combo_sql_parse(schema_linking_sql_output_candidate) for schema_linking_sql_output_candidate in schema_linking_sql_output_candidates]
logger.debug(f'sql_output_candidate_list:{sql_output_candidate_list}')
sql_output_max, sql_output_vote_percentage = self.self_consistency_vote(sql_output_candidate_list)
resp = dict()
resp['question'] = question
resp['model'] = domain_name
resp['fields'] = fields_list
resp['priorSchemaLinking'] = prior_schema_links
resp['dataDate'] = data_date
resp['schemaLinkStr'] = schema_linking_output_max
resp['schemaLinkingWeight'] = schema_linking_output_vote_percentage
resp['sqlOutput'] = sql_output_max
resp['sqlWeight'] = sql_output_vote_percentage
logger.info("resp: {}".format(resp))
return resp
async def async_query2sql(self, question: str, filter_condition: Mapping[str,str],
model_name: str, fields_list: List[str],
data_date: str, prior_schema_links: Mapping[str,str], prior_exts: str):
logger.info("question: {}".format(question))
logger.info("model_name: {}".format(model_name))
logger.info("fields_list: {}".format(fields_list))
logger.info("data_date: {}".format(data_date))
logger.info("prior_schema_links: {}".format(prior_schema_links))
logger.info("prior_exts: {}".format(prior_exts))
if prior_exts != '':
question = question + ' ๅคๆณจ:'+prior_exts
logger.info("question_prior_exts: {}".format(question))
fewshot_example_meta_list = self.get_examples_candidates(question, filter_condition, self.num_examples)
schema_linking_prompt = self.generate_schema_linking_prompt(question, model_name, fields_list, prior_schema_links, fewshot_example_meta_list)
logger.debug("schema_linking_prompt->{}".format(schema_linking_prompt))
schema_link_output = await self.llm._call_async(schema_linking_prompt)
schema_link_str = schema_link_parse(schema_link_output)
sql_prompt = self.generate_sql_prompt(question, model_name, schema_link_str, data_date, fewshot_example_meta_list)
logger.debug("sql_prompt->{}".format(sql_prompt))
sql_output = await self.llm._call_async(sql_prompt)
resp = dict()
resp['question'] = question
resp['model'] = model_name
resp['fields'] = fields_list
resp['priorSchemaLinking'] = prior_schema_links
resp['dataDate'] = data_date
resp['schemaLinkingOutput'] = schema_link_output
resp['schemaLinkStr'] = schema_link_str
resp['sqlOutput'] = sql_output
logger.info("resp: {}".format(resp))
return resp
async def async_query2sql_shortcut(self, question: str, filter_condition: Mapping[str,str],
model_name: str, fields_list: List[str],
data_date: str, prior_schema_links: Mapping[str,str], prior_exts: str):
logger.info("question: {}".format(question))
logger.info("model_name: {}".format(model_name))
logger.info("fields_list: {}".format(fields_list))
logger.info("data_date: {}".format(data_date))
logger.info("prior_schema_links: {}".format(prior_schema_links))
logger.info("prior_exts: {}".format(prior_exts))
if prior_exts != '':
question = question + ' ๅคๆณจ:'+prior_exts
logger.info("question_prior_exts: {}".format(question))
fewshot_example_meta_list = self.get_examples_candidates(question, filter_condition, self.num_examples)
schema_linking_sql_shortcut_prompt = self.generate_schema_linking_sql_prompt(question, model_name, data_date, fields_list, prior_schema_links, fewshot_example_meta_list)
logger.debug("schema_linking_sql_shortcut_prompt->{}".format(schema_linking_sql_shortcut_prompt))
schema_linking_sql_shortcut_output = await self.llm._call_async(schema_linking_sql_shortcut_prompt)
schema_linking_str = combo_schema_link_parse(schema_linking_sql_shortcut_output)
sql_str = combo_sql_parse(schema_linking_sql_shortcut_output)
resp = dict()
resp['question'] = question
resp['model'] = model_name
resp['fields'] = fields_list
resp['priorSchemaLinking'] = prior_schema_links
resp['dataDate'] = data_date
resp['schemaLinkingComboOutput'] = schema_linking_sql_shortcut_output
resp['schemaLinkStr'] = schema_linking_str
resp['sqlOutput'] = sql_str
logger.info("resp: {}".format(resp))
return resp
class SqlModeEnum(Enum):
VALUE5 = '1_pass_auto_cot'
VALUE6 = '1_pass_auto_cot_self_consistency'
VALUE7 = '2_pass_auto_cot'
VALUE8 = '2_pass_auto_cot_self_consistency'
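# Descriptive note (added for clarity): these modes are dispatched in
# Text2DSLAgentWrapper.async_query2sql below as follows:
#   1_pass_auto_cot                  -> async_query2sql_shortcut (single combined prompt)
#   1_pass_auto_cot_self_consistency -> tasks_run_shortcut (combined prompt plus voting)
#   2_pass_auto_cot                  -> async_query2sql (schema-linking pass, then SQL pass)
#   2_pass_auto_cot_self_consistency -> tasks_run (two passes plus voting)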
class Text2DSLAgentWrapper(object):
def __init__(self, sql_agent_act:Text2DSLAgentAutoCoT):
self.sql_agent_act = sql_agent_act
async def async_query2sql(self, question: str, filter_condition: Mapping[str,str],
model_name: str, fields_list: List[str],
data_date: str, prior_schema_links: Mapping[str,str], prior_exts: str, sql_generation_mode: str):
if sql_generation_mode not in (sql_mode.value for sql_mode in SqlModeEnum):
raise ValueError(f"sql_generation_mode: {sql_generation_mode} is not in SqlModeEnum")
if sql_generation_mode == '1_pass_auto_cot':
logger.info(f"sql wrapper: {sql_generation_mode}")
resp = await self.sql_agent_act.async_query2sql_shortcut(question=question, filter_condition=filter_condition, model_name=model_name, fields_list=fields_list, current_date=data_date, prior_schema_links=prior_schema_links, prior_exts=prior_exts)
return resp
elif sql_generation_mode == '1_pass_auto_cot_self_consistency':
logger.info(f"sql wrapper: {sql_generation_mode}")
resp = await self.sql_agent_act.tasks_run_shortcut(question=question, filter_condition=filter_condition, model_name=model_name, fields_list=fields_list, current_date=data_date, prior_schema_links=prior_schema_links, prior_exts=prior_exts)
return resp
elif sql_generation_mode == '2_pass_auto_cot':
logger.info(f"sql wrapper: {sql_generation_mode}")
resp = await self.sql_agent_act.async_query2sql(question=question, filter_condition=filter_condition, model_name=model_name, fields_list=fields_list, current_date=data_date, prior_schema_links=prior_schema_links, prior_exts=prior_exts)
return resp
elif sql_generation_mode == '2_pass_auto_cot_self_consistency':
logger.info(f"sql wrapper: {sql_generation_mode}")
resp = await self.sql_agent_act.tasks_run(question=question, filter_condition=filter_condition, model_name=model_name, fields_list=fields_list, current_date=data_date, prior_schema_links=prior_schema_links, prior_exts=prior_exts)
return resp
else:
raise ValueError(f'sql_generation_mode:{sql_generation_mode} is not in SqlModeEnum')
def update_configs(self, sql_example_ids:List[str], sql_example_units: List[Mapping[str, str]],
num_examples: int, num_fewshots: int, num_self_consistency: int):
self.sql_agent_act.reload_setting(sql_example_ids=sql_example_ids, sql_example_units=sql_example_units, num_examples=num_examples, num_fewshots=num_fewshots, num_self_consistency=num_self_consistency)
def add_examples(self, sql_example_ids:List[str], sql_example_units: List[Mapping[str, str]]):
self.sql_agent_act.add_examples(sql_example_ids=sql_example_ids, sql_example_units=sql_example_units)
def update_examples(self, sql_example_ids:List[str], sql_example_units: List[Mapping[str, str]]):
self.sql_agent_act.update_examples(sql_example_ids=sql_example_ids, sql_example_units=sql_example_units)
def delete_examples(self, sql_example_ids:List[str]):
self.sql_agent_act.delete_examples(sql_example_ids=sql_example_ids)
def get_examples(self, sql_example_ids: List[str]):
sql_agent_act_examples = self.sql_agent_act.get_examples(sql_example_ids=sql_example_ids)
return sql_agent_act_examples
def count_examples(self):
sql_agent_examples_act_cnt = self.sql_agent_act.count_examples()
return sql_agent_examples_act_cnt | [
"{dbSchema}\nQ: {questionAugmented}\nSchema_links: {schemaLinkings}\nSQL: ",
"้ฎ้ข:{question}\nCurrent_date:{currentDate}\nTable {tableName}\nSchema_links:{schemaLinks}\nSQL:{sql}",
"{dbSchema}\nQ: {questionAugmented}\nSchema_links: {generatedSchemaLinkings}\nSQL: {sql}",
"{dbSchema}\nQ: {questionAugmented1}\nA: Letโs think step by step. In the question \"{questionAugmented2}\", we are asked:",
"{dbSchema}\nQ: {questionAugmented}\nA: {generatedSchemaLinkingCoT}\nSQL: {sql}",
"Table {tableName}, columns = {fieldsList}, prior_schema_links = {priorSchemaLinks}\n้ฎ้ข:{question}\nๅๆ:{analysis} ๆไปฅSchema_linksๆฏ:\nSchema_links:{schemaLinks}",
"Table {tableName}, columns = {fieldsList}, prior_schema_links = {priorSchemaLinks}\n้ฎ้ข:{question}\nๅๆ: ่ฎฉๆไปฌไธๆญฅไธๆญฅๅฐๆ่ใ",
"้ฎ้ข:{question}\nCurrent_date:{currentDate}\nTable {tableName}\nSchema_links:{schemaLinks}\nSQL:",
"PLACEHOLDER\n\nPLACEHOLDER\n\nPLACEHOLDER",
"Table {tableName}, columns = {fieldsList}, prior_schema_links = {priorSchemaLinks}\nCurrent_date:{currentDate}\n้ฎ้ข:{question}\nๅๆ:{analysis} ๆไปฅSchema_linksๆฏ:\nSchema_links:{schemaLinks}\nSQL:{sql}",
"{dbSchema}\nQ: {questionAugmented}\nA: {generatedSchemaLinkingCoT}",
"[]",
"Table {tableName}, columns = {fieldsList}, prior_schema_links = {priorSchemaLinks}\nCurrent_date:{currentDate}\n้ฎ้ข:{question}\nๅๆ: ่ฎฉๆไปฌไธๆญฅไธๆญฅๅฐๆ่ใ"
] |
2024-01-10 | cbh123/narrator | narrator.py | import os
from openai import OpenAI
import base64
import json
import time
import simpleaudio as sa
import errno
from elevenlabs import generate, play, set_api_key, voices
client = OpenAI()
set_api_key(os.environ.get("ELEVENLABS_API_KEY"))
def encode_image(image_path):
while True:
try:
with open(image_path, "rb") as image_file:
return base64.b64encode(image_file.read()).decode("utf-8")
except IOError as e:
if e.errno != errno.EACCES:
# Not a "file in use" error, re-raise
raise
# File is being written to, wait a bit and retry
time.sleep(0.1)
def play_audio(text):
audio = generate(text, voice=os.environ.get("ELEVENLABS_VOICE_ID"))
unique_id = base64.urlsafe_b64encode(os.urandom(30)).decode("utf-8").rstrip("=")
dir_path = os.path.join("narration", unique_id)
os.makedirs(dir_path, exist_ok=True)
file_path = os.path.join(dir_path, "audio.wav")
with open(file_path, "wb") as f:
f.write(audio)
play(audio)
def generate_new_line(base64_image):
return [
{
"role": "user",
"content": [
{"type": "text", "text": "Describe this image"},
{
"type": "image_url",
"image_url": f"data:image/jpeg;base64,{base64_image}",
},
],
},
]
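# Descriptive note (added for clarity): each loop iteration appends one user turn whose
# content mixes a text part and a base64 data-URL image part, the multimodal message
# shape this script uses with the gpt-4-vision-preview model.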
def analyze_image(base64_image, script):
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "system",
"content": """
You are Sir David Attenborough. Narrate the picture of the human as if it is a nature documentary.
Make it snarky and funny. Don't repeat yourself. Make it short. If I do anything remotely interesting, make a big deal about it!
""",
},
]
+ script
+ generate_new_line(base64_image),
max_tokens=500,
)
response_text = response.choices[0].message.content
return response_text
def main():
script = []
while True:
# path to your image
image_path = os.path.join(os.getcwd(), "./frames/frame.jpg")
# getting the base64 encoding
base64_image = encode_image(image_path)
# analyze posture
print("๐ David is watching...")
analysis = analyze_image(base64_image, script=script)
print("๐๏ธ David says:")
print(analysis)
play_audio(analysis)
script = script + [{"role": "assistant", "content": analysis}]
# wait for 5 seconds
time.sleep(5)
if __name__ == "__main__":
main()
| [
"\n You are Sir David Attenborough. Narrate the picture of the human as if it is a nature documentary.\n Make it snarky and funny. Don't repeat yourself. Make it short. If I do anything remotely interesting, make a big deal about it!\n ",
"[{'type': 'text', 'text': 'Describe this image'}, {'type': 'image_url', 'image_url': 'data:image/jpeg;base64,PLACEHOLDER'}]"
] |
2024-01-10 | podsearch-pablo/backend | flaskr~modules.py | import openai
import os
import re
from typing import Set
from transformers import GPT2TokenizerFast
import json
import numpy as np
import pandas as pd
import time
import sys
import csv
from nltk.tokenize import sent_tokenize
import ast
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
COMPLETIONS_MODEL = "text-davinci-003"
openai.api_key = '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'
MODEL_NAME = "curie"
DOC_EMBEDDINGS_MODEL = f"text-search-{MODEL_NAME}-doc-001"
QUERY_EMBEDDINGS_MODEL = f"text-search-{MODEL_NAME}-query-001"
MAX_SECTION_LEN = 1000
SEPARATOR = "\n* "
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
separator_len = len(tokenizer.tokenize(SEPARATOR))
COMPLETIONS_API_PARAMS = {
# We use temperature of 0.0 because it gives the most predictable, factual answer.
"temperature": 0.0,
"max_tokens": 300,
"model": COMPLETIONS_MODEL,
}
def count_tokens(text: str) -> int:
"""count the number of tokens in a string"""
return len(tokenizer.encode(text))
def reduce_long(
long_text: str, long_text_tokens: bool = False, max_len: int = 590
) -> str:
"""
Reduce a long text to a maximum of `max_len` tokens by potentially cutting at a sentence end
"""
if not long_text_tokens:
long_text_tokens = count_tokens(long_text)
if long_text_tokens > max_len:
sentences = sent_tokenize(long_text.replace("\n", " "))
ntokens = 0
for i, sentence in enumerate(sentences):
ntokens += 1 + count_tokens(sentence)
if ntokens > max_len:
return ". ".join(sentences[:i][:-1]) + "."
return long_text
def loadTimeStampSegments():
"""
Returns the time stamp segments loaded from 'timestampsegments.json' as a dictionary
"""
totalText = {}
with open('./flaskr/timestampsegments.json') as fp:
totalText = json.load(fp)
sample = []
for i in range(1,70):
if i == 3:
continue
if (len(totalText[str(i)])!=0):
sample.append(totalText[str(i)])
count = 0
df = []
for i in range(len(sample)):
tempCount = 0
category = i
for a in sample[i].keys():
lst = []
lst.append(category)
if (count_tokens(sample[i][a])>1250):
sample[i][a] = sample[i][a][:4000]
tempCount+=len(sample[i][a].split())
lst.append(a)
lst.append(sample[i][a])
lst.append(count_tokens(sample[i][a]))
df.append(lst)
count+=tempCount
data = pd.DataFrame(df, columns=['title', 'heading', 'content', 'tokens'])
return data
def get_embedding(text: str, model: str) -> list[float]:
"""
Creates and returns an embedding from passed in string and OpenAI model
"""
result = openai.Embedding.create(
model=model,
input=text
)
return result["data"][0]["embedding"]
def get_doc_embedding(text: str) -> list[float]:
"""
Returns the embeddings for a given text, using pretermined document embedding model
"""
time.sleep(1)
return get_embedding(text, DOC_EMBEDDINGS_MODEL)
def get_query_embedding(text: str) -> list[float]:
"""
Returns the embeddings for a given text, using pretermined query embedding model
"""
return get_embedding(text, QUERY_EMBEDDINGS_MODEL)
def compute_doc_embeddings(df: pd.DataFrame) -> dict[tuple[str, str], list[float]]:
"""
Create an embedding for each row in the dataframe using the OpenAI Embeddings API.
Return a dictionary that maps between each embedding vector and the index of the row that it corresponds to.
"""
return {
idx: get_doc_embedding(r.content.replace("\n", " ")) for idx, r in df.iterrows()
}
def vector_similarity(x: list[float], y: list[float]) -> float:
"""
We could use cosine similarity or dot product to calculate the similarity between vectors.
In practice, we have found it makes little difference.
"""
return np.dot(np.array(x), np.array(y))
def order_document_sections_by_query_similarity(query: str, contexts: dict[(str, str), np.array]) -> list[(float, (str, str))]:
"""
Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
to find the most relevant sections.
Return the list of document sections, sorted by relevance in descending order.
"""
query_embedding = get_query_embedding(query)
document_similarities = sorted([
(vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
], reverse=True)
return document_similarities
def construct_prompt(question: str, context_embeddings: dict, df: pd.DataFrame) -> str:
"""
    Creates a prompt given a question, context embeddings, and a dataframe of document sections.
"""
most_relevant_document_sections = order_document_sections_by_query_similarity(question, context_embeddings)
chosen_sections = []
chosen_sections_len = 0
chosen_sections_indexes = []
for _, section_index in most_relevant_document_sections:
# Add contexts until we run out of space.
document_section = df.loc[section_index]
chosen_sections_len += document_section.tokens + separator_len
if chosen_sections_len > MAX_SECTION_LEN:
break
chosen_sections.append(SEPARATOR + document_section.content.replace("\n", " "))
chosen_sections_indexes.append(str(section_index))
# Useful diagnostic information
print(f"Selected {len(chosen_sections)} document sections:")
print("\n".join(chosen_sections_indexes))
header = """Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below, say "I don't know."\n\nContext:\n"""
most_relevant_document_sections = most_relevant_document_sections[:3]
return (header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:", most_relevant_document_sections)
def construct_prompt_poem(question: str, context_embeddings: dict, df: pd.DataFrame) -> str:
"""
    Creates a prompt (whose answer is formatted as a poem) given a question, context embeddings, and a dataframe of document sections.
"""
most_relevant_document_sections = order_document_sections_by_query_similarity(question, context_embeddings)
chosen_sections = []
chosen_sections_len = 0
chosen_sections_indexes = []
for _, section_index in most_relevant_document_sections:
# Add contexts until we run out of space.
document_section = df.loc[section_index]
chosen_sections_len += document_section.tokens + separator_len
if chosen_sections_len > MAX_SECTION_LEN:
break
chosen_sections.append(SEPARATOR + document_section.content.replace("\n", " "))
chosen_sections_indexes.append(str(section_index))
# Useful diagnostic information
print(f"Selected {len(chosen_sections)} document sections:")
print("\n".join(chosen_sections_indexes))
header = """Answer the question as truthfully as possible using the provided context, in the format of a poem, and if the answer is not contained within the text below, say "I don't know."\n\nContext:\n"""
print(header)
most_relevant_document_sections = most_relevant_document_sections[:3]
return (header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:", most_relevant_document_sections)
def answer_query_with_context(
query: str,
df: pd.DataFrame,
document_embeddings: dict[(str, str), np.array],
show_prompt: bool = False
) -> str:
"""
    Answers a passed-in query using the passed-in document embeddings and dataframe of document sections.
"""
(prompt, values) = construct_prompt(
query,
document_embeddings,
df
)
if show_prompt:
print(prompt)
response = openai.Completion.create(
prompt=prompt,
**COMPLETIONS_API_PARAMS
)
return (response["choices"][0]["text"].strip(" \n"), values)
def answer_query_with_context_poem(
query: str,
df: pd.DataFrame,
document_embeddings: dict[(str, str), np.array],
show_prompt: bool = False
) -> str:
"""
    Answers a passed-in query, in poem format, using the passed-in document embeddings and dataframe of document sections.
"""
(prompt, values) = construct_prompt_poem(
query,
document_embeddings,
df
)
if show_prompt:
print(prompt)
response = openai.Completion.create(
prompt=prompt,
**COMPLETIONS_API_PARAMS
)
return (response["choices"][0]["text"].strip(" "), values)
def load_context_embeddings():
"""
Reads context embeddings from context_embeddings.csv
"""
dct = pd.read_csv('./flaskr/context_embeddings.csv')
lst = {}
dct = dct.values.tolist()
for i in range(1, len(dct)):
lst[i] = ast.literal_eval(dct[i-1][1])
return lst
def answer_question(text):
"""
Answers a passed in question using loaded embeddings + predetermined models
"""
new_context = load_context_embeddings()
df = loadTimeStampSegments()
(answer, relevant) = (answer_query_with_context(text, df, new_context))
print(answer)
newRelevant = []
for i in range(len(relevant)):
newRelevant.append(getTupleFromIndex(relevant[i][1]))
return {
"answer": answer,
"relevant": newRelevant
}
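# Illustrative sketch (not part of the original module): the "./flaskr/" paths suggest this module
# backs a Flask app, so a hypothetical route wiring answer_question up might look like the function
# below. The endpoint name and request shape are assumptions, not the project's actual API.
def register_qa_route(app):
    """Attach a hypothetical /ask endpoint to a Flask app object."""
    from flask import request, jsonify  # assumed dependency

    @app.route("/ask", methods=["POST"])
    def ask():
        question = request.json.get("question", "")
        # answer_question returns {"answer": ..., "relevant": ...}
        return jsonify(answer_question(question))

    return app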
def answer_question_poem(text):
"""
Answers a passed in question using loaded embeddings + predetermined models, in the format of a poem
"""
new_context = load_context_embeddings()
df = loadTimeStampSegments()
(answer, relevant) = (answer_query_with_context_poem(text, df, new_context))
print("ANSWER IS " + str(answer))
newRelevant = []
for i in range(len(relevant)):
newRelevant.append(getTupleFromIndex(relevant[i][1]))
return {
"answer": answer,
"relevant": newRelevant
}
def getTupleFromIndex(index):
"""
Given an index of a clip, returns the relevant timestamp, name of video, and link
"""
with open('./flaskr/timestampsegments.json') as info:
dct = json.load(info)
with open ('./flaskr/timestamps.json') as timest:
timeStamps = json.load(timest)
with open ('./flaskr/youtubeVids.json') as videos:
vids = json.load(videos)
timeDct = {}
a = 0
for key in dct.keys():
for key_key in dct[key].keys():
timeDct[a]=(key, key_key)
a+=1
dctWhichContains = timeStamps[timeDct[index][0]]
valueToFind = timeDct[index][1]
name = (vids[str(timeDct[index][0])]['Name'])
link = (vids[str(timeDct[index][0])]['Link'])
time = (list(dctWhichContains.keys())[list(dctWhichContains.values()).index(valueToFind)])
return (time, valueToFind, name, link, timeDct[index][0])
def main():
answer_question("Why is dream a purple cow?")
if __name__ == "__main__":
main()
| [] |
2024-01-10 | crosleythomas/MirrorGPT | mirror~mirror_agent~agents~ChatConversationalWithTools.py | from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.agents import Tool, create_json_agent
from langchain.memory import ConversationBufferMemory
from langchain.agents.conversational.output_parser import ConvoOutputParser
from langchain.output_parsers import OutputFixingParser
from langchain.agents import initialize_agent, AgentType
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.tools.human.tool import HumanInputRun
from langchain.agents.agent_toolkits import JsonToolkit
from langchain.tools.json.tool import JsonSpec
import json
from mirror.utils.parsers.CustomConvoParser import CustomConvoParser
system_message = """You are an AI agent emulating a person named Thomas.
You have access to Tools that each store information about Thomas so you can answer questions on his behalf to save him time.
You should use those tools when you need to gather information about Thomas in order to give a better response. If you cannot
figure out what Thomas would likely say, you should say "I don't know" instead of making something up. There is a Tool to ask
a human (the real Thomas) for guidance if you get stuck.
You should respond to questions in the first person as if you are Thomas.
For example, if you are asked "What is your favorite color?" you should
respond with "My favorite color is blue." instead of "Thomas likes blue.".
"""
def build_agent(mirror_name, tools, data_path, partial_ok=True, voice_out=False, voice_id=None):
"""
This is a concrete implementation of a MirrorAgent that uses the ChatConversationalReactDescription agent.
Args:
        partial_ok (bool): Whether or not to build the Agent if not all tools can be constructed.
    Returns:
        An initialized conversational agent chain for the Mirror.
    """
llm = OpenAI(verbose=True)
tools = create_tools(tools=tools, llm=llm, persist_directory=data_path)
parser = CustomConvoParser()
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
agent = create_agent(tools, llm, memory, parser)
return agent
def create_tools(tools, llm, persist_directory):
# TODO: add try/catch logic to give descriptive if any of the tools aren't loading properly
constructed_tools = []
# Tool for Professional Experience
if "chroma" in tools:
print(f"Loading ChromaDB from {persist_directory}")
embeddings = OpenAIEmbeddings()
docsearch = Chroma(persist_directory=persist_directory, embedding_function=embeddings)
retriever = docsearch.as_retriever()
qa = RetrievalQA.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
work_searchqa = Tool(
name="experience",
func=qa.run,
description="Question-answer chain with informaiton about my education and professional work experience scraped from LinkedIn",
)
constructed_tools.append(work_searchqa)
# Tool to query Gather data (getgather.xyz)
if "gather" in tools:
print("Making assumption that Gather data is stored in data.json")
with open(f"{persist_directory}/data.json", "r") as f:
data = json.load(f)
json_spec = JsonSpec(dict_=data, max_value_length=4000)
json_toolkit = JsonToolkit(spec=json_spec)
json_agent_executor = create_json_agent(
llm=OpenAI(temperature=0),
toolkit=json_toolkit,
verbose=True
)
json_agent = Tool(
name="Gather Json Agent",
func=json_agent_executor.run,
description="Agent to query json data pulled from internet accounts owned by the user such as Twitter, Strava, and Gmail."
)
constructed_tools.append(json_agent)
# Tool to ask the human who the Mirror is trying to emulate
# TODO: replace with variable for Subject name
if "help" in tools:
phone_a_friend = Tool(
name="Phone a Friend",
func=HumanInputRun().run,
description="Ask the real person for guidance when you think you got stuck or you are not sure what to do next. The input should be a question for Thomas."
)
constructed_tools.append(phone_a_friend)
return constructed_tools
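# Illustrative sketch (not part of the original module): one way this factory might be driven.
# The tool selection and data path below are placeholders, not values the project ships with.
def _example_run_agent():
    agent = build_agent(
        mirror_name="Thomas",
        tools=["chroma", "help"],   # hypothetical tool selection
        data_path="./data/thomas",  # hypothetical persisted Chroma directory
    )
    return agent.run(input="What did you work on most recently?")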
def create_agent(tools, llm, memory, output_parser):
agent_kwargs = {"system_message": system_message, "output_parser": output_parser}
agent_chain = initialize_agent(tools=tools, llm=llm, agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory, agent_kwargs=agent_kwargs)
return agent_chain | [] |
2024-01-10 | crosleythomas/MirrorGPT | mirror~data~transform~transform.py | import argparse
from langchain import PromptTemplate
from langchain.llms import OpenAI
def statements(data):
"""
Transforms the data into a series of single-sentence facts about the data.
Args:
data (str): Data to be transformed.
Returns:
str: Transformed data.
"""
print("Transforming data into a series of single-sentence facts about the data.")
# Use LLM to turn the resume into a series of single-sentence facts about the resume
template = """
The purpose of this model is to take a string describing something about a person, for example their resume, as Input
and turn it into as many single-sentence facts about that person as possible.
Output Examples:
Studied Computer Science at the University of Washington.
Started his PhD at the University of Texas at Austin, but dropped out after one year.
Did two undergrad internships at Amazon Lab 126.
Input: {data}
Output:
"""
prompt = PromptTemplate(
input_variables=["data"],
template=template,
)
formatted_prompt = prompt.format(data=data)
llm = OpenAI()
llm_facts = llm(formatted_prompt)
print(f"Transformed into Facts: {llm_facts}")
return llm_facts
if __name__ == '__main__':
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input-file', type=str)
parser.add_argument('-o', '--output-file', type=str)
parser.add_argument('-t', '--transformation', choices=['statements'], default="statements", help='Transformation type to apply to input data')
args = parser.parse_args()
# Load text file
with open(args.input_file, 'r') as f:
data = f.read()
transformation_map = {
"statements": statements,
}
transformed_data = transformation_map[args.transformation](data)
if args.output_file:
print(f"Writing transformed data to file: {args.output_file}")
with open(args.output_file, 'w') as f:
f.write(transformed_data)
| [
"\n The purpose of this model is to take a string describing something about a person, for example their resume, as Input\n and turn it into as many single-sentence facts about that person as possible.\n\n Output Examples:\n Studied Computer Science at the University of Washington.\n Started his PhD at the University of Texas at Austin, but dropped out after one year.\n Did two undergrad internships at Amazon Lab 126.\n\n Input: {data}\n Output:\n "
] |
2024-01-10 | crosleythomas/MirrorGPT | mirror~utils~parsers~CustomConvoParser.py | from __future__ import annotations
import json
from typing import Union
from langchain.agents import AgentOutputParser
from langchain.agents.conversational_chat.prompt import FORMAT_INSTRUCTIONS
from langchain.schema import AgentAction, AgentFinish
"""
Custom parser based off of ConvoOutputParser because I kept getting LLM outputs
that had `` (2 backticks) instead of ``` (3 backticks) and the only way to fix
it was to add an additional cleaning statement. I haven't seen anyone else with
this issue, but will open a PR to langchain if I do.
"""
class CustomConvoParser(AgentOutputParser):
def get_format_instructions(self) -> str:
return FORMAT_INSTRUCTIONS
def parse(self, text: str) -> Union[AgentAction, AgentFinish]:
cleaned_output = text.strip()
if "```json" in cleaned_output:
_, cleaned_output = cleaned_output.split("```json")
if "```" in cleaned_output:
cleaned_output, _ = cleaned_output.split("```")
if cleaned_output.startswith("```json"):
cleaned_output = cleaned_output[len("```json") :]
if cleaned_output.startswith("```"):
cleaned_output = cleaned_output[len("```") :]
if cleaned_output.endswith("```"):
cleaned_output = cleaned_output[: -len("```")]
if cleaned_output.endswith("``"):
cleaned_output = cleaned_output[: -len("``")]
cleaned_output = cleaned_output.strip()
try:
response = json.loads(cleaned_output)
        except json.JSONDecodeError:
            print(f"Failed to parse JSON: {cleaned_output}\n"
                  f"Original text: {text}")
            raise
action, action_input = response["action"], response["action_input"]
if action == "Final Answer":
return AgentFinish({"output": action_input}, text)
else:
return AgentAction(action, action_input, text)
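# Illustrative sketch (not part of the original module): the parser accepts the fenced-JSON blobs
# the conversational agent emits, including the malformed two-backtick variant noted above.
def _example_parse():
    parser = CustomConvoParser()
    raw = '```json\n{"action": "Final Answer", "action_input": "My favorite color is blue."}\n``'
    # Returns AgentFinish({"output": "My favorite color is blue."}, raw)
    return parser.parse(raw)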
| [] |
2024-01-10 | lukenovak/ghostwriter | transfer-learning~sing.py | import random
import itertools
import torch
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2LMHeadModel, GPT2Tokenizer
from train import SPECIAL_TOKENS, build_input_from_segments, add_special_tokens_
import utils
def filter_tokens(filter_data, filter1=0., filter2=0.9, t=-float('Inf'), f=-float('Inf')):
filter1 = min(filter1, filter_data.size(-1))
if filter1 > 0:
bad_idx = filter_data < torch.topk(filter_data, filter1)[0][..., -1, None]
filter_data[bad_idx] = f
if filter2 > 0.0:
sorted_logits, sorted_indices = torch.sort(filter_data, descending=True)
prob_sums = torch.cumsum(torch.nn.functional.softmax(sorted_logits, dim=-1), dim=-1)
s_bad_idx = prob_sums > filter2
s_bad_idx[..., 1:] = s_bad_idx[..., :-1].clone()
s_bad_idx[..., 0] = 0
bad_idx = sorted_indices[s_bad_idx]
filter_data[bad_idx] = f
bad_idx = filter_data < t
filter_data[bad_idx] = f
return filter_data
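# Illustrative sketch (not part of the original script): filter_tokens implements the usual
# top-k / nucleus (top-p) filtering, so a tiny demo on made-up logits looks like this.
def _demo_filter_tokens():
    logits = torch.tensor([2.0, 1.0, 0.5, -1.0, -3.0])
    filtered = filter_tokens(logits.clone(), filter1=2, filter2=0.0)  # keep only the top-2 logits
    probs = torch.nn.functional.softmax(filtered, dim=-1)
    return torch.multinomial(probs, num_samples=1)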
def sample_sequence(feature, background, tokenizer, model, current_output=None):
special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
if current_output is None:
current_output = []
for i in range(20): # 20 tokens max output per line
accumulator = build_input_from_segments(feature, background, current_output, tokenizer, with_eos=False)
inlab = torch.tensor(accumulator["input_ids"], device="cpu").unsqueeze(0)
toklab = torch.tensor(accumulator["token_type_ids"], device="cpu").unsqueeze(0)
m = model(inlab, token_type_ids=toklab)
if isinstance(m, tuple):
m = m[0]
m = m[0, -1, :] / 0.7 # temperature value
m = filter_tokens(m, filter1=0, filter2=0.9) # 0 means no filtering
probs = torch.nn.functional.softmax(m, dim=-1)
back = torch.multinomial(probs, 1)
if i < 1 and back.item() in special_tokens_ids:
while back.item() in special_tokens_ids:
if probs.max().item() == 1:
break
back = torch.multinomial(probs, num_samples=1)
if back.item() in special_tokens_ids:
break
current_output.append(back.item())
return current_output
def run():
pretrained_model = utils.download_pretrained_model()
tokenizer_class, model_class = (OpenAIGPTTokenizer, OpenAIGPTLMHeadModel)
tokenizer = tokenizer_class.from_pretrained(pretrained_model)
model = model_class.from_pretrained(pretrained_model)
model.to("cpu")
add_special_tokens_(model, tokenizer)
dataset = utils.get_dataset(tokenizer, "./dataset_cache")
features = [dialog["feature"] for dataset in dataset.values() for dialog in dataset]
feature = random.choice(features)
print("Examples of selected feature:\n", tokenizer.decode(itertools.chain(*feature)))
background = [tokenizer.encode("tell me about yourself")]
generated_lyrics = []
hist_size = 2
for _ in range(5): # how many lines of lyrics to generate - time grows exponentially with this value
with torch.no_grad():
out_ids = sample_sequence(feature, background, tokenizer, model)
background.append(out_ids)
background.append(random.choice(background))
background = background[-5:] # size of history to retain (needs to be odd number since we're using two headed model)
this_line = tokenizer.decode(out_ids, skip_special_tokens=True)
generated_lyrics.append(this_line)
print("\nGenerated lyrics:")
print("\n".join(generated_lyrics))
if __name__ == "__main__":
run()
| [] |
2024-01-10 | devnag/tensorforce | examples~quickstart.py | # Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from tensorforce.agents import PPOAgent
from tensorforce.execution import Runner
from tensorforce.contrib.openai_gym import OpenAIGym
# Create an OpenAIgym environment
env = OpenAIGym('CartPole-v0', visualize=True)
# Network as list of layers
# - Embedding layer:
# - For Gym environments utilizing a discrete observation space, an
# "embedding" layer should be inserted at the head of the network spec.
# Such environments are usually identified by either:
# - class ...Env(discrete.DiscreteEnv):
# - self.observation_space = spaces.Discrete(...)
#
network_spec = [
#dict(type='embedding', indices=100, size=32),
dict(type='dense', size=32, activation='tanh'),
dict(type='dense', size=32, activation='tanh')
]
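# Illustrative example (an assumption, not used by this script): for a discrete-observation
# environment such as 'FrozenLake-v0' (16 states), the spec would gain an embedding head.
network_spec_discrete_example = [
    dict(type='embedding', indices=16, size=32),
    dict(type='dense', size=32, activation='tanh'),
    dict(type='dense', size=32, activation='tanh')
]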
agent = PPOAgent(
states_spec=env.states,
actions_spec=env.actions,
network_spec=network_spec,
batch_size=4096,
# Agent
preprocessing=None,
exploration=None,
reward_preprocessing=None,
# BatchAgent
keep_last_timestep=True,
# PPOAgent
step_optimizer=dict(
type='adam',
learning_rate=1e-3
),
optimization_steps=10,
# Model
scope='ppo',
discount=0.99,
# DistributionModel
distributions_spec=None,
entropy_regularization=0.01,
# PGModel
baseline_mode=None,
baseline=None,
baseline_optimizer=None,
gae_lambda=None,
# PGLRModel
likelihood_ratio_clipping=0.2,
summary_spec=None,
distributed_spec=None
)
# Create the runner
runner = Runner(agent=agent, environment=env)
# Callback function printing episode statistics
def episode_finished(r):
print("Finished episode {ep} after {ts} timesteps (reward: {reward})".format(ep=r.episode, ts=r.episode_timestep,
reward=r.episode_rewards[-1]))
return True
# Start learning
runner.run(episodes=3000, max_episode_timesteps=200, episode_finished=episode_finished)
# Print statistics
print("Learning finished. Total episodes: {ep}. Average reward of last 100 episodes: {ar}.".format(
ep=runner.episode,
ar=np.mean(runner.episode_rewards[-100:]))
)
| [] |
2024-01-10 | shibing624/chatgpt-webui | src~index_func.py | import os
import re
from typing import List, Optional, Any
from langchain.schema import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from loguru import logger
from tqdm import tqdm
from src.config import local_embedding, retrieve_proxy, chunk_overlap, chunk_size, hf_emb_model_name
from src.presets import OPENAI_API_BASE
from src.utils import excel_to_string, get_files_hash, load_pkl, save_pkl
pwd_path = os.path.abspath(os.path.dirname(__file__))
class ChineseRecursiveTextSplitter(RecursiveCharacterTextSplitter):
"""Recursive text splitter for Chinese text.
copy from: https://github.com/chatchat-space/Langchain-Chatchat/tree/master
"""
def __init__(
self,
separators: Optional[List[str]] = None,
keep_separator: bool = True,
is_separator_regex: bool = True,
**kwargs: Any,
) -> None:
"""Create a new TextSplitter."""
super().__init__(keep_separator=keep_separator, **kwargs)
self._separators = separators or [
"\n\n",
"\n",
"ใ|๏ผ|๏ผ",
"\.\s|\!\s|\?\s",
"๏ผ|;\s",
"๏ผ|,\s"
]
self._is_separator_regex = is_separator_regex
@staticmethod
def _split_text_with_regex_from_end(
text: str, separator: str, keep_separator: bool
) -> List[str]:
# Now that we have the separator, split the text
if separator:
if keep_separator:
# The parentheses in the pattern keep the delimiters in the result.
_splits = re.split(f"({separator})", text)
splits = ["".join(i) for i in zip(_splits[0::2], _splits[1::2])]
if len(_splits) % 2 == 1:
splits += _splits[-1:]
else:
splits = re.split(separator, text)
else:
splits = list(text)
return [s for s in splits if s != ""]
def _split_text(self, text: str, separators: List[str]) -> List[str]:
"""Split incoming text and return chunks."""
final_chunks = []
# Get appropriate separator to use
separator = separators[-1]
new_separators = []
for i, _s in enumerate(separators):
_separator = _s if self._is_separator_regex else re.escape(_s)
if _s == "":
separator = _s
break
if re.search(_separator, text):
separator = _s
new_separators = separators[i + 1:]
break
_separator = separator if self._is_separator_regex else re.escape(separator)
splits = self._split_text_with_regex_from_end(text, _separator, self._keep_separator)
# Now go merging things, recursively splitting longer texts.
_good_splits = []
_separator = "" if self._keep_separator else separator
for s in splits:
if self._length_function(s) < self._chunk_size:
_good_splits.append(s)
else:
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
_good_splits = []
if not new_separators:
final_chunks.append(s)
else:
other_info = self._split_text(s, new_separators)
final_chunks.extend(other_info)
if _good_splits:
merged_text = self._merge_splits(_good_splits, _separator)
final_chunks.extend(merged_text)
return [re.sub(r"\n{2,}", "\n", chunk.strip()) for chunk in final_chunks if chunk.strip() != ""]
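# Illustrative sketch (not part of the original module): the splitter is a drop-in
# RecursiveCharacterTextSplitter tuned for Chinese punctuation, so basic use looks like this.
def _example_split(text: str) -> List[str]:
    splitter = ChineseRecursiveTextSplitter(chunk_size=200, chunk_overlap=20)
    return splitter.split_text(text)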
def get_documents(file_paths):
text_splitter = ChineseRecursiveTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
documents = []
logger.debug("Loading documents...")
logger.debug(f"file_paths: {file_paths}")
for file in file_paths:
filepath = file.name
filename = os.path.basename(filepath)
file_type = os.path.splitext(filename)[1]
logger.info(f"loading file: {filename}")
texts = None
try:
if file_type == ".pdf":
import PyPDF2
logger.debug("Loading PDF...")
try:
from src.pdf_func import parse_pdf
from src.config import advance_docs
two_column = advance_docs["pdf"].get("two_column", False)
pdftext = parse_pdf(filepath, two_column).text
except:
pdftext = ""
with open(filepath, "rb") as pdfFileObj:
pdfReader = PyPDF2.PdfReader(pdfFileObj)
for page in tqdm(pdfReader.pages):
pdftext += page.extract_text()
texts = [Document(page_content=pdftext,
metadata={"source": filepath})]
elif file_type == ".docx":
logger.debug("Loading Word...")
from langchain.document_loaders import UnstructuredWordDocumentLoader
loader = UnstructuredWordDocumentLoader(filepath)
texts = loader.load()
elif file_type == ".pptx":
logger.debug("Loading PowerPoint...")
from langchain.document_loaders import UnstructuredPowerPointLoader
loader = UnstructuredPowerPointLoader(filepath)
texts = loader.load()
elif file_type == ".epub":
logger.debug("Loading EPUB...")
from langchain.document_loaders import UnstructuredEPubLoader
loader = UnstructuredEPubLoader(filepath)
texts = loader.load()
elif file_type == ".xlsx":
logger.debug("Loading Excel...")
text_list = excel_to_string(filepath)
texts = []
for elem in text_list:
texts.append(Document(page_content=elem,
metadata={"source": filepath}))
else:
logger.debug("Loading text file...")
from langchain.document_loaders import TextLoader
loader = TextLoader(filepath, "utf8")
texts = loader.load()
logger.debug(f"text size: {len(texts)}, text top3: {texts[:3]}")
except Exception as e:
logger.error(f"Error loading file: {filename}, {e}")
if texts is not None:
texts = text_splitter.split_documents(texts)
documents.extend(texts)
logger.debug(f"Documents loaded. documents size: {len(documents)}, top3: {documents[:3]}")
return documents
def construct_index(
api_key,
files,
load_from_cache_if_possible=True,
):
from langchain.vectorstores import FAISS
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
if api_key:
os.environ["OPENAI_API_KEY"] = api_key
else:
os.environ["OPENAI_API_KEY"] = "sk-xxxxxxx"
index_name = get_files_hash(files)
index_dir = os.path.join(pwd_path, '../index')
index_path = f"{index_dir}/{index_name}"
doc_file = f"{index_path}/docs.pkl"
if local_embedding:
embeddings = HuggingFaceEmbeddings(model_name=hf_emb_model_name)
else:
from langchain.embeddings import OpenAIEmbeddings
if os.environ.get("OPENAI_API_TYPE", "openai") == "openai":
openai_api_base = os.environ.get("OPENAI_API_BASE", OPENAI_API_BASE)
embeddings = OpenAIEmbeddings(
openai_api_base=openai_api_base,
openai_api_key=os.environ.get("OPENAI_EMBEDDING_API_KEY", api_key)
)
else:
embeddings = OpenAIEmbeddings(
deployment=os.environ["AZURE_EMBEDDING_DEPLOYMENT_NAME"],
openai_api_key=os.environ["AZURE_OPENAI_API_KEY"],
model=os.environ["AZURE_EMBEDDING_MODEL_NAME"],
openai_api_base=os.environ["AZURE_OPENAI_API_BASE_URL"],
openai_api_type="azure"
)
if os.path.exists(index_path) and load_from_cache_if_possible:
logger.info("ๆพๅฐไบ็ผๅญ็็ดขๅผๆไปถ๏ผๅ ่ฝฝไธญโฆโฆ")
index = FAISS.load_local(index_path, embeddings)
documents = load_pkl(doc_file)
return index, documents
else:
try:
documents = get_documents(files)
logger.info("ๆๅปบ็ดขๅผไธญโฆโฆ")
with retrieve_proxy():
index = FAISS.from_documents(documents, embeddings)
logger.debug("็ดขๅผๆๅปบๅฎๆ๏ผ")
os.makedirs(index_dir, exist_ok=True)
index.save_local(index_path)
logger.debug("็ดขๅผๅทฒไฟๅญ่ณๆฌๅฐ!")
save_pkl(documents, doc_file)
logger.debug("็ดขๅผๆๆกฃๅทฒไฟๅญ่ณๆฌๅฐ!")
return index, documents
except Exception as e:
logger.error(f"็ดขๅผๆๅปบๅคฑ่ดฅ๏ผerror: {e}")
return None
| [] |
2024-01-10 | shibing624/chatgpt-webui | src~base_model.py | import os
import shutil
import traceback
from enum import Enum
from itertools import islice
import commentjson as json
import gradio as gr
import tiktoken
import urllib3
from loguru import logger
from src import shared
from src.config import retrieve_proxy
from src.index_func import construct_index
from src.presets import (
MODEL_TOKEN_LIMIT,
DEFAULT_TOKEN_LIMIT,
TOKEN_OFFSET,
REDUCE_TOKEN_FACTOR,
STANDARD_ERROR_MSG,
NO_APIKEY_MSG,
BILLING_NOT_APPLICABLE_MSG,
NO_INPUT_MSG,
HISTORY_DIR,
INITIAL_SYSTEM_PROMPT,
PROMPT_TEMPLATE,
WEBSEARCH_PTOMPT_TEMPLATE,
)
from src.utils import (
i18n,
construct_assistant,
construct_user,
save_file,
hide_middle_chars,
count_token,
new_auto_history_filename,
get_history_names,
get_history_filepath,
init_history_list,
get_history_list,
replace_special_symbols,
get_first_history_name,
add_source_numbers,
add_details,
replace_today,
chinese_preprocessing_func,
)
class ModelType(Enum):
Unknown = -1
OpenAI = 0
ChatGLM = 1
OpenAIInstruct = 2
OpenAIVision = 3
Claude = 4
Qwen = 5
LLaMA = 6
@classmethod
def get_type(cls, model_name: str):
model_name_lower = model_name.lower()
if "gpt" in model_name_lower:
if "instruct" in model_name_lower:
model_type = ModelType.OpenAIInstruct
elif "vision" in model_name_lower:
model_type = ModelType.OpenAIVision
else:
model_type = ModelType.OpenAI
elif "chatglm" in model_name_lower:
model_type = ModelType.ChatGLM
elif "llama" in model_name_lower or "alpaca" in model_name_lower or "yi" in model_name_lower:
model_type = ModelType.LLaMA
else:
model_type = ModelType.Unknown
return model_type
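# Illustrative sketch (not part of the original module): get_type keys off substrings of the
# lowercased model name, e.g. the assertions below hold for the mapping defined above.
def _example_model_type_dispatch():
    assert ModelType.get_type("gpt-4-vision-preview") is ModelType.OpenAIVision
    assert ModelType.get_type("chatglm3-6b") is ModelType.ChatGLM
    assert ModelType.get_type("Yi-34B-Chat") is ModelType.LLaMA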
class BaseLLMModel:
def __init__(
self,
model_name,
system_prompt=INITIAL_SYSTEM_PROMPT,
temperature=1.0,
top_p=1.0,
n_choices=1,
stop="",
max_generation_token=None,
presence_penalty=0,
frequency_penalty=0,
logit_bias=None,
user="",
single_turn=False,
) -> None:
self.history = []
self.all_token_counts = []
self.model_name = model_name
self.model_type = ModelType.get_type(model_name)
try:
self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name]
except KeyError:
self.token_upper_limit = DEFAULT_TOKEN_LIMIT
self.interrupted = False
self.system_prompt = system_prompt
self.api_key = None
self.need_api_key = False
self.history_file_path = get_first_history_name(user)
self.user_name = user
self.chatbot = []
self.default_single_turn = single_turn
self.default_temperature = temperature
self.default_top_p = top_p
self.default_n_choices = n_choices
self.default_stop_sequence = stop
self.default_max_generation_token = max_generation_token
self.default_presence_penalty = presence_penalty
self.default_frequency_penalty = frequency_penalty
self.default_logit_bias = logit_bias
self.default_user_identifier = user
self.single_turn = single_turn
self.temperature = temperature
self.top_p = top_p
self.n_choices = n_choices
self.stop_sequence = stop
self.max_generation_token = max_generation_token
self.presence_penalty = presence_penalty
self.frequency_penalty = frequency_penalty
self.logit_bias = logit_bias
self.user_identifier = user
self.metadata = {}
def get_answer_stream_iter(self):
"""stream predict, need to be implemented
conversations are stored in self.history, with the most recent question, in OpenAI format
should return a generator, each time give the next word (str) in the answer
"""
logger.warning("stream predict not implemented, using at once predict instead")
response, _ = self.get_answer_at_once()
yield response
def get_answer_at_once(self):
"""predict at once, need to be implemented
conversations are stored in history, with the most recent question, in OpenAI format
Should return:
the answer (str)
total token count (int)
"""
logger.warning("at once predict not implemented, using stream predict instead")
response_iter = self.get_answer_stream_iter()
count = 0
response = ''
for response in response_iter:
count += 1
return response, sum(self.all_token_counts) + count
def billing_info(self):
"""get billing infomation, inplement if needed"""
return BILLING_NOT_APPLICABLE_MSG
def count_token(self, user_input):
"""get token count from input, implement if needed"""
return len(user_input)
def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=""):
def get_return_value():
return chatbot, status_text
status_text = i18n("ๅผๅงๅฎๆถไผ ่พๅ็ญโฆโฆ")
if fake_input:
chatbot.append((fake_input, ""))
else:
chatbot.append((inputs, ""))
user_token_count = self.count_token(inputs)
self.all_token_counts.append(user_token_count)
logger.debug(f"่พๅ
ฅtoken่ฎกๆฐ: {user_token_count}")
stream_iter = self.get_answer_stream_iter()
if display_append:
display_append = (
'\n\n<hr class="append-display no-in-raw" />' + display_append
)
partial_text = ""
token_increment = 1
for partial_text in stream_iter:
if type(partial_text) == tuple:
partial_text, token_increment = partial_text
chatbot[-1] = (chatbot[-1][0], partial_text + display_append)
self.all_token_counts[-1] += token_increment
status_text = self.token_message()
yield get_return_value()
if self.interrupted:
self.recover()
break
self.history.append(construct_assistant(partial_text))
def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=""):
if fake_input:
chatbot.append((fake_input, ""))
else:
chatbot.append((inputs, ""))
if fake_input is not None:
user_token_count = self.count_token(fake_input)
else:
user_token_count = self.count_token(inputs)
self.all_token_counts.append(user_token_count)
ai_reply, total_token_count = self.get_answer_at_once()
self.history.append(construct_assistant(ai_reply))
if fake_input is not None:
self.history[-2] = construct_user(fake_input)
chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)
if fake_input is not None:
self.all_token_counts[-1] += count_token(construct_assistant(ai_reply))
else:
self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts)
status_text = self.token_message()
return chatbot, status_text
def handle_file_upload(self, files, chatbot, language):
"""if the model accepts modal input, implement this function"""
status = gr.Markdown.update()
if files:
construct_index(self.api_key, files=files)
status = i18n("็ดขๅผๆๅปบๅฎๆ")
return gr.Files.update(), chatbot, status
def prepare_inputs(
self, real_inputs, use_websearch,
files, reply_language, chatbot,
load_from_cache_if_possible=True,
):
display_append = []
limited_context = False
if type(real_inputs) == list:
fake_inputs = real_inputs[0]["text"]
else:
fake_inputs = real_inputs
if files:
from langchain.vectorstores.base import VectorStoreRetriever
from langchain.retrievers import BM25Retriever, EnsembleRetriever
limited_context = True
msg = "ๅ ่ฝฝ็ดขๅผไธญโฆโฆ"
logger.info(msg)
index, documents = construct_index(
self.api_key,
files=files,
load_from_cache_if_possible=load_from_cache_if_possible,
)
            assert index is not None, "Failed to load the index"
            msg = "Index loaded, generating answer..."
logger.info(msg)
k = 3
score_threshold = 0.6
with retrieve_proxy():
vec_retriever = VectorStoreRetriever(
vectorstore=index,
search_type="similarity_score_threshold",
search_kwargs={"k": k, "score_threshold": score_threshold}
)
bm25_retriever = BM25Retriever.from_documents(documents, preprocess_func=chinese_preprocessing_func)
bm25_retriever.k = k
ensemble_retriever = EnsembleRetriever(
retrievers=[bm25_retriever, vec_retriever],
weights=[0.5, 0.5],
)
try:
relevant_documents = ensemble_retriever.get_relevant_documents(fake_inputs)
except:
return self.prepare_inputs(
fake_inputs,
use_websearch,
files,
reply_language,
chatbot,
load_from_cache_if_possible=False,
)
reference_results = [
                [d.page_content.strip("\ufffd"), os.path.basename(d.metadata["source"])]
for d in relevant_documents
]
reference_results = add_source_numbers(reference_results)
display_append = add_details(reference_results)
display_append = "\n\n" + "".join(display_append)
if type(real_inputs) == list:
real_inputs[0]["text"] = (
replace_today(PROMPT_TEMPLATE)
.replace("{query_str}", fake_inputs)
.replace("{context_str}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
else:
real_inputs = (
replace_today(PROMPT_TEMPLATE)
.replace("{query_str}", real_inputs)
.replace("{context_str}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
elif use_websearch:
from duckduckgo_search import DDGS
search_results = []
with DDGS() as ddgs:
ddgs_gen = ddgs.text(fake_inputs, backend="lite")
for r in islice(ddgs_gen, 10):
search_results.append(r)
reference_results = []
for idx, result in enumerate(search_results):
logger.debug(f"ๆ็ดข็ปๆ{idx + 1}๏ผ{result}")
domain_name = urllib3.util.parse_url(result["href"]).host
reference_results.append([result["body"], result["href"]])
display_append.append(
# f"{idx+1}. [{domain_name}]({result['href']})\n"
f"<a href=\"{result['href']}\" target=\"_blank\">{idx + 1}. {result['title']}</a>"
)
reference_results = add_source_numbers(reference_results)
# display_append = "<ol>\n\n" + "".join(display_append) + "</ol>"
display_append = (
'<div class = "source-a">' + "".join(display_append) + "</div>"
)
if type(real_inputs) == list:
real_inputs[0]["text"] = (
replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
.replace("{query}", fake_inputs)
.replace("{web_results}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
else:
real_inputs = (
replace_today(WEBSEARCH_PTOMPT_TEMPLATE)
.replace("{query}", fake_inputs)
.replace("{web_results}", "\n\n".join(reference_results))
.replace("{reply_language}", reply_language)
)
else:
display_append = ""
return limited_context, fake_inputs, display_append, real_inputs, chatbot
def predict(
self,
inputs,
chatbot,
stream=False,
use_websearch=False,
files=None,
reply_language="ไธญๆ",
should_check_token_count=True,
): # repetition_penalty, top_k
status_text = "ๅผๅง็ๆๅ็ญโฆโฆ"
if type(inputs) == list:
logger.info(
"็จๆท"
+ f"{self.user_name}"
+ "็่พๅ
ฅไธบ๏ผ"
+ "("
+ str(len(inputs) - 1)
+ " images) "
+ f"{inputs[0]['text']}"
)
else:
logger.info(
"็จๆท"
+ f"{self.user_name}"
+ "็่พๅ
ฅไธบ๏ผ"
+ f"{inputs}"
)
if should_check_token_count:
if type(inputs) == list:
yield chatbot + [(inputs[0]["text"], "")], status_text
else:
yield chatbot + [(inputs, "")], status_text
        if reply_language == "Follow the language of the question (unstable)":
            reply_language = "the same language as the question, such as English, Chinese, Japanese, Español, Français, or Deutsch."
limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(
real_inputs=inputs,
use_websearch=use_websearch,
files=files,
reply_language=reply_language,
chatbot=chatbot
)
yield chatbot + [(fake_inputs, "")], status_text
if (
self.need_api_key and
self.api_key is None
and not shared.state.multi_api_key
):
status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG
logger.info(status_text)
chatbot.append((inputs, ""))
if len(self.history) == 0:
self.history.append(construct_user(inputs))
self.history.append("")
self.all_token_counts.append(0)
else:
self.history[-2] = construct_user(inputs)
yield chatbot + [(inputs, "")], status_text
return
elif len(inputs.strip()) == 0:
status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG
logger.info(status_text)
yield chatbot + [(inputs, "")], status_text
return
if self.single_turn:
self.history = []
self.all_token_counts = []
if type(inputs) == list:
self.history.append(inputs)
else:
self.history.append(construct_user(inputs))
try:
if stream:
logger.debug("ไฝฟ็จๆตๅผไผ ่พ")
iter = self.stream_next_chatbot(
inputs,
chatbot,
fake_input=fake_inputs,
display_append=display_append,
)
for chatbot, status_text in iter:
yield chatbot, status_text
else:
logger.debug("ไธไฝฟ็จๆตๅผไผ ่พ")
chatbot, status_text = self.next_chatbot_at_once(
inputs,
chatbot,
fake_input=fake_inputs,
display_append=display_append,
)
yield chatbot, status_text
except Exception as e:
traceback.print_exc()
status_text = STANDARD_ERROR_MSG + str(e)
yield chatbot, status_text
if len(self.history) > 1 and self.history[-1]["content"] != inputs:
logger.info("ๅ็ญไธบ๏ผ" + f"{self.history[-1]['content']}")
if limited_context:
self.history = []
self.all_token_counts = []
max_token = self.token_upper_limit - TOKEN_OFFSET
if sum(self.all_token_counts) > max_token and should_check_token_count:
count = 0
while (
sum(self.all_token_counts)
> self.token_upper_limit * REDUCE_TOKEN_FACTOR
and sum(self.all_token_counts) > 0
):
count += 1
del self.all_token_counts[0]
del self.history[:2]
logger.info(status_text)
status_text = f"ไธบไบ้ฒๆญขtoken่ถ
้๏ผๆจกๅๅฟ่ฎฐไบๆฉๆ็ {count} ่ฝฎๅฏน่ฏ"
yield chatbot, status_text
def retry(
self,
chatbot,
stream=False,
use_websearch=False,
files=None,
reply_language="ไธญๆ",
):
logger.debug("้่ฏไธญโฆโฆ")
if len(self.history) > 1:
inputs = self.history[-2]["content"]
del self.history[-2:]
if len(self.all_token_counts) > 0:
self.all_token_counts.pop()
elif len(chatbot) > 0:
inputs = chatbot[-1][0]
if '<div class="user-message">' in inputs:
inputs = inputs.split('<div class="user-message">')[1]
inputs = inputs.split("</div>")[0]
elif len(self.history) == 1:
inputs = self.history[-1]["content"]
del self.history[-1]
else:
yield chatbot, f"{STANDARD_ERROR_MSG}ไธไธๆๆฏ็ฉบ็"
return
iter = self.predict(
inputs,
chatbot,
stream=stream,
use_websearch=use_websearch,
files=files,
reply_language=reply_language,
)
for x in iter:
yield x
logger.debug("้่ฏๅฎๆฏ")
def interrupt(self):
self.interrupted = True
def recover(self):
self.interrupted = False
def set_token_upper_limit(self, new_upper_limit):
self.token_upper_limit = new_upper_limit
logger.info(f"tokenไธ้่ฎพ็ฝฎไธบ{new_upper_limit}")
self.auto_save()
def set_temperature(self, new_temperature):
self.temperature = new_temperature
self.auto_save()
def set_top_p(self, new_top_p):
self.top_p = new_top_p
self.auto_save()
def set_n_choices(self, new_n_choices):
self.n_choices = new_n_choices
self.auto_save()
def set_stop_sequence(self, new_stop_sequence: str):
new_stop_sequence = new_stop_sequence.split(",")
self.stop_sequence = new_stop_sequence
self.auto_save()
def set_max_tokens(self, new_max_tokens):
self.max_generation_token = new_max_tokens
self.auto_save()
def set_presence_penalty(self, new_presence_penalty):
self.presence_penalty = new_presence_penalty
self.auto_save()
def set_frequency_penalty(self, new_frequency_penalty):
self.frequency_penalty = new_frequency_penalty
self.auto_save()
def set_logit_bias(self, logit_bias):
self.logit_bias = logit_bias
self.auto_save()
def encoded_logit_bias(self):
if self.logit_bias is None:
return {}
logit_bias = self.logit_bias.split()
bias_map = {}
encoding = tiktoken.get_encoding("cl100k_base")
for line in logit_bias:
word, bias_amount = line.split(":")
if word:
for token in encoding.encode(word):
bias_map[token] = float(bias_amount)
return bias_map
def set_user_identifier(self, new_user_identifier):
self.user_identifier = new_user_identifier
self.auto_save()
def set_system_prompt(self, new_system_prompt):
self.system_prompt = new_system_prompt
self.auto_save()
def set_key(self, new_access_key):
self.api_key = new_access_key.strip()
msg = i18n("APIๅฏ้ฅๆดๆนไธบไบ") + hide_middle_chars(self.api_key)
logger.info(msg)
return self.api_key, msg
def set_single_turn(self, new_single_turn):
self.single_turn = new_single_turn
self.auto_save()
def reset(self, remain_system_prompt=False):
self.history = []
self.all_token_counts = []
self.interrupted = False
self.history_file_path = new_auto_history_filename(self.user_name)
history_name = self.history_file_path[:-5]
choices = [history_name] + get_history_names(self.user_name)
system_prompt = self.system_prompt if remain_system_prompt else ""
self.single_turn = self.default_single_turn
self.temperature = self.default_temperature
self.top_p = self.default_top_p
self.n_choices = self.default_n_choices
self.stop_sequence = self.default_stop_sequence
self.max_generation_token = self.default_max_generation_token
self.presence_penalty = self.default_presence_penalty
self.frequency_penalty = self.default_frequency_penalty
self.logit_bias = self.default_logit_bias
self.user_identifier = self.default_user_identifier
return (
[],
self.token_message([0]),
gr.Radio.update(choices=choices, value=history_name),
system_prompt,
self.single_turn,
self.temperature,
self.top_p,
self.n_choices,
self.stop_sequence,
self.token_upper_limit,
self.max_generation_token,
self.presence_penalty,
self.frequency_penalty,
self.logit_bias,
self.user_identifier,
)
def delete_first_conversation(self):
if self.history:
del self.history[:2]
del self.all_token_counts[0]
return self.token_message()
def delete_last_conversation(self, chatbot):
if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:
msg = "็ฑไบๅ
ๅซๆฅ้ไฟกๆฏ๏ผๅชๅ ้คchatbot่ฎฐๅฝ"
chatbot = chatbot[:-1]
return chatbot, self.history
if len(self.history) > 0:
self.history = self.history[:-2]
if len(chatbot) > 0:
msg = "ๅ ้คไบไธ็ปchatbotๅฏน่ฏ"
chatbot = chatbot[:-1]
if len(self.all_token_counts) > 0:
msg = "ๅ ้คไบไธ็ปๅฏน่ฏ็token่ฎกๆฐ่ฎฐๅฝ"
self.all_token_counts.pop()
msg = "ๅ ้คไบไธ็ปๅฏน่ฏ"
self.chatbot = chatbot
self.auto_save(chatbot)
return chatbot, msg
def token_message(self, token_lst=None):
if token_lst is None:
token_lst = self.all_token_counts
token_sum = 0
for i in range(len(token_lst)):
token_sum += sum(token_lst[: i + 1])
return (
i18n("Token ่ฎกๆฐ: ")
+ f"{sum(token_lst)}"
+ i18n("๏ผๆฌๆฌกๅฏน่ฏ็ดฏ่ฎกๆถ่ไบ ")
+ f"{token_sum} tokens"
)
def rename_chat_history(self, filename, chatbot):
if filename == "":
return gr.update()
if not filename.endswith(".json"):
filename += ".json"
self.delete_chat_history(self.history_file_path)
        # Check for duplicate filenames
repeat_file_index = 2
full_path = os.path.join(HISTORY_DIR, self.user_name, filename)
while os.path.exists(full_path):
full_path = os.path.join(
HISTORY_DIR, self.user_name, f"{repeat_file_index}_{filename}"
)
repeat_file_index += 1
filename = os.path.basename(full_path)
self.history_file_path = filename
save_file(filename, self, chatbot)
return init_history_list(self.user_name)
def auto_name_chat_history(
self, name_chat_method, user_question, chatbot, single_turn_checkbox
):
if len(self.history) == 2 and not single_turn_checkbox:
user_question = self.history[0]["content"]
if type(user_question) == list:
user_question = user_question[0]["text"]
filename = replace_special_symbols(user_question)[:16] + ".json"
return self.rename_chat_history(filename, chatbot)
else:
return gr.update()
def auto_save(self, chatbot=None):
if chatbot is None:
chatbot = self.chatbot
save_file(self.history_file_path, self, chatbot)
def export_markdown(self, filename, chatbot):
if filename == "":
return
if not filename.endswith(".md"):
filename += ".md"
save_file(filename, self, chatbot)
def load_chat_history(self, new_history_file_path=None):
logger.debug(f"{self.user_name} ๅ ่ฝฝๅฏน่ฏๅๅฒไธญโฆโฆ")
if new_history_file_path is not None:
if type(new_history_file_path) != str:
# copy file from new_history_file_path.name to os.path.join(HISTORY_DIR, self.user_name)
new_history_file_path = new_history_file_path.name
shutil.copyfile(
new_history_file_path,
os.path.join(
HISTORY_DIR,
self.user_name,
os.path.basename(new_history_file_path),
),
)
self.history_file_path = os.path.basename(new_history_file_path)
else:
self.history_file_path = new_history_file_path
try:
if self.history_file_path == os.path.basename(self.history_file_path):
history_file_path = os.path.join(
HISTORY_DIR, self.user_name, self.history_file_path
)
else:
history_file_path = self.history_file_path
if not self.history_file_path.endswith(".json"):
history_file_path += ".json"
saved_json = {}
if os.path.exists(history_file_path):
with open(history_file_path, "r", encoding="utf-8") as f:
saved_json = json.load(f)
try:
if type(saved_json["history"][0]) == str:
logger.info("ๅๅฒ่ฎฐๅฝๆ ผๅผไธบๆง็๏ผๆญฃๅจ่ฝฌๆขโฆโฆ")
new_history = []
for index, item in enumerate(saved_json["history"]):
if index % 2 == 0:
new_history.append(construct_user(item))
else:
new_history.append(construct_assistant(item))
saved_json["history"] = new_history
logger.info(new_history)
except:
pass
if len(saved_json["chatbot"]) < len(saved_json["history"]) // 2:
logger.info("Trimming corrupted history...")
saved_json["history"] = saved_json["history"][-len(saved_json["chatbot"]):]
logger.info(f"Trimmed history: {saved_json['history']}")
logger.debug(f"{self.user_name} ๅ ่ฝฝๅฏน่ฏๅๅฒๅฎๆฏ")
self.history = saved_json["history"]
self.single_turn = saved_json.get("single_turn", self.single_turn)
self.temperature = saved_json.get("temperature", self.temperature)
self.top_p = saved_json.get("top_p", self.top_p)
self.n_choices = saved_json.get("n_choices", self.n_choices)
self.stop_sequence = list(saved_json.get("stop_sequence", self.stop_sequence))
self.token_upper_limit = saved_json.get(
"token_upper_limit", self.token_upper_limit
)
self.max_generation_token = saved_json.get(
"max_generation_token", self.max_generation_token
)
self.presence_penalty = saved_json.get(
"presence_penalty", self.presence_penalty
)
self.frequency_penalty = saved_json.get(
"frequency_penalty", self.frequency_penalty
)
self.logit_bias = saved_json.get("logit_bias", self.logit_bias)
self.user_identifier = saved_json.get("user_identifier", self.user_name)
self.metadata = saved_json.get("metadata", self.metadata)
self.chatbot = saved_json["chatbot"]
return (
os.path.basename(self.history_file_path)[:-5],
saved_json["system"],
saved_json["chatbot"],
self.single_turn,
self.temperature,
self.top_p,
self.n_choices,
",".join(self.stop_sequence),
self.token_upper_limit,
self.max_generation_token,
self.presence_penalty,
self.frequency_penalty,
self.logit_bias,
self.user_identifier,
)
except:
            # No chat history, or the chat history failed to parse
            logger.info(f"No chat history record found: {self.history_file_path}")
self.reset()
return (
os.path.basename(self.history_file_path),
"",
[],
self.single_turn,
self.temperature,
self.top_p,
self.n_choices,
",".join(self.stop_sequence),
self.token_upper_limit,
self.max_generation_token,
self.presence_penalty,
self.frequency_penalty,
self.logit_bias,
self.user_identifier,
)
def delete_chat_history(self, filename):
if filename == "CANCELED":
return gr.update(), gr.update(), gr.update()
if filename == "":
return i18n("ไฝ ๆฒกๆ้ๆฉไปปไฝๅฏน่ฏๅๅฒ"), gr.update(), gr.update()
if not filename.endswith(".json"):
filename += ".json"
if filename == os.path.basename(filename):
history_file_path = os.path.join(HISTORY_DIR, self.user_name, filename)
else:
history_file_path = filename
md_history_file_path = history_file_path[:-5] + ".md"
try:
os.remove(history_file_path)
os.remove(md_history_file_path)
return i18n("ๅ ้คๅฏน่ฏๅๅฒๆๅ"), get_history_list(self.user_name), []
except:
logger.info(f"ๅ ้คๅฏน่ฏๅๅฒๅคฑ่ดฅ {history_file_path}")
return (
i18n("ๅฏน่ฏๅๅฒ") + filename + i18n("ๅทฒ็ป่ขซๅ ้คๅฆ"),
get_history_list(self.user_name),
[],
)
def auto_load(self):
filepath = get_history_filepath(self.user_name)
if not filepath:
self.history_file_path = new_auto_history_filename(self.user_name)
else:
self.history_file_path = filepath
return self.load_chat_history()
def like(self):
"""like the last response, implement if needed"""
return gr.update()
def dislike(self):
"""dislike the last response, implement if needed"""
return gr.update()
def deinitialize(self):
"""deinitialize the model, implement if needed"""
pass
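# Illustrative sketch (not part of the original module): concrete models override
# get_answer_at_once and/or get_answer_stream_iter; a trivial echo backend like the one
# below would already be enough to drive predict()/stream_next_chatbot above.
class _EchoModelExample(BaseLLMModel):
    def get_answer_at_once(self):
        last_user_message = self.history[-1]["content"]
        return f"echo: {last_user_message}", self.count_token(last_user_message)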
| [] |
2024-01-10 | shibing624/chatgpt-webui | src~pdf_func.py | from types import SimpleNamespace
import pdfplumber
from langchain.docstore.document import Document
def prepare_table_config(crop_page):
"""Prepare tableๆฅๆพ่พน็, ่ฆๆฑpageไธบๅๅงpage
From https://github.com/jsvine/pdfplumber/issues/242
"""
page = crop_page.root_page # root/parent
cs = page.curves + page.edges
def curves_to_edges():
"""See https://github.com/jsvine/pdfplumber/issues/127"""
edges = []
for c in cs:
edges += pdfplumber.utils.rect_to_edges(c)
return edges
edges = curves_to_edges()
return {
"vertical_strategy": "explicit",
"horizontal_strategy": "explicit",
"explicit_vertical_lines": edges,
"explicit_horizontal_lines": edges,
"intersection_y_tolerance": 10,
}
def get_text_outside_table(crop_page):
ts = prepare_table_config(crop_page)
if len(ts["explicit_vertical_lines"]) == 0 or len(ts["explicit_horizontal_lines"]) == 0:
return crop_page
### Get the bounding boxes of the tables on the page.
bboxes = [table.bbox for table in crop_page.root_page.find_tables(table_settings=ts)]
def not_within_bboxes(obj):
"""Check if the object is in any of the table's bbox."""
def obj_in_bbox(_bbox):
"""See https://github.com/jsvine/pdfplumber/blob/stable/pdfplumber/table.py#L404"""
v_mid = (obj["top"] + obj["bottom"]) / 2
h_mid = (obj["x0"] + obj["x1"]) / 2
x0, top, x1, bottom = _bbox
return (h_mid >= x0) and (h_mid < x1) and (v_mid >= top) and (v_mid < bottom)
return not any(obj_in_bbox(__bbox) for __bbox in bboxes)
return crop_page.filter(not_within_bboxes)
# Use LaTeX for formulas: wrap inline formulas in $ and display formulas in $$.
extract_words = lambda page: page.extract_words(keep_blank_chars=True, y_tolerance=0, x_tolerance=1,
extra_attrs=["fontname", "size", "object_type"])
# dict_keys(['text', 'x0', 'x1', 'top', 'doctop', 'bottom', 'upright', 'direction', 'fontname', 'size'])
def get_title_with_cropped_page(first_page):
    title = []  # collect the title text
    x0, top, x1, bottom = first_page.bbox  # get the page bounding box
for word in extract_words(first_page):
word = SimpleNamespace(**word)
if word.size >= 14:
title.append(word.text)
title_bottom = word.bottom
elif word.text == "Abstract": # ่ทๅ้กต้ขabstract
top = word.top
user_info = [i["text"] for i in extract_words(first_page.within_bbox((x0, title_bottom, x1, top)))]
    # Crop away the upper part; within_bbox: fully included, crop: partially included
return title, user_info, first_page.within_bbox((x0, top, x1, bottom))
def get_column_cropped_pages(pages, two_column=True):
new_pages = []
for page in pages:
if two_column:
left = page.within_bbox((0, 0, page.width / 2, page.height), relative=True)
right = page.within_bbox((page.width / 2, 0, page.width, page.height), relative=True)
new_pages.append(left)
new_pages.append(right)
else:
new_pages.append(page)
return new_pages
def parse_pdf(filename, two_column=True):
with pdfplumber.open(filename) as pdf:
title, user_info, first_page = get_title_with_cropped_page(pdf.pages[0])
new_pages = get_column_cropped_pages([first_page] + pdf.pages[1:], two_column)
chapters = []
# tuple (chapter_name, [pageid] (start,stop), chapter_text)
create_chapter = lambda page_start, name_top, name_bottom: SimpleNamespace(
name=[],
name_top=name_top,
name_bottom=name_bottom,
record_chapter_name=True,
page_start=page_start,
page_stop=None,
text=[],
)
cur_chapter = None
        # Iterate over the PDF document page by page
for idx, page in enumerate(new_pages):
page = get_text_outside_table(page)
            # Iterate over the page text line by line
for word in extract_words(page):
word = SimpleNamespace(**word)
                # Check whether the line is printed in a large (chapter-heading) font; if so, treat it as the start of a new chapter
                if word.size >= 11:  # a chapter name appears
if cur_chapter is None:
cur_chapter = create_chapter(page.page_number, word.top, word.bottom)
elif not cur_chapter.record_chapter_name or (
                            cur_chapter.name_bottom != word.bottom and cur_chapter.name_top != word.top):
                        # Stop appending to the chapter name
cur_chapter.page_stop = page.page_number # stop id
chapters.append(cur_chapter)
                        # Reset the current chapter info
cur_chapter = create_chapter(page.page_number, word.top, word.bottom)
# print(word.size, word.top, word.bottom, word.text)
cur_chapter.name.append(word.text)
else:
                    cur_chapter.record_chapter_name = False  # the chapter name has ended
cur_chapter.text.append(word.text)
else:
            # Handle the last chapter
cur_chapter.page_stop = page.page_number # stop id
chapters.append(cur_chapter)
for i in chapters:
print(f"section: {i.name} pages:{i.page_start, i.page_stop} word-count:{len(i.text)}")
print(" ".join(i.text))
title = " ".join(title)
user_info = " ".join(user_info)
text = f"Article Title: {title}, Information:{user_info}\n"
for idx, chapter in enumerate(chapters):
chapter.name = " ".join(chapter.name)
text += f"The {idx}th Chapter {chapter.name}: " + " ".join(chapter.text) + "\n"
return Document(page_content=text, metadata={"title": title})
if __name__ == '__main__':
# Test code
z = parse_pdf("./build/test.pdf")
print(z["user_info"])
print(z["title"])
| [] |
2024-01-10 | shibing624/chatgpt-webui | src~shared.py | import os
import queue
from src.presets import OPENAI_API_BASE, CHAT_COMPLETION_URL, BALANCE_API_URL, USAGE_API_URL, API_HOST, IMAGES_COMPLETION_URL
class State:
interrupted = False
multi_api_key = False
chat_completion_url = CHAT_COMPLETION_URL
balance_api_url = BALANCE_API_URL
usage_api_url = USAGE_API_URL
openai_api_base = OPENAI_API_BASE
images_completion_url = IMAGES_COMPLETION_URL
def interrupt(self):
self.interrupted = True
def recover(self):
self.interrupted = False
def set_api_host(self, api_host: str):
api_host = api_host.rstrip("/")
if not api_host.startswith("http"):
api_host = f"https://{api_host}"
if api_host.endswith("/v1"):
api_host = api_host[:-3]
self.chat_completion_url = f"{api_host}/v1/chat/completions"
self.images_completion_url = f"{api_host}/v1/images/generations"
self.openai_api_base = f"{api_host}/v1"
self.balance_api_url = f"{api_host}/dashboard/billing/credit_grants"
self.usage_api_url = f"{api_host}/dashboard/billing/usage"
os.environ["OPENAI_API_BASE"] = api_host
def reset_api_host(self):
self.chat_completion_url = CHAT_COMPLETION_URL
self.images_completion_url = IMAGES_COMPLETION_URL
self.balance_api_url = BALANCE_API_URL
self.usage_api_url = USAGE_API_URL
os.environ["OPENAI_API_BASE"] = f"https://{API_HOST}"
return API_HOST
def reset_all(self):
self.interrupted = False
self.chat_completion_url = CHAT_COMPLETION_URL
def set_api_key_queue(self, api_key_list):
self.multi_api_key = True
self.api_key_queue = queue.Queue()
for api_key in api_key_list:
self.api_key_queue.put(api_key)
def switching_api_key(self, func):
if not hasattr(self, "api_key_queue"):
return func
def wrapped(*args, **kwargs):
api_key = self.api_key_queue.get()
args[0].api_key = api_key
ret = func(*args, **kwargs)
self.api_key_queue.put(api_key)
return ret
return wrapped
state = State()
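# Illustrative sketch (not part of the original module): callers point the shared state at a
# proxy or self-hosted endpoint before issuing requests; the host below is hypothetical.
def _example_use_custom_api_host():
    state.set_api_host("https://example-openai-proxy.local")
    try:
        # -> "https://example-openai-proxy.local/v1/chat/completions"
        return state.chat_completion_url
    finally:
        state.reset_api_host()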
| [] |
2024-01-10 | NLP-Suite/NLP-Suite | src~topic_modeling_gensim_util.py | #!/usr/bin/env Python
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 9 18:16 2020
@author: claude; completed by Wei Dai Spring 2021
"""
import sys
import GUI_util
import IO_libraries_util
if IO_libraries_util.install_all_Python_packages(GUI_util.window,"topic_modeling_gensim_util.py",['nltk','os','tkinter','pandas','gensim','spacy','pyLDAvis','matplotlib','logging','IPython'])==False:
sys.exit(0)
import os
import tkinter.messagebox as mb
import pandas as pd
from pprint import pprint
from sys import platform
#Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
# necessary to avoid having to do Ctrl+C to kill pyLDAvis to continue running the code
from _thread import start_new_thread
#spacy for lemmatization
import spacy
#plotting tools
import pyLDAvis
import pyLDAvis.gensim
import matplotlib
matplotlib.use('TkAgg') # may be necessary for your system
import matplotlib.pyplot as plt
#enable logging for gensim
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
import IO_files_util
import IO_user_interface_util
import charts_util
import reminders_util
#whether stopwordst were already downloaded can be tested, see stackoverflow
# https://stackoverflow.com/questions/23704510/how-do-i-test-whether-an-nltk-resource-is-already-installed-on-the-machine-runni
# see also caveats
# check stopwords
IO_libraries_util.import_nltk_resource(GUI_util.window,'corpora/stopwords','stopwords')
from nltk.corpus import stopwords
#https://spacy.io/usage/models OTHER LANGUAGES ARE AVAILABLE; CHECK WEBSITE!
try:
spacy.load('en_core_web_sm')
except:
if platform == 'darwin':
msg = '\n\nAt terminal, type sudo python -m spacy download en_core_web_sm'
if platform == 'win32':
msg = '\n\nClick on left-hand start icon in task bar' + \
'\n Scroll down to Anaconda' + \
'\n Click on the dropdown arrow to display available options' + \
'\n Right click on Anaconda Prompt' + \
'\n Click on More' + \
'\n Click on Run as Administrator' + \
'\n At the command prompt, Enter "conda activate NLP" (if NLP is your environment)' + \
'\n Then enter: "python -m spacy download en_core_web_sm" and Return'
msg = msg + '\n\nThis imports the package.'
    mb.showerror(title='Library error', message='The Gensim tool could not find the English language spacy library. This needs to be installed. At the command prompt type:\npython -m spacy download en_core_web_sm\n\nYOU MAY HAVE TO RUN THE COMMAND AS ADMINISTRATOR.\n\nHOW DO YOU DO THAT?' + msg)
# mb.showerror(title='Library error', message='The Gensim Topic modeling tool could not find the English language spacy library. This needs to be installed. At command promp type:\npython -m spacy download en_core_web_sm\n\nYOU MAY HAVE TO RUN THE COMMAND AS ADMINISTRATOR.\n\nHOW DO YOU DO THAT?'
# '\n\nIn Mac, at terminal, type sudo python -m spacy download en_core_web_sm'
# '\n\nIn Windows, click on left-hand start icon in task bar'
# '\n Scroll down to Anaconda'
# '\n Click on the dropdown arrow to display available options'
# '\n Right click on Anaconda Prompt'
# '\n Click on More'
# '\n Click on Run as Administrator'
# '\n At the command prompt, Enter "conda activate NLP" (if NLP is your environment)'
# '\n Then enter: "python -m spacy download en_core_web_sm" and Return'
# '\n\nThis imports the package.')
sys.exit(0)
# find the optimal number of topics for LDA
def compute_coherence_values(MalletDir, dictionary, corpus, texts, start, limit, step):
startTime=IO_user_interface_util.timed_alert(GUI_util.window,2000,'Analysis start', 'Started computing the coherence value for each topic')
coherence_values = []
model_list = []
for num_topics in range(start, limit, step):
startTime=IO_user_interface_util.timed_alert(GUI_util.window,2000,'Analysis start',
'Computing coherence value for topic number ' + str(num_topics))
model = gensim.models.wrappers.LdaMallet(MalletDir,corpus=corpus, num_topics=num_topics, id2word=dictionary)
model_list.append(model)
coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
coherence_values.append(coherencemodel.get_coherence())
IO_user_interface_util.timed_alert(GUI_util.window,2000,'Analysis end', 'Finished computing the coherence value for each topic')
return model_list, coherence_values
# Finding the Dominant Topic in each sentence
def format_topics_sentences(ldamodel, corpus, texts):
# Init output
sent_topics_df = pd.DataFrame()
# Get main topic in each document
for i, row in enumerate(ldamodel[corpus]):
row = sorted(row, key=lambda x: (x[1]), reverse=True)
# Get the Dominant topic, Perc Contribution and Keywords for each document
for j, (topic_num, prop_topic) in enumerate(row):
if j == 0: # => topic
wp = ldamodel.show_topic(topic_num)
topic_keywords = ", ".join([word for word, prop in wp])
sent_topics_df = sent_topics_df.append(pd.Series([int(topic_num), round(prop_topic,4), topic_keywords]), ignore_index=True)
else:
break
sent_topics_df.columns = ['Dominant topic', '% contribution', 'Topic keywords']
# Add original text to the end of the output
# print("Type of texts: ",type(texts))
contents = pd.Series(texts)
sent_topics_df = pd.concat([sent_topics_df, contents], axis=1)
return sent_topics_df
def malletModelling(MalletDir, outputDir, createCharts, corpus,num_topics, id2word,data_lemmatized, lda_model, data):
startTime=IO_user_interface_util.timed_alert(GUI_util.window,2000,'Analysis start', 'Started running Mallet LDA topic modeling at',True)
config_filename='topic_modeling_gensim_config.csv'
try:
ldamallet = gensim.models.wrappers.LdaMallet(MalletDir, corpus=corpus, num_topics=num_topics, id2word=id2word)
except:
head, scriptName = os.path.split(os.path.basename(__file__))
routine_options = reminders_util.getReminders_list(scriptName)
reminders_util.checkReminder(scriptName,
reminders_util.title_options_gensim_release,
reminders_util.message_gensim_release,
True)
routine_options = reminders_util.getReminders_list(scriptName)
return
# Show Topics
pprint(ldamallet.show_topics(formatted=False))
if num_topics>40:
limit=40
else:
limit = num_topics
# Compute Coherence value
coherence_model_ldamallet = CoherenceModel(model=ldamallet, texts=data_lemmatized, dictionary=id2word, coherence='c_v')
startTime=IO_user_interface_util.timed_alert(GUI_util.window,2000,'Analysis start', 'Compute Mallet LDA coherence values for each topic.\n\nPlease, be patient...')
coherence_ldamallet = coherence_model_ldamallet.get_coherence()
print('\nCoherence value: ', coherence_ldamallet)
model_list, coherence_values = compute_coherence_values(MalletDir, dictionary=id2word, corpus=corpus, texts=data_lemmatized, start=2, limit=limit, step=6)
startTime=IO_user_interface_util.timed_alert(GUI_util.window,2000,'Analysis start', 'Compute graph of optimal topics number.')
limit=limit; start=2; step=6;
x = range(start, limit, step)
plt.plot(x, coherence_values)
plt.xlabel("Number of topics")
plt.ylabel("Coherence value")
plt.legend(("coherence_values"), loc='best')
# plt.show()
fileName=os.path.join(outputDir, "NLP_Gensim_optimal_topics_number.jpg")
plt.savefig(fileName)
filesToOpen.append(fileName)
# Print the coherence values
optimal_coherence = float("-inf")
index = 0
optimal_index = -1
for m, cv in zip(x, coherence_values):
coherence_value = round(cv, 4)
if coherence_value > optimal_coherence:
optimal_index = index
print("Topic number", m, "has coherence value ", coherence_value)
index += 1
# Select the model and print the topics
optimal_model = model_list[optimal_index]
model_topics = optimal_model.show_topics(formatted=False)
pprint(optimal_model.print_topics(num_words=10))
# When the corpus text contains a \n character, the csv standard treats it as a "start a new row" marker.
# As a result, the corpus text spills across many rows and corrupts the csv file.
# Note that \n does not mark a new sentence; it is just a line break that makes the text easier to read.
text: str
for index, text in enumerate(data):
data[index] = text.replace('\n', ' ')
df_topic_sents_keywords = format_topics_sentences(ldamodel=optimal_model, corpus=corpus, texts=data)
# Format
df_dominant_topic = df_topic_sents_keywords.reset_index()
df_dominant_topic.columns = ['Document ID','Dominant topic', 'Topic % contribution', 'Topic keywords', 'Text']
# Save csv file
fileName=os.path.join(outputDir, "NLP_Gensim_dominant_topic.csv")
df_dominant_topic.to_csv(fileName, encoding='utf-8', index=False)
filesToOpen.append(fileName)
# columns_to_be_plotted_xAxis=[], columns_to_be_plotted_yAxis=[[1, 3]]
# hover_label = 'Topic_Keywords'
# inputFilename = fileName
# outputFiles = charts_util.run_all(columns_to_be_plotted, inputFilename, outputDir,
# outputFileLabel='TM_Gensim',
# chart_type_list=["bar"],
# chart_title='Number of Documents per Topic',
# column_xAxis_label_var='Topic number',
# hover_info_column_list=hover_label)
#
# if outputFiles!=None:
# filesToOpen.append(chart_outputFilename)
# Find the most representative document for each topic
# Group top 5 sentences under each topic
sent_topics_sorteddf_mallet = pd.DataFrame()
sent_topics_outdf_grpd = df_topic_sents_keywords.groupby('Dominant topic')
for i, grp in sent_topics_outdf_grpd:
sent_topics_sorteddf_mallet = pd.concat([sent_topics_sorteddf_mallet,
grp.sort_values(['% contribution'], ascending=[0]).head(1)],
axis=0)
# Reset Index
sent_topics_sorteddf_mallet.reset_index(drop=True, inplace=True)
# Format
sent_topics_sorteddf_mallet.columns = ['Topic number', "Topic % contribution", "Topic keywords", "Text"]
# Save csv file
fileName=os.path.join(outputDir, "NLP_Gensim_representative_document.csv")
sent_topics_sorteddf_mallet.to_csv(fileName,encoding='utf-8', index=False)
filesToOpen.append(fileName)
columns_to_be_plotted_xAxis=[]
columns_to_be_plotted_yAxis=[[1, 2]]
hover_label = 'Topic keywords'
inputFilename = fileName
outputFiles = charts_util.run_all(columns_to_be_plotted_yAxis, inputFilename, outputDir,
outputFileLabel='TM_Gensim',
chart_type_list=["bar"],
chart_title='Percentage Contribution of Each Topic',
column_xAxis_label_var='Topic number',
hover_info_column_list=hover_label)
if outputFiles!=None:
if isinstance(outputFiles, str):
filesToOpen.append(outputFiles)
else:
filesToOpen.extend(outputFiles)
# Topic distribution across documents
# Number of Documents for Each Topic
topic_counts = df_topic_sents_keywords['Dominant topic'].value_counts()
print("Topic counts: ")
print(topic_counts)
print("Type of topic count: ")
print(type(topic_counts))
print()
# Percentage of Documents for Each Topic
topic_contribution = round(topic_counts/topic_counts.sum(), 4)
print("Topic contribution: ")
print(topic_contribution)
print("Type of topic contribution: ")
print(type(topic_contribution))
print()
# Topic Number and Keywords
topic_num_keywords = df_topic_sents_keywords[['Dominant topic', 'Topic keywords']]
# Concatenate Column wise
# df_dominant_topics = pd.concat([topic_num_keywords, topic_counts, topic_contribution], axis=1)
# Change Column names
df_dominant_topics = topic_num_keywords
num_row = df_dominant_topics.shape[0]
topic_order_list = df_dominant_topics["Dominant topic"]
# print(topic_order_list)
# print(type(topic_order_list))
num_docs = []
perc_documents = []
# print()
for i in range(num_row):
topic = topic_order_list.get(i)
num_docs.append(topic_counts.get(topic))
perc_documents.append(topic_contribution.get(topic))
df_dominant_topics["Number of documents"] = num_docs
df_dominant_topics["% documents"] = perc_documents
df_dominant_topics.columns = ['Dominant topic', 'Topic keywords', 'Number of documents', '% documents']
# dominant_topics seems to create duplicate records; the .drop_duplicates() method will solve the problem
df_dominant_topics.columns = ['Dominant topic', 'Topic keywords', 'Number of documents', '% documents']
df_dominant_topics = df_dominant_topics.drop_duplicates()
print("Number of rows of topic_distribution.csv: ", df_dominant_topics.shape[0])
print("Number of columns of topic_distribution.csv: ", df_dominant_topics.shape[1])
# Save csv file
fileName=os.path.join(outputDir, "NLP_Gensim_topic_distribution.csv")
df_dominant_topics.to_csv(fileName, encoding='utf-8', index=False)
filesToOpen.append(fileName)
# columns_to_be_plotted_xAxis=[], columns_to_be_plotted_yAxis=[[1, 2]]
# hover_label = 'Topic keywords'
# inputFilename = fileName
# outputFiles = charts_util.run_all(columns_to_be_plotted, inputFilename, outputDir,
# outputFileLabel='TM_Gensim',
# chart_type_list=["bar"],
# chart_title='Percentage Contribution of Each Topic',
# column_xAxis_label_var='Topic number',
# hover_info_column_list=hover_label)
#
# if outputFiles!=None:
# filesToOpen.append(chart_outputFilename)
IO_user_interface_util.timed_alert(GUI_util.window,2000,'Analysis end', 'Finished running Mallet LDA topic modeling at',True, '', True, startTime)
def run_Gensim(window, inputDir, outputDir, config_filename, num_topics, remove_stopwords_var,
lemmatize, nounsOnly, run_Mallet, openOutputFiles,createCharts, chartPackage):
global filesToOpen
filesToOpen=[]
if pd.__version__[0]=='2':
mb.showwarning(title='Warning',
message='Gensim is incompatible with pandas 2.0 or higher\n\nIn the command line, please pip uninstall pandas and pip install pandas==1.5.2.\n\nMake sure you are in the right NLP environment by typing conda activate NLP')
return
numFiles = IO_files_util.GetNumberOfDocumentsInDirectory(inputDir, 'txt')
if numFiles == 0:
mb.showerror(title='Number of files error',
message='The selected input directory does NOT contain any file of txt type.\n\nPlease, select a different directory and try again.')
return
elif numFiles == 1:
mb.showerror(title='Number of files error', message='The selected input directory contains only ' + str(
numFiles) + ' file of txt type.\n\nTopic modeling requires a large number of files to produce valid results. That is true even if the available file contains several different documents merged together.')
return
elif numFiles < 50:
result = mb.askyesno(title='Number of files', message='The selected input directory contains only ' + str(
numFiles) + ' files of txt type.\n\nTopic modeling requires a large number of files (in the hundreds at least; read TIPS file) to produce valid results.\n\nAre you sure you want to continue?',
default='no')
if result == False:
return
startTime=IO_user_interface_util.timed_alert(GUI_util.window, 4000, 'Analysis start',
'Started running Gensim Topic modeling at ', True,
"Depending upon corpus size, computations may take a while... Please, be patient...")
outputFilename = IO_files_util.generate_output_file_name('', inputDir, outputDir, '.html', 'Gensim_topic_modeling')
content = []
inputDocs = IO_files_util.getFileList('', inputDir, fileType='.txt', silent=False,
configFileName=config_filename)
nFile = len(inputDocs)
if nFile == 0:
return
for fileName in inputDocs:
if fileName.endswith('.txt'):
with open(os.path.join(inputDir, fileName), 'r', encoding='utf-8', errors='ignore') as file:
content.append(file.read())
file.close()
# TODO: read in the article title instead of an arbitrary number (1 here)
raw_data = {"title": 1, "content": content}
df = pd.DataFrame(data=raw_data)
data = df.content.values.tolist()
stop_words = stopwords.words('english')
# TODO: (optional) add more stop words that are common but unnecessary for topic modeling
# stop_words.extend(['', ''])
# stop_words = stopwords.words('english')
stop_words.extend(['from', 'subject', 're', 'edu', 'use'])
# TODO: import data
# tokenize and clean up; deacc=True removes punctuation
def sent_to_words(sentences):
for sentence in sentences:
yield (gensim.utils.simple_preprocess(str(sentence), deacc=True))
data_words = list(sent_to_words(data))
# Build the bigram and trigram models
bigram = gensim.models.Phrases(data_words, min_count=5, threshold=100) # higher threshold fewer phrases.
trigram = gensim.models.Phrases(bigram[data_words], threshold=100)
# Faster way to get a sentence clubbed as a trigram/bigram
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
# See trigram example
# @print(trigram_mod[bigram_mod[data_words[0]]])
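# Illustrative note (not from the original code): applying the Phraser joins tokens that
# frequently co-occur with an underscore, e.g.
# bigram_mod[['social', 'security', 'system']] -> ['social_security', 'system']
# (the exact merges depend on min_count, threshold and the corpus).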
# Define functions for stopwords, bigrams, trigrams and lemmatization
def remove_stopwords(texts):
return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
def make_bigrams(texts):
return [bigram_mod[doc] for doc in texts]
def make_trigrams(texts):
return [trigram_mod[bigram_mod[doc]] for doc in texts]
def lemmatization(texts, lemmatize=True, allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV']):
"""https://spacy.io/api/annotation"""
texts_out = []
for sent in texts:
doc = nlp(" ".join(sent))
if lemmatize:
texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags])
else:
texts_out.append([token for token in doc if token.pos_ in allowed_postags])
return texts_out
# Remove Stop Words
if remove_stopwords_var == True:
data_words_nostops = remove_stopwords(data_words)
else:
data_words_nostops = data_words
# Form Bigrams
data_words_bigrams = make_bigrams(data_words_nostops)
# Initialize spacy 'en_core_web_sm' model, keeping only tagger component (for efficiency)
# Python -m spacy download en
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
if lemmatize:
if nounsOnly == True:
# Do lemmatization keeping only noun
data_lemmatized = lemmatization(data_words_bigrams, lemmatize, allowed_postags=['NOUN'])
else:
# Do lemmatization keeping only noun, adj, vb, adv
data_lemmatized = lemmatization(data_words_bigrams, lemmatize,
allowed_postags=['NOUN', 'ADJ', 'VERB', 'ADV'])
else:
data_lemmatized = data_words_bigrams
# @print(data_lemmatized[:1])
# Create Dictionary
id2word = corpora.Dictionary(data_lemmatized)
# Create Corpus
texts = data_lemmatized
# Term Document Frequency
corpus = [id2word.doc2bow(text) for text in texts]
# View
# @print(corpus[:1])
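# Note: each doc2bow() entry is a list of (token_id, token_count) pairs,
# e.g. [(0, 1), (1, 2), ...]; id2word[token_id] maps an id back to its word.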
########################################
# good code on various parameters
# https://www.machinelearningplus.com/nlp/topic-modeling-gensim-python/
# Build LDA model
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
id2word=id2word,
num_topics=num_topics,
random_state=100,
update_every=1,
chunksize=100,
passes=10,
alpha='auto',
per_word_topics=True)
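# Note on the parameters above: passes is the number of full sweeps over the corpus,
# chunksize is the number of documents per training batch, update_every controls how
# often the model is updated, and alpha='auto' lets gensim learn the document-topic
# prior from the data.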
# Compute perplexity, a measure of how good the model is; the lower the better.
print('\nPerplexity Score: ', lda_model.log_perplexity(corpus))
# TODO the coherence lines produce an error
# Compute Coherence Score
# coherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized, dictionary=id2word, coherence='c_v')
# coherence_lda = coherence_model_lda.get_coherence()
# print('\nCoherence Score: ', coherence_lda)
# Print the Keywords in the topics
# TODO visualize most relevant topics in Excel bar charts, with hover over of the words in each topic
# step 13 of website
# print("\n")
# print("List of keywords and their weights for each of the " + str(num_topics) + " topics analyzed:")
# print("\n")
pprint(lda_model.print_topics())
# print("Type of lda_model.print_topics(): ", type(lda_model.print_topics()))
# print("\n\n")
doc_lda = lda_model[corpus]
# print("Type of doc_lda: ", type(doc_lda))
# visualize and generate html
# step 15 in website
# https://stackoverflow.com/questions/46379763/typeerror-object-of-type-complex-is-not-json-serializable-while-using-pyldavi
# Roberto added mds='mmds' to avoid the error described above
vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word, mds='mmds')
pyLDAvis.prepared_data_to_html(vis)
try:
pyLDAvis.save_html(vis, outputFilename)
except:
mb.showerror(title='Output html file error', message='Gensim failed to generate the html output file.')
return
IO_user_interface_util.timed_alert(GUI_util.window,2000,'Analysis end', 'Finished running Gensim topic modeling at',True,'\n\nThe file ' + outputFilename + ' was created. The results will display shortly on the web browser.')
# \n\nYou now need to exit the server.\n\nAt command prompt, enter Ctrl+C, perhaps repeatedly, to exit the server.'
# open and display on web
def show_web(vis):
pyLDAvis.display(vis)
pyLDAvis.show(vis)
pyLDAvis.kill()
# necessary to avoid having to do Ctrl+C to kill pyLDAvis to continue running the code
start_new_thread(show_web, (vis,))
if run_Mallet==True:
# check that the Mallet directory has been set up
MalletDir, existing_software_config, errorFound = IO_libraries_util.external_software_install(
'topic_modeling_gensim_util',
'MALLET',
'',
silent=False, errorFound=False)
if MalletDir==None or MalletDir=='':
return
MalletDir = os.path.join(MalletDir, "bin/mallet")
# building LDA Mallet Model
malletModelling(MalletDir, outputDir, createCharts, corpus, num_topics, id2word, data_lemmatized,
lda_model, data)
if openOutputFiles==True:
IO_files_util.OpenOutputFiles(GUI_util.window, openOutputFiles, filesToOpen, outputDir)
filesToOpen=[] # to avoid opening files twice, here and in calling function
return filesToOpen
| [] |
2024-01-10 | Ravi-Teja-konda/OSGPT | myenv~Lib~site-packages~charset_normalizer~cd.py | import importlib
from codecs import IncrementalDecoder
from collections import Counter
from functools import lru_cache
from typing import Counter as TypeCounter, Dict, List, Optional, Tuple
from .assets import FREQUENCIES
from .constant import KO_NAMES, LANGUAGE_SUPPORTED_COUNT, TOO_SMALL_SEQUENCE, ZH_NAMES
from .md import is_suspiciously_successive_range
from .models import CoherenceMatches
from .utils import (
is_accentuated,
is_latin,
is_multi_byte_encoding,
is_unicode_range_secondary,
unicode_range,
)
def encoding_unicode_range(iana_name: str) -> List[str]:
"""
Return associated unicode ranges in a single byte code page.
"""
if is_multi_byte_encoding(iana_name):
raise IOError("Function not supported on multi-byte code page")
decoder = importlib.import_module(
"encodings.{}".format(iana_name)
).IncrementalDecoder
p: IncrementalDecoder = decoder(errors="ignore")
seen_ranges: Dict[str, int] = {}
character_count: int = 0
for i in range(0x40, 0xFF):
chunk: str = p.decode(bytes([i]))
if chunk:
character_range: Optional[str] = unicode_range(chunk)
if character_range is None:
continue
if is_unicode_range_secondary(character_range) is False:
if character_range not in seen_ranges:
seen_ranges[character_range] = 0
seen_ranges[character_range] += 1
character_count += 1
return sorted(
[
character_range
for character_range in seen_ranges
if seen_ranges[character_range] / character_count >= 0.15
]
)
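# Illustrative usage (assumed example, not from the original tests):
# encoding_unicode_range("cp1251") would typically return something like
# ['Basic Latin', 'Cyrillic'], since the 0x40-0xFF bytes of that code page
# decode mostly to Latin and Cyrillic letters.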
def unicode_range_languages(primary_range: str) -> List[str]:
"""
Return inferred languages used with a unicode range.
"""
languages: List[str] = []
for language, characters in FREQUENCIES.items():
for character in characters:
if unicode_range(character) == primary_range:
languages.append(language)
break
return languages
@lru_cache()
def encoding_languages(iana_name: str) -> List[str]:
"""
Single-byte encoding language association. Some code pages are heavily linked to particular language(s).
This function does the correspondence.
"""
unicode_ranges: List[str] = encoding_unicode_range(iana_name)
primary_range: Optional[str] = None
for specified_range in unicode_ranges:
if "Latin" not in specified_range:
primary_range = specified_range
break
if primary_range is None:
return ["Latin Based"]
return unicode_range_languages(primary_range)
@lru_cache()
def mb_encoding_languages(iana_name: str) -> List[str]:
"""
Multi-byte encoding language association. Some code pages are heavily linked to particular language(s).
This function does the correspondence.
"""
if (
iana_name.startswith("shift_")
or iana_name.startswith("iso2022_jp")
or iana_name.startswith("euc_j")
or iana_name == "cp932"
):
return ["Japanese"]
if iana_name.startswith("gb") or iana_name in ZH_NAMES:
return ["Chinese"]
if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES:
return ["Korean"]
return []
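# e.g. mb_encoding_languages("cp932") -> ["Japanese"] and
# mb_encoding_languages("gb2312") -> ["Chinese"]; any multi-byte code page
# not matched above yields an empty list.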
@lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT)
def get_target_features(language: str) -> Tuple[bool, bool]:
"""
Determine main aspects of a supported language: whether it contains accents and whether it is pure Latin.
"""
target_have_accents: bool = False
target_pure_latin: bool = True
for character in FREQUENCIES[language]:
if not target_have_accents and is_accentuated(character):
target_have_accents = True
if target_pure_latin and is_latin(character) is False:
target_pure_latin = False
return target_have_accents, target_pure_latin
def alphabet_languages(
characters: List[str], ignore_non_latin: bool = False
) -> List[str]:
"""
Return the languages associated with the given characters.
"""
languages: List[Tuple[str, float]] = []
source_have_accents = any(is_accentuated(character) for character in characters)
for language, language_characters in FREQUENCIES.items():
target_have_accents, target_pure_latin = get_target_features(language)
if ignore_non_latin and target_pure_latin is False:
continue
if target_have_accents is False and source_have_accents:
continue
character_count: int = len(language_characters)
character_match_count: int = len(
[c for c in language_characters if c in characters]
)
ratio: float = character_match_count / character_count
if ratio >= 0.2:
languages.append((language, ratio))
languages = sorted(languages, key=lambda x: x[1], reverse=True)
return [compatible_language[0] for compatible_language in languages]
def characters_popularity_compare(
language: str, ordered_characters: List[str]
) -> float:
"""
Determine if an ordered list of characters (from most frequent to rarest) matches a particular language.
The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit).
Beware that this function is not strict on the match, in order to ease the detection. (Meaning a close match counts as 1.)
"""
if language not in FREQUENCIES:
raise ValueError("{} not available".format(language))
character_approved_count: int = 0
FREQUENCIES_language_set = set(FREQUENCIES[language])
ordered_characters_count: int = len(ordered_characters)
target_language_characters_count: int = len(FREQUENCIES[language])
large_alphabet: bool = target_language_characters_count > 26
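# Sketch of the heuristic below: each observed character's rank is projected onto the
# language's frequency table. For small alphabets, a character more than 4 ranks away
# from its expected position is skipped; for large alphabets, a projection within a
# third of the table is approved directly. Remaining characters are approved when the
# sets of more-frequent / less-frequent neighbours overlap enough with the language's
# own ordering.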
for character, character_rank in zip(
ordered_characters, range(0, ordered_characters_count)
):
if character not in FREQUENCIES_language_set:
continue
character_rank_in_language: int = FREQUENCIES[language].index(character)
expected_projection_ratio: float = (
target_language_characters_count / ordered_characters_count
)
character_rank_projection: int = int(character_rank * expected_projection_ratio)
if (
large_alphabet is False
and abs(character_rank_projection - character_rank_in_language) > 4
):
continue
if (
large_alphabet is True
and abs(character_rank_projection - character_rank_in_language)
< target_language_characters_count / 3
):
character_approved_count += 1
continue
characters_before_source: List[str] = FREQUENCIES[language][
0:character_rank_in_language
]
characters_after_source: List[str] = FREQUENCIES[language][
character_rank_in_language:
]
characters_before: List[str] = ordered_characters[0:character_rank]
characters_after: List[str] = ordered_characters[character_rank:]
before_match_count: int = len(
set(characters_before) & set(characters_before_source)
)
after_match_count: int = len(
set(characters_after) & set(characters_after_source)
)
if len(characters_before_source) == 0 and before_match_count <= 4:
character_approved_count += 1
continue
if len(characters_after_source) == 0 and after_match_count <= 4:
character_approved_count += 1
continue
if (
before_match_count / len(characters_before_source) >= 0.4
or after_match_count / len(characters_after_source) >= 0.4
):
character_approved_count += 1
continue
return character_approved_count / len(ordered_characters)
def alpha_unicode_split(decoded_sequence: str) -> List[str]:
"""
Given a decoded text sequence, return a list of str. Unicode range / alphabet separation.
Ex. a text containing English/Latin with a bit of Hebrew will return two items in the resulting list;
one containing the Latin letters and the other the Hebrew ones.
"""
layers: Dict[str, str] = {}
for character in decoded_sequence:
if character.isalpha() is False:
continue
character_range: Optional[str] = unicode_range(character)
if character_range is None:
continue
layer_target_range: Optional[str] = None
for discovered_range in layers:
if (
is_suspiciously_successive_range(discovered_range, character_range)
is False
):
layer_target_range = discovered_range
break
if layer_target_range is None:
layer_target_range = character_range
if layer_target_range not in layers:
layers[layer_target_range] = character.lower()
continue
layers[layer_target_range] += character.lower()
return list(layers.values())
def merge_coherence_ratios(results: List[CoherenceMatches]) -> CoherenceMatches:
"""
This function merges results previously given by the function coherence_ratio.
The return type is the same as coherence_ratio.
"""
per_language_ratios: Dict[str, List[float]] = {}
for result in results:
for sub_result in result:
language, ratio = sub_result
if language not in per_language_ratios:
per_language_ratios[language] = [ratio]
continue
per_language_ratios[language].append(ratio)
merge = [
(
language,
round(
sum(per_language_ratios[language]) / len(per_language_ratios[language]),
4,
),
)
for language in per_language_ratios
]
return sorted(merge, key=lambda x: x[1], reverse=True)
def filter_alt_coherence_matches(results: CoherenceMatches) -> CoherenceMatches:
"""
We shall NOT return "English—" in CoherenceMatches because it is an alternative
of "English". This function only keeps the best match and removes the em-dash in it.
"""
index_results: Dict[str, List[float]] = dict()
for result in results:
language, ratio = result
no_em_name: str = language.replace("—", "")
if no_em_name not in index_results:
index_results[no_em_name] = []
index_results[no_em_name].append(ratio)
if any(len(index_results[e]) > 1 for e in index_results):
filtered_results: CoherenceMatches = []
for language in index_results:
filtered_results.append((language, max(index_results[language])))
return filtered_results
return results
@lru_cache(maxsize=2048)
def coherence_ratio(
decoded_sequence: str, threshold: float = 0.1, lg_inclusion: Optional[str] = None
) -> CoherenceMatches:
"""
Detect ANY language that can be identified in given sequence. The sequence will be analysed by layers.
A layer = Character extraction by alphabets/ranges.
"""
results: List[Tuple[str, float]] = []
ignore_non_latin: bool = False
sufficient_match_count: int = 0
lg_inclusion_list = lg_inclusion.split(",") if lg_inclusion is not None else []
if "Latin Based" in lg_inclusion_list:
ignore_non_latin = True
lg_inclusion_list.remove("Latin Based")
for layer in alpha_unicode_split(decoded_sequence):
sequence_frequencies: TypeCounter[str] = Counter(layer)
most_common = sequence_frequencies.most_common()
character_count: int = sum(o for c, o in most_common)
if character_count <= TOO_SMALL_SEQUENCE:
continue
popular_character_ordered: List[str] = [c for c, o in most_common]
for language in lg_inclusion_list or alphabet_languages(
popular_character_ordered, ignore_non_latin
):
ratio: float = characters_popularity_compare(
language, popular_character_ordered
)
if ratio < threshold:
continue
elif ratio >= 0.8:
sufficient_match_count += 1
results.append((language, round(ratio, 4)))
if sufficient_match_count >= 3:
break
return sorted(
filter_alt_coherence_matches(results), key=lambda x: x[1], reverse=True
)
| [] |
2024-01-10 | tabee/b3rn_zero_copilot | langchain-container~chatbot~agent2.py | from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
import asyncio
import requests
from urllib.parse import quote
import os
# dirty hack to switch between local and docker container, depending on the environment sys_path
sys_path = os.getenv('DATA_PATH', default=os.path.join(os.path.dirname(__file__)))
server_name = "fastapi"
if str(sys_path).startswith('/workspaces'):
server_name = "127.0.0.1"
#print(f"workspaces ... set server_name to {server_name}")
# end dirty hack
def get_answer(question):
""" Wrapper-Funktion fรผr get_suggestions, die die erforderlichen Parameter รผbergibt. """
if question:
encoded_question = quote(question) # URL-encode the question string
response = requests.get(f'http://{server_name}:80/vectorstore/answers-questions/{encoded_question}')
if response.status_code == 200:
suggestions = response.json()
return suggestions
else:
return "Sorry, I don't know the answer to your question."
def agent_for(topic):
model = ChatOpenAI(verbose=False)
template = """Answer the question based only on the following context:
{context}
context: {question}\n
==========================\n
answer always in german. cite the source (context) always in german.
if the context not suitable, please answer with "no suitable context".
"""
prompt = ChatPromptTemplate.from_template(template)
vectorstore_resp = get_answer(topic)
vectorstore = FAISS.from_texts(
vectorstore_resp, embedding=OpenAIEmbeddings()
)
retriever = vectorstore.as_retriever()
retrieval_chain = (
{
"context": retriever.with_config(run_name="Docs"),
"question": RunnablePassthrough(),
}
| prompt
| model
| StrOutputParser()
)
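# The chain above is a LangChain Expression Language (LCEL) pipeline: the dict step
# sends the input both to the retriever (as "context") and through unchanged (as
# "question"), the prompt formats them into messages, the chat model answers, and
# StrOutputParser yields plain strings, which is what makes .stream() below usable.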
for s in retrieval_chain.stream(topic):
yield s
if __name__ == "__main__":
for chunk in agent_for(topic="was ist ahv?"):
print(chunk, end="", flush=True)
| [
"Answer the question based only on the following context:\n {context}\n\n context: {question}\n\n ==========================\n\n answer always in german. cite the source (context) always in german.\n if the context not suitable, please answer with \"no suitable context\".\n "
] |
2024-01-10 | tabee/b3rn_zero_copilot | langchain-container~chatbot~agent_tools.py | from langchain.agents import Tool
from langchain.chains import LLMMathChain
from langchain.llms import OpenAI
from langchain.tools import DuckDuckGoSearchRun
from langchain.agents import tool
from langchain.utilities import DuckDuckGoSearchAPIWrapper
from langchain.callbacks.manager import CallbackManagerForToolRun
from typing import Optional
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.prompts import MessagesPlaceholder
from langchain.tools.render import format_tool_to_openai_function
from langchain.agents.format_scratchpad import format_to_openai_function_messages
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.schema.messages import AIMessage, HumanMessage
from langchain.agents import AgentExecutor
from langchain.agents import AgentExecutor, Tool, ZeroShotAgent
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from langchain.prompts import PromptTemplate
from langchain.agents.agent_toolkits import create_retriever_tool
import os
#from agent_rag import agent_rag_executor
#from retriever import faq_db
# math tool
llm_math = LLMMathChain.from_llm(OpenAI(api_key=os.environ["OPENAI_API_KEY"],))
llm_math_tool = Tool(
name="Calculator",
func=llm_math.run,
description="useful for when you need to answer questions about math"
)
# custom search tool
class CustomDuckDuckGoSearchRun(DuckDuckGoSearchRun):
def _run(
self,
query: str,
run_manager: Optional[CallbackManagerForToolRun] = None,
) -> str:
"""Use the tool with a custom query prefix."""
# Fรผge den Prรคfix zur Suchanfrage hinzu
modified_query = f"site:www.eak.admin.ch {query}"
# Rufe die _run-Methode der Basisklasse mit der modifizierten Anfrage auf
return super()._run(modified_query, run_manager)
wrapper = DuckDuckGoSearchAPIWrapper(region="ch-de", time="y", max_results=1)
custom_search_tool = CustomDuckDuckGoSearchRun(
name="EAK-Search",
description="Searches the www.eak.admin.ch website from Eidg. Ausgleichskasse EAK, for your query",
api_wrapper=wrapper)
# word length tool
@tool
def get_word_length(word: str) -> int:
"""Returns the length of a word."""
return len(word)
# summary chain
template = """This is a conversation between a human and a bot:
{chat_history}
Write a summary of the conversation for {input}:
"""
prompt = PromptTemplate(input_variables=["input", "chat_history"], template=template)
memory = ConversationBufferMemory(memory_key="chat_history")
readonlymemory = ReadOnlySharedMemory(memory=memory)
summary_chain = LLMChain(
llm=OpenAI(api_key=os.environ["OPENAI_API_KEY"]),
prompt=prompt,
verbose=True,
memory=readonlymemory, # use the read-only memory to prevent the tool from modifying the memory
)
summary_tool = Tool (
name="Summary",
func=summary_chain.run,
description="useful for when you summarize a conversation. The input to this tool should be a string, representing who will read this summary.",
)
# faq rag
# Create a retriever tool
# retriever = faq_db.as_retriever(
# search_type="mmr",
# search_kwargs={'k': 5, 'fetch_k': 20, 'lambda_mult': 0.85}
# )
# # Create a tool that uses the retriever
# faq_mmr_retriever = create_retriever_tool(
# retriever,
# "sozialversicherungssystem_faq_retriever",
# "Searches and returns faq documents about swiss social security system in german, french and italian.",
# )
#########
# tools #
#########
tools = [
#faq_mmr_retriever,
summary_tool,
get_word_length,
custom_search_tool,
llm_math_tool]
| [
"chat_history",
"input",
"This is a conversation between a human and a bot:\n\n{chat_history}\n\nWrite a summary of the conversation for {input}:\n",
"Returns the length of a word."
] |
2024-01-10 | afonso07/ruskin | backend~server~gpt_functions.py | import os
from typing import Any, Dict, List, Union
from openai import OpenAI
from .gpt_contexts import critique_context
import base64
from elevenlabs import generate, play # type: ignore
client = OpenAI()
def generate_image_prompt(imageURI: str) -> list[dict[str, Any]]:
return [
{
"role": "user",
"content": [
{"type": "text", "text": "Describe this painting."},
{
"type": "image_url",
"image_url": imageURI,
},
],
},
]
def generate_analysis_prompt(imageURI: str) -> str | None:
response = client.chat.completions.create(
model="gpt-4-vision-preview",
messages=[
{
"role": "system",
"content": critique_context,
},
]
+ generate_image_prompt(imageURI),
max_tokens=500,
)
response_text = response.choices[0].message.content
return response_text
if __name__ == "__main__":
with open("test_images/manu_painting2.jpg", "rb") as image_file:
encoded_string = base64.b64encode(image_file.read())
analysis_prompt = generate_analysis_prompt(encoded_string.decode())
# print(encoded_string)
audio = generate(
text=analysis_prompt, voice=os.getenv("VOICE_ID"), model="eleven_turbo_v2"
)
play(audio)
| [
"[{'type': 'text', 'text': 'Describe this painting.'}, {'type': 'image_url', 'image_url': PLACEHOLDER}]"
] |
2024-01-10 | matt-tebbetts/Games_Tracker | files~archive~gpt_bot.py | import os
from dotenv import load_dotenv
import openai
# environment variables
load_dotenv()
openai.api_key = os.getenv('OPENAI_KEY')
## code starts here
def get_chat_response(messages):
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
message = response['choices'][0]['message']['content']
return message
messages = [
{"role": "system", "content": "You are a helpful assistant."},
]
user_input = input("You: ")
messages.append({"role": "user", "content": user_input})
bot_response = get_chat_response(messages)
print(f"Bot: {bot_response}")
| [
"You are a helpful assistant."
] |
2024-01-10 | almogtavor/facts-checker-gpt | facts_checker_gpt~__main__.py | import time
import openai
from colorama import Fore
from scripts.config import Config
from scripts.logger import logger
import scripts.utils
from alpaca import Alpaca
import chat
cfg = Config()
def chat_with_agent(user_prompt, alpaca: Alpaca):
prompt = user_prompt
retries = 0
while retries <= 2:
if cfg.agent_1_model == "alpaca":
agent1_response: str = chat.get_alpaca_response(prompt, alpaca)
else:
agent1_response: str = chat.get_chatgpt_response(prompt, cfg.agent_1_model)
if cfg.agent_2_model == "alpaca":
agent2_response: str = chat.verify_answer_with_alpaca(prompt, agent1_response, alpaca)
else:
agent2_response: str = chat.verify_answer_with_gpt(prompt, agent1_response, cfg.agent_2_model)
logger.typewriter_log(f"Chatbot Agent1 ({cfg.agent_1_model}) responded:", Fore.YELLOW, "")
print(agent1_response)
if agent2_response.lower() == "seems correct":
logger.typewriter_log(f"FactsCheckerGPT Agent2 ({cfg.agent_2_model}) responded:", Fore.GREEN, "")
logger.typewriter_log(agent2_response, Fore.GREEN, "")
return agent1_response
else:
logger.typewriter_log(f"FactsCheckerGPT Agent2 ({cfg.agent_2_model}) responded:", Fore.RED, "")
logger.typewriter_log(agent2_response, Fore.RED, "")
retries += 1
return "Sorry, I couldn't provide a satisfactory answer. Please try again later."
def main():
openai.api_key = cfg.openai_api_key
alpaca_cli_path = cfg.alpaca_executable_path
model_path = cfg.alpaca_model_path
alpaca: Alpaca = None
try:
alpaca = Alpaca(alpaca_cli_path, model_path)
# sleep until Alpaca is ready
time.sleep(50)
logger.typewriter_log(
"Welcome to FactsCheckerGPT! ",
Fore.GREEN,
"Designed to let you talk with ChatGPT in a safer way. Please enter your prompt below.",)
count = 0
while True:
count += 1
logger.typewriter_log(
"PROMPT NUMBER " + str(count),
Fore.CYAN,
"")
user_prompt = scripts.utils.clean_input("Prompt: ")
if user_prompt.lower() == "quit":
break
chat_with_agent(user_prompt, alpaca)
finally:
# check if alpaca is not null
if alpaca is not None:
print("Closing Alpaca")
alpaca.stop()
if __name__ == "__main__":
main()
| [
"Prompt: "
] |
2024-01-10 | reedwi/llm-app | application~aws~slack~poll-queue~llm_module~obo_langchain.py | import pinecone
from langchain.chains import RetrievalQA, LLMChain, ConversationalRetrievalChain
from langchain.chains.summarize import load_summarize_chain
from langchain.vectorstores import Pinecone
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain import OpenAI
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from langchain.callbacks import get_openai_callback
from datetime import datetime
from langchain.text_splitter import CharacterTextSplitter
import os
from langchain.docstore.document import Document
pinecone.init(api_key=os.getenv('PINECONE_API_KEY'),
environment="us-east1-gcp")
def get_retriever(namespace):
embeddings = OpenAIEmbeddings()
vector_db = Pinecone.from_existing_index('obo-internal-slackbot', embedding=embeddings, namespace=namespace)
return vector_db.as_retriever(search_kwargs={"k": 3}, qa_template=get_qa_template()['prompt'])
def get_qa_template(message=None):
if message not in [None, '']:
message = message
else:
message = 'I could not find any answers for your question'
template = f"""You are a helpful assistant for a company that is having a conversation with a human. You are responsible for helping look through the documents you have available to try and find an answer. You also have access to the prior hsitory of the conversation.
You know that the current datetime is {datetime.now()}.
If you can't find an answer or document that matches, return a message saying "{message}". If you are asked to generate or think about things based on data you have access to, complete the task.
Return all answers in markdown format.
{{context}}
QUESTION: {{question}}
FINAL ANSWER IN MARKDOWN FORMAT:"""
PROMPT = PromptTemplate(template=template, input_variables=["question", "context"])
chain_type_kwargs = {"prompt": PROMPT}
return chain_type_kwargs
def get_qa_condense_template(message=None):
if message not in [None, '']:
message = message
else:
message = 'I could not find any answers for your question'
CONDENSE_PROMPT = """Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.
Chat History:
{chat_history}
Follow Up Input: {question}
Standalone question:"""
PROMPT = PromptTemplate(template=CONDENSE_PROMPT, input_variables=["question", "chat_history"])
return PROMPT
def get_gpt_prompt():
template = """
You are a helpful assistant. Please try to answer all questions to the best of your ability.
Please return all responses in markdown style formatting
Chat History: {history}
Question: {human_input}
Assistant:"""
prompt = PromptTemplate(
input_variables=["history", "human_input"],
template=template
)
return prompt
def get_answer(namespace, question, chat_history, no_results=None):
if namespace == 'chatgpt':
memory = ConversationBufferMemory(memory_key="history")
else:
memory = ConversationBufferMemory(memory_key="chat_history", output_key='answer', return_messages=True)
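# The block below replays the Slack thread into LangChain memory: the first message
# carries the original question (text after the "*Question*: " prefix), bot replies
# are stored as AI messages with any "*Source(s)*" section stripped, other messages
# are stored as user messages, and the final message is skipped when it is the new,
# not-yet-answered user turn.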
if chat_history:
len_ch = len(chat_history)
for i, message in enumerate(chat_history):
if i == len_ch - 1:
if not message['bot']:
continue
if i == 0:
original_question = message['text'].split('*Question*: ')[1]
memory.chat_memory.add_user_message(original_question)
elif message['bot']:
if '*Source(s)*' in message['text']:
answer = message['text'].split('*Source(s)*')[0]
memory.chat_memory.add_ai_message(answer)
else:
memory.chat_memory.add_ai_message(message['text'])
else:
memory.chat_memory.add_user_message(message['text'])
if namespace == 'chatgpt':
chatgpt_chain = LLMChain(
llm=OpenAI(temperature=0.1, model_name='gpt-4'),
prompt=get_gpt_prompt(),
memory=memory
)
with get_openai_callback() as cb:
result = chatgpt_chain.predict(human_input=question)
tokens_used = cb.total_tokens
return {
"answer": result,
"response": result,
"tokens_used": tokens_used,
"chatgpt": True
}
elif namespace == 'summarize':
text_splitter = CharacterTextSplitter()
texts = text_splitter.split_text(question)
docs = [Document(page_content=t) for t in texts]
summarize_chain = load_summarize_chain(
llm=OpenAI(temperature=0.1),
chain_type='map_reduce'
)
with get_openai_callback() as cb:
result = summarize_chain.run(docs)
tokens_used = cb.total_tokens
return {
"answer": result,
"response": result,
"tokens_used": tokens_used,
"chatgpt": True
}
else:
retriever = get_retriever(namespace=namespace)
chain_type_kwargs = get_qa_template()
with get_openai_callback() as cb:
# chain = RetrievalQA.from_chain_type(
# llm=OpenAI(temperature=0.1),
# chain_type='stuff',
# retriever=retriever,
# memory=memory,
# return_source_documents=True,
# chain_type_kwargs=chain_type_kwargs
# )
chain = ConversationalRetrievalChain.from_llm(
llm=OpenAI(temperature=0.1, model_name='gpt-4'),
retriever=retriever,
chain_type='stuff',
memory=memory,
verbose=False,
return_source_documents=True,
condense_question_prompt=get_qa_condense_template(message=no_results),
combine_docs_chain_kwargs=get_qa_template(message=no_results)
)
# print(question)
result = chain({'question': question})
print(result)
tokens_used = cb.total_tokens
return {
"answer": result['answer'],
"response": result,
"tokens_used": tokens_used,
"chatgpt": False
}
def get_personal_answer(namespace, question, chat_history):
memory = ConversationBufferMemory(memory_key="chat_history", output_key='answer', return_messages=True)
if chat_history:
len_ch = len(chat_history)
for i, message in enumerate(chat_history):
if i == 0:
memory.chat_memory.add_user_message(message['text'])
elif message['bot']:
if '*Source(s)*' in message['text']:
answer = message['text'].split('*Source(s)*')[0]
memory.chat_memory.add_ai_message(answer)
else:
memory.chat_memory.add_ai_message(message['text'])
else:
memory.chat_memory.add_user_message(message['text'])
retriever = get_retriever(namespace=namespace)
chain_type_kwargs = get_qa_template()
with get_openai_callback() as cb:
chain = ConversationalRetrievalChain.from_llm(
llm=OpenAI(temperature=0.1, model_name='gpt-4'),
retriever=retriever,
chain_type='stuff',
memory=memory,
verbose=False,
return_source_documents=True,
condense_question_prompt=get_qa_condense_template(),
combine_docs_chain_kwargs=get_qa_template()
)
# print(question)
result = chain({'question': question})
print(result)
tokens_used = cb.total_tokens
return {
"answer": result['answer'],
"response": result,
"tokens_used": tokens_used,
"chatgpt": False
} | [
"question",
"chat_history",
"human_input",
"\n You are a helpful assistant. Please try to answer all questions to the best of your ability.\n Please return all responses in markdown style formatting\n\n Chat History: {history}\n Question: {human_input}\n Assistant:",
"Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question.\n\n Chat History:\n {chat_history}\n Follow Up Input: {question}\n Standalone question:",
"context"
] |
2024-01-10 | ontaptom/gcpmate | gcpmate~gcpmate.py | """
GCPMate - Google Cloud Platform assistant.
This module provides functions and classes to assist with managing
Google Cloud Platform using natural language queries and OpenAI's GPT-3 language model.
"""
import os
import re
import sys
import subprocess
import argparse
import shlex
from time import sleep
from prettytable import PrettyTable
import openai
from openai import OpenAI
class GCPMate:
"""
GCPMate is an OpenAI-powered assistant for managing Google Cloud Platform resources.
"""
def __init__(self, openai_model="gpt-3.5-turbo", skip_info=False):
"""
Initializes a new instance of the GCPMate class with the specified OpenAI model.
Args:
openai_model (str): The name of the OpenAI model to use for generating gcloud commands.
skip_info (bool): Flag indicating whether or not to skip printing runtime info.
"""
try:
self.current_user = subprocess.run(
['gcloud', 'auth', 'list', '--filter=status:ACTIVE',
'--format=value(account)'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
).stdout.decode('utf-8').strip()
self.current_project = subprocess.run(
['gcloud', 'config', 'get-value', 'project'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
).stdout.decode('utf-8').strip()
self.default_region = subprocess.run(
['gcloud', 'config', 'get-value', 'compute/region'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
).stdout.decode('utf-8').strip()
self.default_region = "(unset)" if self.default_region == "" else self.default_region
self.default_zone = subprocess.run(
['gcloud', 'config', 'get-value', 'compute/zone'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True
).stdout.decode('utf-8').strip()
self.default_zone = "(unset)" if self.default_zone == "" else self.default_zone
self.gcloud_available = True
except FileNotFoundError:
self.current_user = "gcloud not found"
self.current_project = "gcloud not found"
self.default_region = "gcloud not found"
self.default_zone = "gcloud not found"
self.gcloud_available = False
self.openai_model = openai_model
self.skip_info = skip_info
self.commands = []
self.client = OpenAI()
def blue_text(self, text):
"""
Returns the specified text in blue for console output.
"""
return f"\033[94m{text}\033[0m"
def animate(self, text):
"""
Animates the specified text in the console.
"""
for char in text:
sleep(0.05)
print(char, end='', flush=True)
print()
def get_yes_no(self):
"""
Asks the user to confirm whether or not to execute a set of gcloud commands.
"""
while True:
if not self.skip_info:
print(f"\n{self.blue_text('Fair warning')}: Execute the command(s) only if "
"fully understand the consequences. \n\t gcloud may prompt for yes/no "
"confirmation. If so, execution process will respond with yes.\n")
answer = input(
f"Would you like to execute the following {self.blue_text(len(self.commands))} "
"command(s)? [y/N] ").strip().lower()
if answer in {"y", "yes"}:
return True
elif answer in {"", "n", "no"}:
return False
else:
print("Invalid input, please try again.")
def call_openai_api(self, query, system_content):
"""
Calls the OpenAI API to generate gcloud commands based on the specified query.
Since returned output is a multiple-line string, it is split into a list of
commands and stored in the self.commands variable.
"""
completion = self.client.chat.completions.create(
model=self.openai_model,
messages=[
{"role": "system", "content": system_content},
{"role": "user", "content": query}
]
)
return completion.choices[0].message.content
def generate_commands(self, api_response):
"""
Assuming api_response contains a list of gcloud commands, this method removes unnecessary
characters from the OpenAI API response, splits the response into a list of
commands, and returns that list (the caller stores it in self.commands).
"""
# remove \<new-line> in case OpenAI returns gcloud in multiple lines
singleline_commands = api_response.replace(
'\\\n', '')
# replace multiple spaces with single-space, if any found in the reply:
singleline_commands = re.sub(' +', ' ', singleline_commands)
# Split gcloud commands separated by '&&' to separate lines, but ignore '&&'
# within parameter values. For example:
# [...] --metadata startup-script='sudo apt-get update && sudo apt-get install -y nginx'
singleline_commands = singleline_commands.replace("&& gcloud", "\n gcloud")
# split multiple commands to a list of commands and return the list
return [x.strip() for x in re.findall(
r'(?:gcloud|gsutil)\b.*?(?:\n|$)', singleline_commands)]
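# Illustrative example (hypothetical API response, not from the original code):
# "gcloud projects create my-proj && gcloud config set project my-proj"
# is returned as ["gcloud projects create my-proj",
# "gcloud config set project my-proj"].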
def print_runtime_info(self):
"""
Prints runtime info about the current gcloud configuration.
"""
table = PrettyTable()
table.field_names = ["Configuration", "Value"]
table.add_row(["Active gcloud account",
self.blue_text(self.current_user)])
table.add_row(
["Default project", self.blue_text(self.current_project)])
table.add_row(["Default region", self.blue_text(self.default_region)])
table.add_row(["Default zone", self.blue_text(self.default_zone)])
table.add_row(["OpenAI model", self.blue_text(self.openai_model)])
table.align = "l"
print(table)
def execute_commands(self):
"""
Executes the list of gcloud commands stored in the self.commands variable. If a command
contains a prompt, it is executed with a default response of "y". If a command contains
a pipe (|), it is split into subcommands and executed as a pipeline. However, if a command
contains both a pipe and a prompt, it will not execute properly.
This is a known issue and will be addressed in a future release.
"""
for command in self.commands:
print(f"---\nExecuting: {self.blue_text(self.multiline_output(command))}")
if "|" in command:
subcommands = command.split("|")
p = subprocess.Popen(shlex.split(
subcommands[0]), stdout=subprocess.PIPE)
for c in subcommands[1:]:
p1 = subprocess.Popen(shlex.split(
c), stdout=subprocess.PIPE, stdin=p.stdout)
p.stdout.close()
p = p1
try:
output = p.communicate()[0].decode('utf-8')
print(f"---\nResult:\n\n{self.blue_text(output)}")
except subprocess.CalledProcessError as process_error:
print(f"---\nError: {process_error.stderr.decode('utf-8')}")
else:
try:
p1 = subprocess.run(shlex.split(command), input='y'.encode(
), stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
print(
f"---\nResult:\n\n{self.blue_text(p1.stdout.decode('utf-8'))}\n"
f"{self.blue_text(p1.stderr.decode('utf-8'))}")
except subprocess.CalledProcessError as process_error:
print(f"---\nError: {process_error.stderr.decode('utf-8')}")
def multiline_output(self, command, sep=' \\ \n\t'):
"""
Check if the command is 100 characters or more; if so, add ' \\ \n\t' (or another separator)
at the nearest space before the 100th character of each remaining chunk, so that the
command prints across multiple lines in the terminal.
"""
if len(command) < 100:
return command
else:
lines = []
while len(command) > 100:
lines.append(command[:command[:100].rfind(' ')] + sep)
command = command[command[:100].rfind(' ')+1:]
lines.append(command) # add the last line
return ''.join(lines)
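# e.g. a single 180-character gcloud command comes back as two lines, with the break
# placed at the last space before the 100th character and the separator appended.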
def explain(self,query, system_content):
"""
Explain the query to the user
"""
response = self.call_openai_api(query, system_content)
response = response.lstrip() + "\n" # response sometimes contains unnecessary leading spaces
self.animate(self.blue_text(self.multiline_output(response, sep="\n")))
def run(self, query, system_content):
"""
Main method to run GCPMate with the specified query.
Args:
query (str): The query to be passed to the OpenAI API.
"""
if not self.skip_info:
self.print_runtime_info()
# call OpenAI API
api_response = self.call_openai_api(query, system_content)
# generate list of commands from the API response
self.commands = self.generate_commands(api_response)
if len(self.commands) == 0:
print("I'm sorry. Your question did not return any potential solution.\n"
"You can try rephrasing your question or use a different model by running the "
"command with '-m <model_name>' parameter. For more info run 'gcpmate -h'.")
# finish script at this point
return
print(
f"The proposed solution consist of {len(self.commands)} command(s):")
i = 0
for command in self.commands:
i += 1
self.animate(f'\t[{i}] {self.blue_text(self.multiline_output(command))}')
if self.gcloud_available:
doit = self.get_yes_no()
else:
doit = False
print("gcloud is not found, bye. ")
return
if not doit:
# placeholder for exit message
return
else:
self.execute_commands()
def main():
""" Main function to run GCPMate."""
openai_api_key = os.environ.get('OPENAI_API_KEY')
if not openai_api_key:
print("GCPMate uses OpenAI API to assist user with Google Cloud mgmt. To use this tool "
"please set OPENAI_API_KEY environment variable to your OpenAI API key.\n"
"You can get your API key from https://platform.openai.com/account/api-keys. "
"To set the environment variable, run the following command:\n\n"
"export OPENAI_API_KEY=<your-api-key>\n")
sys.exit(1)
openai.api_key = openai_api_key
parser = argparse.ArgumentParser(description='GCPMate - Google Cloud Platform assistant.\n'
'Describe in query what you wish to achieve, and gcpmate '
'(with a little help from OpenAI) will try to come up with a solution.\n'
'If you like proposed outcome, gcpmate can also '
'handle execution!', add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
epilog='Example usage:\n\ngcpmate "create new project called '
'my-superb-new-project"')
parser.add_argument(
'query', type=str, help='Query explaining what you wish to achieve in GCP')
parser.add_argument('-m', '--model', type=str, help='OpenAI model to use for completion. Default: gpt-3.5-turbo. ')
parser.add_argument('-s', '--skip-info', action='store_true',
help='Skip printing "Fair warning" message and runtime info (gcloud account, project, region, zone, OpenAI model)')
parser.add_argument('-e', '--explain', action='store_true',
help='Returns explanation to given query, which can be command, error message, etc.')
args = parser.parse_args()
model = args.model if args.model else "gpt-3.5-turbo"
gcpmate = GCPMate(openai_model=model, skip_info=args.skip_info)
if args.explain:
system_content = '''
You are an assistant that explains a given gcloud or gsutil command, or any error message returned by a Google Cloud command or API.
'''
gcpmate.explain(args.query, system_content)
else:
system_content = '''
You are an assistant that provides Google Cloud Platform (GCP) command-line instructions using `gcloud` / `gsutil` / `bq` commands.
Respond to user queries with the appropriate `gcloud` command or a series of commands, each command presented on a new line.
Provide these commands without any comments or explanations. Output must contain only commands, no explanation.
'''
gcpmate.run(args.query, system_content)
if __name__ == '__main__':
main()
| [] |
2024-01-10 | DerekTang-Intel/azure-search-openai-demo | scripts~prepdocslib~searchmanager.py | import asyncio
import os
from typing import List, Optional
from azure.search.documents.indexes.models import (
HnswParameters,
PrioritizedFields,
SearchableField,
SearchField,
SearchFieldDataType,
SearchIndex,
SemanticConfiguration,
SemanticField,
SemanticSettings,
SimpleField,
VectorSearch,
VectorSearchAlgorithmConfiguration,
)
from .blobmanager import BlobManager
from .embeddings import OpenAIEmbeddings
from .listfilestrategy import File
from .strategy import SearchInfo
from .textsplitter import SplitPage
class Section:
"""
A section of a page that is stored in a search service. These sections are used as context by Azure OpenAI service
"""
def __init__(self, split_page: SplitPage, content: File, category: Optional[str] = None):
self.split_page = split_page
self.content = content
self.category = category
class SearchManager:
"""
Class to manage a search service. It can create indexes, and update or remove sections stored in these indexes
To learn more, please visit https://learn.microsoft.com/azure/search/search-what-is-azure-search
"""
def __init__(
self,
search_info: SearchInfo,
search_analyzer_name: Optional[str] = None,
use_acls: bool = False,
embeddings: Optional[OpenAIEmbeddings] = None,
):
self.search_info = search_info
self.search_analyzer_name = search_analyzer_name
self.use_acls = use_acls
self.embeddings = embeddings
async def create_index(self):
if self.search_info.verbose:
print(f"Ensuring search index {self.search_info.index_name} exists")
async with self.search_info.create_search_index_client() as search_index_client:
fields = [
SimpleField(name="id", type="Edm.String", key=True),
SearchableField(name="content", type="Edm.String", analyzer_name=self.search_analyzer_name),
SearchField(
name="embedding",
type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
hidden=False,
searchable=True,
filterable=False,
sortable=False,
facetable=False,
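                    # 1536 is the vector size produced by OpenAI's text-embedding-ada-002 embedding model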
vector_search_dimensions=1536,
vector_search_configuration="default",
),
SimpleField(name="category", type="Edm.String", filterable=True, facetable=True),
SimpleField(name="sourcepage", type="Edm.String", filterable=True, facetable=True),
SimpleField(name="sourcefile", type="Edm.String", filterable=True, facetable=True),
]
if self.use_acls:
fields.append(
SimpleField(
name="oids", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True
)
)
fields.append(
SimpleField(
name="groups", type=SearchFieldDataType.Collection(SearchFieldDataType.String), filterable=True
)
)
index = SearchIndex(
name=self.search_info.index_name,
fields=fields,
semantic_settings=SemanticSettings(
configurations=[
SemanticConfiguration(
name="default",
prioritized_fields=PrioritizedFields(
title_field=None, prioritized_content_fields=[SemanticField(field_name="content")]
),
)
]
),
vector_search=VectorSearch(
algorithm_configurations=[
VectorSearchAlgorithmConfiguration(
name="default", kind="hnsw", hnsw_parameters=HnswParameters(metric="cosine")
)
]
),
)
if self.search_info.index_name not in [name async for name in search_index_client.list_index_names()]:
if self.search_info.verbose:
print(f"Creating {self.search_info.index_name} search index")
await search_index_client.create_index(index)
else:
if self.search_info.verbose:
print(f"Search index {self.search_info.index_name} already exists")
async def update_content(self, sections: List[Section]):
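        # Azure AI Search currently caps a single document upload request at 1000 items, hence the batching below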
MAX_BATCH_SIZE = 1000
section_batches = [sections[i : i + MAX_BATCH_SIZE] for i in range(0, len(sections), MAX_BATCH_SIZE)]
async with self.search_info.create_search_client() as search_client:
for batch_index, batch in enumerate(section_batches):
documents = [
{
"id": f"{section.content.filename_to_id()}-page-{section_index + batch_index * MAX_BATCH_SIZE}",
"content": section.split_page.text,
"category": section.category,
"sourcepage": BlobManager.sourcepage_from_file_page(
filename=section.content.filename(), page=section.split_page.page_num
),
"sourcefile": section.content.filename(),
**section.content.acls,
}
for section_index, section in enumerate(batch)
]
if self.embeddings:
embeddings = await self.embeddings.create_embeddings(
texts=[section.split_page.text for section in batch]
)
for i, document in enumerate(documents):
document["embedding"] = embeddings[i]
await search_client.upload_documents(documents)
async def remove_content(self, path: Optional[str] = None):
if self.search_info.verbose:
print(f"Removing sections from '{path or '<all>'}' from search index '{self.search_info.index_name}'")
async with self.search_info.create_search_client() as search_client:
while True:
filter = None if path is None else f"sourcefile eq '{os.path.basename(path)}'"
result = await search_client.search("", filter=filter, top=1000, include_total_count=True)
if await result.get_count() == 0:
break
removed_docs = await search_client.delete_documents(
documents=[{"id": document["id"]} async for document in result]
)
if self.search_info.verbose:
print(f"\tRemoved {len(removed_docs)} sections from index")
# It can take a few seconds for search results to reflect changes, so wait a bit
await asyncio.sleep(2)
| [] |
2024-01-10 | DerekTang-Intel/azure-search-openai-demo | app~backend~approaches~chatreadretrieveread.py | import json
import logging
import re
from typing import Any, AsyncGenerator, Optional, Union
import aiohttp
import openai
from azure.search.documents.aio import SearchClient
from azure.search.documents.models import QueryType
from approaches.approach import Approach
from core.messagebuilder import MessageBuilder
from core.modelhelper import get_token_limit
from text import nonewlines
class ChatReadRetrieveReadApproach(Approach):
# Chat roles
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"
NO_RESPONSE = "0"
"""
A multi-step approach that first uses OpenAI to turn the user's question into a search query,
then uses Azure AI Search to retrieve relevant documents, and then sends the conversation history,
original user question, and search results to OpenAI to generate a response.
"""
system_message_chat_conversation = """Assistant helps the company employees with their healthcare plan questions, and questions about the employee handbook. Be brief in your answers.
Answer ONLY with the facts listed in the list of sources below. If there isn't enough information below, say you don't know. Do not generate answers that don't use the sources below. If asking a clarifying question to the user would help, ask the question.
For tabular information return it as an html table. Do not return markdown format. If the question is not in English, answer in the language used in the question.
Each source has a name followed by colon and the actual information, always include the source name for each fact you use in the response. Use square brackets to reference the source, for example [info1.txt]. Don't combine sources, list each source separately, for example [info1.txt][info2.pdf].
{follow_up_questions_prompt}
{injected_prompt}
"""
follow_up_questions_prompt_content = """Generate 3 very brief follow-up questions that the user would likely ask next.
Enclose the follow-up questions in double angle brackets. Example:
<<Are there exclusions for prescriptions?>>
<<Which pharmacies can be ordered from?>>
<<What is the limit for over-the-counter medication?>>
Do no repeat questions that have already been asked.
Make sure the last question ends with ">>"."""
query_prompt_template = """Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook.
You have access to an Azure AI Search index with 100's of documents.
Generate a search query based on the conversation and the new question.
Do not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.
Do not include any text inside [] or <<>> in the search query terms.
Do not include any special characters like '+'.
If the question is not in English, translate the question to English before generating the search query.
If you cannot generate a search query, return just the number 0.
"""
query_prompt_few_shots = [
{"role": USER, "content": "What are my health plans?"},
{"role": ASSISTANT, "content": "Show available health plans"},
{"role": USER, "content": "does my plan cover cardio?"},
{"role": ASSISTANT, "content": "Health plan cardio coverage"},
]
def __init__(
self,
search_client: SearchClient,
openai_host: str,
chatgpt_deployment: Optional[str], # Not needed for non-Azure OpenAI
chatgpt_model: str,
embedding_deployment: Optional[str], # Not needed for non-Azure OpenAI or for retrieval_mode="text"
embedding_model: str,
sourcepage_field: str,
content_field: str,
query_language: str,
query_speller: str,
):
self.search_client = search_client
self.openai_host = openai_host
self.chatgpt_deployment = chatgpt_deployment
self.chatgpt_model = chatgpt_model
self.embedding_deployment = embedding_deployment
self.embedding_model = embedding_model
self.sourcepage_field = sourcepage_field
self.content_field = content_field
self.query_language = query_language
self.query_speller = query_speller
self.chatgpt_token_limit = get_token_limit(chatgpt_model)
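        # get_token_limit returns the model's maximum context length in tokens; it is used below to budget prompt size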
async def run_until_final_call(
self,
history: list[dict[str, str]],
overrides: dict[str, Any],
auth_claims: dict[str, Any],
should_stream: bool = False,
) -> tuple:
has_text = overrides.get("retrieval_mode") in ["text", "hybrid", None]
has_vector = overrides.get("retrieval_mode") in ["vectors", "hybrid", None]
use_semantic_captions = True if overrides.get("semantic_captions") and has_text else False
top = overrides.get("top", 3)
filter = self.build_filter(overrides, auth_claims)
original_user_query = history[-1]["content"]
user_query_request = "Generate search query for: " + original_user_query
functions = [
{
"name": "search_sources",
"description": "Retrieve sources from the Azure AI Search index",
"parameters": {
"type": "object",
"properties": {
"search_query": {
"type": "string",
"description": "Query string to retrieve documents from azure search eg: 'Health care plan'",
}
},
"required": ["search_query"],
},
}
]
# STEP 1: Generate an optimized keyword search query based on the chat history and the last question
messages = self.get_messages_from_history(
system_prompt=self.query_prompt_template,
model_id=self.chatgpt_model,
history=history,
user_content=user_query_request,
max_tokens=self.chatgpt_token_limit - len(user_query_request),
few_shots=self.query_prompt_few_shots,
)
chatgpt_args = {"deployment_id": self.chatgpt_deployment} if self.openai_host == "azure" else {}
chat_completion = await openai.ChatCompletion.acreate(
**chatgpt_args,
model=self.chatgpt_model,
messages=messages,
temperature=0.0,
max_tokens=100, # Setting too low risks malformed JSON, setting too high may affect performance
n=1,
functions=functions,
function_call="auto",
)
query_text = self.get_search_query(chat_completion, original_user_query)
# STEP 2: Retrieve relevant documents from the search index with the GPT optimized query
# If retrieval mode includes vectors, compute an embedding for the query
if has_vector:
embedding_args = {"deployment_id": self.embedding_deployment} if self.openai_host == "azure" else {}
embedding = await openai.Embedding.acreate(**embedding_args, model=self.embedding_model, input=query_text)
query_vector = embedding["data"][0]["embedding"]
else:
query_vector = None
# Only keep the text query if the retrieval mode uses text, otherwise drop it
if not has_text:
query_text = None
# Use semantic L2 reranker if requested and if retrieval mode is text or hybrid (vectors + text)
if overrides.get("semantic_ranker") and has_text:
r = await self.search_client.search(
query_text,
filter=filter,
query_type=QueryType.SEMANTIC,
query_language=self.query_language,
query_speller=self.query_speller,
semantic_configuration_name="default",
top=top,
query_caption="extractive|highlight-false" if use_semantic_captions else None,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
else:
r = await self.search_client.search(
query_text,
filter=filter,
top=top,
vector=query_vector,
top_k=50 if query_vector else None,
vector_fields="embedding" if query_vector else None,
)
if use_semantic_captions:
results = [
doc[self.sourcepage_field] + ": " + nonewlines(" . ".join([c.text for c in doc["@search.captions"]]))
async for doc in r
]
else:
results = [doc[self.sourcepage_field] + ": " + nonewlines(doc[self.content_field]) async for doc in r]
content = "\n".join(results)
follow_up_questions_prompt = (
self.follow_up_questions_prompt_content if overrides.get("suggest_followup_questions") else ""
)
# STEP 3: Generate a contextual and content specific answer using the search results and chat history
        # Allow client to replace the entire prompt, or to inject into the existing prompt using >>>
prompt_override = overrides.get("prompt_template")
if prompt_override is None:
system_message = self.system_message_chat_conversation.format(
injected_prompt="", follow_up_questions_prompt=follow_up_questions_prompt
)
elif prompt_override.startswith(">>>"):
system_message = self.system_message_chat_conversation.format(
injected_prompt=prompt_override[3:] + "\n", follow_up_questions_prompt=follow_up_questions_prompt
)
else:
system_message = prompt_override.format(follow_up_questions_prompt=follow_up_questions_prompt)
response_token_limit = 1024
messages_token_limit = self.chatgpt_token_limit - response_token_limit
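        # i.e. reserve 1024 tokens for the generated answer and give the rest of the context window to the prompt and history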
messages = self.get_messages_from_history(
system_prompt=system_message,
model_id=self.chatgpt_model,
history=history,
# Model does not handle lengthy system messages well. Moving sources to latest user conversation to solve follow up questions prompt.
user_content=original_user_query + "\n\nSources:\n" + content,
max_tokens=messages_token_limit,
)
msg_to_display = "\n\n".join([str(message) for message in messages])
extra_info = {
"data_points": results,
"thoughts": f"Searched for:<br>{query_text}<br><br>Conversations:<br>"
+ msg_to_display.replace("\n", "<br>"),
}
chat_coroutine = openai.ChatCompletion.acreate(
**chatgpt_args,
model=self.chatgpt_model,
messages=messages,
temperature=overrides.get("temperature") or 0.7,
max_tokens=response_token_limit,
n=1,
stream=should_stream,
)
return (extra_info, chat_coroutine)
async def run_without_streaming(
self,
history: list[dict[str, str]],
overrides: dict[str, Any],
auth_claims: dict[str, Any],
session_state: Any = None,
) -> dict[str, Any]:
extra_info, chat_coroutine = await self.run_until_final_call(
history, overrides, auth_claims, should_stream=False
)
chat_resp = dict(await chat_coroutine)
chat_resp["choices"][0]["context"] = extra_info
if overrides.get("suggest_followup_questions"):
content, followup_questions = self.extract_followup_questions(chat_resp["choices"][0]["message"]["content"])
chat_resp["choices"][0]["message"]["content"] = content
chat_resp["choices"][0]["context"]["followup_questions"] = followup_questions
chat_resp["choices"][0]["session_state"] = session_state
return chat_resp
async def run_with_streaming(
self,
history: list[dict[str, str]],
overrides: dict[str, Any],
auth_claims: dict[str, Any],
session_state: Any = None,
) -> AsyncGenerator[dict, None]:
extra_info, chat_coroutine = await self.run_until_final_call(
history, overrides, auth_claims, should_stream=True
)
yield {
"choices": [
{
"delta": {"role": self.ASSISTANT},
"context": extra_info,
"session_state": session_state,
"finish_reason": None,
"index": 0,
}
],
"object": "chat.completion.chunk",
}
followup_questions_started = False
followup_content = ""
async for event in await chat_coroutine:
# "2023-07-01-preview" API version has a bug where first response has empty choices
if event["choices"]:
# if event contains << and not >>, it is start of follow-up question, truncate
content = event["choices"][0]["delta"].get("content", "")
if overrides.get("suggest_followup_questions") and "<<" in content:
followup_questions_started = True
earlier_content = content[: content.index("<<")]
if earlier_content:
event["choices"][0]["delta"]["content"] = earlier_content
yield event
followup_content += content[content.index("<<") :]
elif followup_questions_started:
followup_content += content
else:
yield event
if followup_content:
_, followup_questions = self.extract_followup_questions(followup_content)
yield {
"choices": [
{
"delta": {"role": self.ASSISTANT},
"context": {"followup_questions": followup_questions},
"finish_reason": None,
"index": 0,
}
],
"object": "chat.completion.chunk",
}
async def run(
self, messages: list[dict], stream: bool = False, session_state: Any = None, context: dict[str, Any] = {}
) -> Union[dict[str, Any], AsyncGenerator[dict[str, Any], None]]:
overrides = context.get("overrides", {})
auth_claims = context.get("auth_claims", {})
if stream is False:
# Workaround for: https://github.com/openai/openai-python/issues/371
async with aiohttp.ClientSession() as s:
openai.aiosession.set(s)
response = await self.run_without_streaming(messages, overrides, auth_claims, session_state)
return response
else:
return self.run_with_streaming(messages, overrides, auth_claims, session_state)
def get_messages_from_history(
self,
system_prompt: str,
model_id: str,
history: list[dict[str, str]],
user_content: str,
max_tokens: int,
few_shots=[],
) -> list:
message_builder = MessageBuilder(system_prompt, model_id)
# Add examples to show the chat what responses we want. It will try to mimic any responses and make sure they match the rules laid out in the system message.
for shot in reversed(few_shots):
message_builder.insert_message(shot.get("role"), shot.get("content"))
append_index = len(few_shots) + 1
message_builder.insert_message(self.USER, user_content, index=append_index)
total_token_count = message_builder.count_tokens_for_message(message_builder.messages[-1])
newest_to_oldest = list(reversed(history[:-1]))
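        # Walk the history from the most recent turn backwards, stopping once the remaining token budget is exhausted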
for message in newest_to_oldest:
potential_message_count = message_builder.count_tokens_for_message(message)
if (total_token_count + potential_message_count) > max_tokens:
logging.debug("Reached max tokens of %d, history will be truncated", max_tokens)
break
message_builder.insert_message(message["role"], message["content"], index=append_index)
total_token_count += potential_message_count
return message_builder.messages
def get_search_query(self, chat_completion: dict[str, Any], user_query: str):
response_message = chat_completion["choices"][0]["message"]
if function_call := response_message.get("function_call"):
if function_call["name"] == "search_sources":
arg = json.loads(function_call["arguments"])
search_query = arg.get("search_query", self.NO_RESPONSE)
if search_query != self.NO_RESPONSE:
return search_query
elif query_text := response_message.get("content"):
if query_text.strip() != self.NO_RESPONSE:
return query_text
return user_query
def extract_followup_questions(self, content: str):
return content.split("<<")[0], re.findall(r"<<([^>>]+)>>", content)
| [
"Show available health plans",
"Health plan cardio coverage",
"What are my health plans?",
"Below is a history of the conversation so far, and a new question asked by the user that needs to be answered by searching in a knowledge base about employee healthcare plans and the employee handbook.\nYou have access to an Azure AI Search index with 100's of documents.\nGenerate a search query based on the conversation and the new question.\nDo not include cited source filenames and document names e.g info.txt or doc.pdf in the search query terms.\nDo not include any text inside [] or <<>> in the search query terms.\nDo not include any special characters like '+'.\nIf the question is not in English, translate the question to English before generating the search query.\nIf you cannot generate a search query, return just the number 0.\n",
"suggest_followup_questions",
"Generate 3 very brief follow-up questions that the user would likely ask next.\nEnclose the follow-up questions in double angle brackets. Example:\n<<Are there exclusions for prescriptions?>>\n<<Which pharmacies can be ordered from?>>\n<<What is the limit for over-the-counter medication?>>\nDo no repeat questions that have already been asked.\nMake sure the last question ends with \">>\".",
"does my plan cover cardio?",
"prompt_template"
] |
2024-01-10 | YoshimatsuSaito/whisper_podcast | modules~article_generator.py | import os
import dotenv
import openai
from langchain.chat_models import ChatOpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from tqdm import tqdm
dotenv.load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
class ArticleGenerator:
"""Generate article from the podcast transcript"""
def __init__(
self,
title: str,
text: str,
model_name: str = "gpt-3.5-turbo",
chunk_size: int = 1024,
chunk_overlap: int = 0,
) -> None:
self.model_name = model_name
self.llm = ChatOpenAI(
temperature=0,
openai_api_key=os.environ["OPENAI_API_KEY"],
model_name=self.model_name,
)
self.title = title
self.text = text
self.list_split_text = self._split_text(
chunk_size=chunk_size, chunk_overlap=chunk_overlap
)
def _split_text(self, chunk_size: int, chunk_overlap: int) -> list[str]:
"""Split the text into multiple documents"""
text_splitter = RecursiveCharacterTextSplitter.from_tiktoken_encoder(
model_name=self.model_name,
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
)
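        # chunk_size / chunk_overlap are measured in model tokens here (via tiktoken), not in characters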
texts = text_splitter.split_text(self.text)
return texts
def _summarize_transcript(self, title: str, text: str, max_tokens: int) -> str:
"""Generate summary from the transcript"""
user_message = f"""
Your task is to expertly summarize the content of a podcast.
The podcast title is {title}.
As you read through the transcript, please adhere to the following requirements in your summary:
- Match the Tone: The tone of your summary should align with the atmosphere of the content being discussed. If the subject matter is serious, maintain a formal tone; conversely, if the content is light-hearted, reflect that in a more casual style.
- Sectional Breakdown: Divide your summary into sections based on different topics discussed in the podcast.
- Language Consistency: Ensure that the summary is written in the same language as the transcript.
- Caution: The transcript for summarization is a segment of a larger podcast. When you summarize, focus exclusively on the segment provided. It's important to remember not to add any concluding remarks or extrapolations beyond what is presented in this specific portion. Your task is to create a concise and accurate summary of this given segment alone, adhering strictly to the content it contains.
- Format: The output should be in markdown format. Each section should start with a header '###' and the header should be the topic of the section. Do not add title header of the summary, just the sections.
The transcript of the episode is as follows:
{text}
"""
res = openai.ChatCompletion.create(
model=self.model_name,
messages=[{"role": "user", "content": user_message}],
max_tokens=max_tokens,
)
return res["choices"][0]["message"]["content"]
def get_list_summary(self, max_tokens: int) -> list[str]:
"""Generate summaries from transcripts"""
list_article = []
for text in tqdm(self.list_split_text):
article = self._summarize_transcript(
text=text, title=self.title, max_tokens=max_tokens
)
list_article.append(f"{article} \n\n")
return list_article
def summarize_summaries(self, texts: list[str], max_tokens: int) -> str:
"""Summarize the summaries"""
summaries = "".join(texts)
user_message = f"""
You are a professional summarizer.
You will be provided with a text that is a combination of summaries from different segments of a podcast.
Your task is to create a further condensed summary of this combined text. While doing so, please ensure to:
- Preserve the Tone: Maintain the atmosphere and style of the original summaries. Whether the content is serious, humorous, or of any other tone, your summary should reflect that.
- Language Consistency: The summary should be in the same language as the provided text.
- Topic-Based Organization: Structure your summary by dividing it into sections based on the different topics covered in the summaries.
- Format: The output should be in markdown format. Each section should start with a header '###' and the header should be the topic of the section. Summary should start with title header '##'.
Here are the combination of summaries you need to summarize:
{summaries}
"""
res = openai.ChatCompletion.create(
model=self.model_name,
messages=[{"role": "user", "content": user_message}],
max_tokens=max_tokens,
)
return res["choices"][0]["message"]["content"]
| [] |
2024-01-10 | YoshimatsuSaito/whisper_podcast | modules~transcriber.py | import os
from pathlib import Path
import dotenv
import openai
dotenv.load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
class Transcriber:
"""Transcribes the audio file using the OpenAI API
    The argument is a list of audio paths because the API's file-size limit requires the audio to be split into chunks.
"""
def __init__(
self,
list_audio_path: list[Path],
model_name: str = "whisper-1",
) -> None:
self.model_name = model_name
self.list_audio_path = list_audio_path
@staticmethod
def transcribe(audio_file_path: Path, model_name: str) -> str:
"""Transcribes the audio file using the OpenAI API"""
with audio_file_path.open("rb") as audio_file:
transcript = openai.Audio.transcribe(model_name, audio_file, verbose=True)
return transcript["text"]
def get_full_transcript(self) -> str:
"""Transcribes all the audio chunks in the given directory and returns the full transcript"""
transcript_full = ""
for audio_path in self.list_audio_path:
if audio_path.is_file() and audio_path.suffix == ".mp3":
print(f"Transcribing {audio_path.name}...")
transcript = Transcriber.transcribe(audio_path, self.model_name)
transcript_full += transcript
return transcript_full
| [] |
2024-01-10 | yoonjk/watsonx | ai~app~routers~watsonx.py | # general
import requests, os, json
# fastapi
from fastapi import APIRouter, File, UploadFile, Form, Query
# ibm-generative-ai package
from genai.credentials import Credentials
from genai.extensions.langchain import LangChainInterface
from genai.schemas import GenerateParams
from genai.model import Model
from genai.prompt_pattern import PromptPattern
# watson
from ibm_watson import LanguageTranslatorV3
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from langchain.document_loaders import PyPDFDirectoryLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains.question_answering import load_qa_chain
"""
User-defined modules
"""
# models
from schemas import Message, PromptMessage
from config import getConfig
from typing import Optional, List
from decimal import Decimal
api_key = os.getenv("GENAI_KEY", None)
api_url = os.getenv("GENAI_URL", None)
print('api_key:', api_key)
creds = Credentials(api_key, api_endpoint=api_url)
router = APIRouter(prefix='/api/v1', tags = ["watsonx"])
@router.post('/qna',
description="prompt message",
responses={
404: {"model": Message, "description": "The item was not found"},
200: {
"description": "Item requested by ID",
"content": {
"application/json": {
"example": {"id": "bar", "value": "The bar tenders"}
}
},
},
})
async def qna(message: str = Form(), uploadfile: Optional[UploadFile] = File(None)):
if uploadfile:
try:
contents = await uploadfile.read()
print(contents)
finally:
uploadfile.file.close()
print(message)
print("\n------------- Example (LangChain)-------------\n")
#translate(body)
params = GenerateParams(decoding_method="greedy", max_new_tokens=700)
langchain_model = LangChainInterface(model="google/flan-ul2", params=params, credentials=creds)
result = langchain_model('{}'.format(message))
print("------------result:", result)
transMessage = langTranslate(result, 'en', 'ko')
voiceText = transMessage.get('translations')
msg = voiceText[0]
print(msg.get('translation'))
return msg
# return result
@router.post('/rag')
async def gen_rag(message: str = Form(),
path: Optional[str] = 'rag',
decoding_method: Optional[str] = 'sample',
min_new_tokens: Optional[int] = 50,
max_new_tokens: Optional[int] = 200,
repetition_penalty: Optional[Decimal] = 1.0,
temperature: Optional[Decimal] = 0.9,
top_k: Optional[int] = 50
):
    # Load the PDF documents
loader = PyPDFDirectoryLoader(path)
documents = loader.load()
    # Split the text into chunks
text_splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=100)
split_docs = text_splitter.split_documents(documents)
    # Embeddings
embeddings = HuggingFaceEmbeddings()
# Create the vectorized db
db = FAISS.from_documents(split_docs, embeddings)
docs = db.similarity_search(message)
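    # The chunks most similar to the query become the context documents handed to the QA chain below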
params = GenerateParams(
decoding_method = decoding_method,
max_new_tokens=max_new_tokens,
min_new_tokens=min_new_tokens,
repetition_penalty=repetition_penalty,
top_k=top_k,
temperature=temperature
).dict()
watsonx_model = LangChainInterface(model="google/flan-t5-xl", credentials=creds, params=params)
chain = load_qa_chain(watsonx_model, chain_type="stuff")
response = watsonx_model(message)
print(response)
response = chain({'input_documents': docs, 'question': message}, return_only_outputs = True)
return response
@router.post('/summary')
async def summarize(message: str = Form(),
upload_file: Optional[UploadFile] = File(None),
lang : Optional[str] = 'kr',
llm_model: Optional[str] = 'bigscience/mt0-xxl',
decoding_method: Optional[str] = 'sample',
min_new_tokens: Optional[int] = 50,
max_new_tokens: Optional[int] = 200,
repetition_penalty: Optional[Decimal] = 1.0,
temperature: Optional[Decimal] = 0.9,
top_k: Optional[int] = 50
):
json_data = json.load(upload_file.file)
    content = 'news article'
instruct = 'summary'
    if not message:
        message = '다음 본문은 뉴스 기사입니다. 본문을 세 문장으로 요약해주세요.'
    # "The following document is a news article from Korea. Read the document and then write a 3-sentence summary."
if not lang:
lang = 'kr'
if lang == 'kr' or lang == 'ko':
        content = '본문'
        instruct = '요약'
params = GenerateParams(
decoding_method = decoding_method,
max_new_tokens=max_new_tokens,
min_new_tokens=min_new_tokens,
repetition_penalty=repetition_penalty,
top_k=top_k,
temperature=temperature
)
# Prompt pattern
prompt_str = """
{0}
{1}:
{2}
{3}:
""".format(message, content, json_data['text'], instruct)
pattern = PromptPattern.from_str(prompt_str)
model = Model(model=llm_model, params=params, credentials=creds)
responses = model.generate_as_completed([str(pattern)])
result = []
for response in responses:
print("Generated text:")
result.append(response.generated_text)
return {'result': '\n'.join(result)}
def langTranslate(message: str, source: str, target: str):
apikey=os.environ['LANG_TRANSLATOR_APIKEY']
url = os.environ['LANG_TRANSLATOR_URL']
print(f'url:{url}')
authenticator = IAMAuthenticator(apikey)
language_translator = LanguageTranslatorV3(
version='2018-05-01',
authenticator=authenticator
)
language_translator.set_service_url(url)
print(message)
translation = language_translator.translate(
text=message,
source=source,
target=target
).get_result()
return translation | [
"{'application/json': {'example': {'id': 'bar', 'value': 'The bar tenders'}}}",
"\n PLACEHOLDER\n \n PLACEHOLDER:\n PLACEHOLDER\n PLACEHOLDER:\n "
] |
2024-01-10 | woohwan/richard-chatbot | langchain~LCEL~get-started~bedrock-basic.py | from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
# create boto3 session
import boto3
session = boto3.session.Session(profile_name='default')
# create bedrock client from session
bedrock_client = session.client(
service_name='bedrock-runtime',
region_name='us-east-1')
# create bedrock chat model
from langchain.llms.bedrock import Bedrock
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
model = Bedrock(
client=bedrock_client,
model_id='anthropic.claude-v2',
model_kwargs={ 'max_tokens_to_sample': 512 },
streaming=True,
callbacks=[StreamingStdOutCallbackHandler()],
)
prompt = PromptTemplate.from_template("tell me a short joke about {topic}")
output_parser = StrOutputParser()
chain = prompt | model | output_parser
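# The "|" operator (LangChain Expression Language) pipes the prompt into the model and the model output into the parser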
chain.invoke({"topic": "ice cream"}) | [
"tell me a short joke about {topic}"
] |
2024-01-10 | woohwan/richard-chatbot | langchain~LCEL~get-started~basic.py | from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
prompt = ChatPromptTemplate.from_template("tell me a short joke about {topic}")
model = ChatOpenAI()
output_parser = StrOutputParser()
chain = prompt | model | output_parser
chain.invoke({"topic": "ice cream"}) | [
"tell me a short joke about {topic}"
] |
2024-01-10 | woohwan/richard-chatbot | gradio~bedrock~sample.py | import gradio as gr
import openai
openai.api_key = 'YOUR API KEY HERE'
def answer(state, state_chatbot, text):
messages = state + [{
'role': 'user',
'content': text
}]
res = openai.ChatCompletion.create(
model='gpt-3.5-turbo',
messages=messages
)
msg = res['choices'][0]['message']['content']
new_state = [{
'role': 'user',
'content': text
}, {
'role': 'assistant',
'content': msg
}]
state = state + new_state
state_chatbot = state_chatbot + [(text, msg)]
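    # `state` keeps the raw OpenAI message history; `state_chatbot` keeps (user, assistant) pairs for the Chatbot widget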
print(state)
return state, state_chatbot, state_chatbot
with gr.Blocks(css='#chatbot .overflow-y-auto{height:750px}') as demo:
state = gr.State([{
'role': 'system',
'content': 'You are a helpful assistant.'
}])
state_chatbot = gr.State([])
with gr.Row():
gr.HTML("""<div style="text-align: center; max-width: 500px; margin: 0 auto;">
<div>
<h1>Yunwoong's ChatGPT-3.5</h1>
</div>
<p style="margin-bottom: 10px; font-size: 94%">
Blog <a href="https://yunwoong.tistory.com/">Be Original</a>
</p>
</div>""")
with gr.Row():
chatbot = gr.Chatbot(elem_id='chatbot')
with gr.Row():
txt = gr.Textbox(show_label=False, placeholder='Send a message...').style(container=False)
txt.submit(answer, [state, state_chatbot, txt], [state, state_chatbot, chatbot])
txt.submit(lambda: '', None, txt)
demo.launch(debug=True, share=True) | [
"state + [{\n 'role': 'user',\n 'content': text\n }]",
"You are a helpful assistant."
] |
2024-01-10 | ankitshah009/fastformers | src~transformers~modeling_openai.py | # coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI GPT model."""
import json
import logging
import math
import os
import warnings
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from .activations import gelu_new, swish
from .configuration_openai import OpenAIGPTConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import (
Conv1D,
PreTrainedModel,
SequenceSummary,
find_pruneable_heads_and_indices,
prune_conv1d_layer,
)
logger = logging.getLogger(__name__)
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"openai-gpt",
# See all OpenAI GPT models at https://huggingface.co/models?filter=openai-gpt
]
def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):
""" Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
"""
import re
import numpy as np
if ".ckpt" in openai_checkpoint_folder_path:
openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path)
logger.info("Loading weights from {}".format(openai_checkpoint_folder_path))
with open(openai_checkpoint_folder_path + "/parameters_names.json", "r", encoding="utf-8") as names_handle:
names = json.load(names_handle)
with open(openai_checkpoint_folder_path + "/params_shapes.json", "r", encoding="utf-8") as shapes_handle:
shapes = json.load(shapes_handle)
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(openai_checkpoint_folder_path + "/params_{}.npy".format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
# This was used when we had a single embedding matrix for positions and tokens
# init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
# del init_params[1]
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.tokens_embed.weight.shape == init_params[1].shape
assert model.positions_embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
e.args += (model.positions_embed.weight.shape, init_params[0].shape)
raise
model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
model.positions_embed.weight.data = torch.from_numpy(init_params[0])
names.pop(0)
# Pop position and token embedding arrays
init_params.pop(0)
init_params.pop(0)
for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split("/")
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+\d+", m_name):
scope_names = re.split(r"(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "g":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "b":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "w":
pointer = getattr(pointer, "weight")
else:
pointer = getattr(pointer, scope_names[0])
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
ACT_FNS = {"relu": nn.ReLU, "swish": swish, "gelu": gelu_new}
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False):
super().__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
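        # Causal mask: lower-triangular ones so each position can attend only to itself and earlier positions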
self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.c_attn = Conv1D(n_state * 3, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.n_head, self.split_size // self.n_head, self.pruned_heads
)
index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
# Prune conv1d layers
self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
# Update hyper params
self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
self.n_head = self.n_head - len(heads)
self.pruned_heads = self.pruned_heads.union(heads)
def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
# w = w * self.bias + -1e9 * (1 - self.bias) # TF implem method: mask_attn_weights
# XD: self.b may be larger than w, so we need to crop it
b = self.bias[:, :, : w.size(-2), : w.size(-1)]
w = w * b + -1e4 * (1 - b)
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = [torch.matmul(w, v)]
if output_attentions:
outputs.append(w)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
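        # (batch, seq, n_head * head_dim) -> (batch, n_head, seq, head_dim); with k=True the result is
        # (batch, n_head, head_dim, seq) so that _attn can compute q @ k directly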
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1)
else:
return x.permute(0, 2, 1, 3)
def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
outputs = [a] + attn_outputs[1:]
return outputs # a, (attentions)
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = ACT_FNS[config.afn]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super().__init__()
nx = config.n_embd
self.attn = Attention(nx, n_ctx, config, scale)
self.ln_1 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
self.mlp = MLP(4 * nx, config)
self.ln_2 = nn.LayerNorm(nx, eps=config.layer_norm_epsilon)
def forward(self, x, attention_mask=None, head_mask=None, output_attentions=False):
attn_outputs = self.attn(
x, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions,
)
a = attn_outputs[0]
n = self.ln_1(x + a)
m = self.mlp(n)
h = self.ln_2(n + m)
outputs = [h] + attn_outputs[1:]
return outputs
class OpenAIGPTPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
a simple interface for downloading and loading pretrained models.
"""
config_class = OpenAIGPTConfig
load_tf_weights = load_tf_weights_in_openai_gpt
base_model_prefix = "transformer"
def _init_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding, Conv1D)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if isinstance(module, (nn.Linear, Conv1D)) and module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
OPENAI_GPT_START_DOCSTRING = r"""
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
usage and behavior.
Parameters:
config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
OPENAI_GPT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.OpenAIGPTTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.encode_plus` for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
:obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`None`):
If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under returned tensors for more detail.
"""
@add_start_docstrings(
"The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.",
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.tokens_embed = nn.Embedding(config.vocab_size, config.n_embd)
self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
self.init_weights()
def get_input_embeddings(self):
return self.tokens_embed
def set_input_embeddings(self, new_embeddings):
self.tokens_embed = new_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
for layer, heads in heads_to_prune.items():
self.h[layer].attn.prune_heads(heads)
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the last layer of the model.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import OpenAIGPTTokenizer, OpenAIGPTModel
import torch
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = OpenAIGPTModel.from_pretrained('openai-gpt')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if position_ids is None:
            # Code is different from when we had a single embedding matrix for position and token embeddings
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(input_shape[-1], dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# Attention mask.
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_mask = attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# Prepare head mask if needed
head_mask = self.get_head_mask(head_mask, self.config.n_layer)
if inputs_embeds is None:
inputs_embeds = self.tokens_embed(input_ids)
position_embeds = self.positions_embed(position_ids)
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
token_type_embeds = self.tokens_embed(token_type_ids)
else:
token_type_embeds = 0
hidden_states = inputs_embeds + position_embeds + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
all_attentions = ()
all_hidden_states = ()
for i, block in enumerate(self.h):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
outputs = block(hidden_states, attention_mask, head_mask[i], output_attentions=output_attentions)
hidden_states = outputs[0]
if output_attentions:
all_attentions = all_attentions + (outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states.view(*output_shape),)
outputs = (hidden_states.view(*output_shape),)
if output_hidden_states:
outputs = outputs + (all_hidden_states,)
if output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last hidden state, (all hidden states), (all attentions)
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling head on top
(linear layer with weights tied to the input embeddings). """,
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = OpenAIGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
Indices are selected in ``[-100, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided)
Language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import OpenAIGPTTokenizer, OpenAIGPTLMHeadModel
import torch
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=input_ids)
loss, logits = outputs[:2]
"""
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
outputs = (lm_logits,) + transformer_outputs[1:]
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), lm_logits, (all hidden states), (all attentions)
@add_start_docstrings(
"""OpenAI GPT Model transformer with a language modeling and a multiple-choice classification
head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers.
The language modeling head has its weights tied to the input embeddings,
the classification head takes as input the input of a specified classification token index in the input sequence).
""",
OPENAI_GPT_START_DOCSTRING,
)
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config.num_labels = 1
self.transformer = OpenAIGPTModel(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
self.multiple_choice_head = SequenceSummary(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head
@add_start_docstrings_to_callable(OPENAI_GPT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
mc_token_ids=None,
labels=None,
mc_labels=None,
output_attentions=None,
output_hidden_states=None,
**kwargs
):
r"""
mc_token_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input)
Index of the classification token in each input sequence.
Selected in the range ``[0, input_ids.size(-1) - 1]``.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`)
Labels for language modeling.
Note that the labels **are shifted** inside the model, i.e. you can set ``labels = input_ids``
Indices are selected in ``[-1, 0, ..., config.vocab_size]``
All labels set to ``-100`` are ignored (masked), the loss is only
computed for labels in ``[0, ..., config.vocab_size]``
mc_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size)`, `optional`, defaults to :obj:`None`)
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`):
Used to hide legacy arguments that have been deprecated.
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.OpenAIGPTConfig`) and inputs:
lm_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided):
Language modeling loss.
mc_loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`mc_labels` is provided):
Multiple choice classification loss.
lm_prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
mc_prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`):
Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
past (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers` with each tensor of shape :obj:`(2, batch_size, num_heads, sequence_length, embed_size_per_head)`):
Contains pre-computed hidden-states (key and values in the attention blocks).
Can be used (see `past` input) to speed up sequential decoding. The token ids which have their past given to this model
should not be passed as input ids as they have already been computed.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import OpenAIGPTTokenizer, OpenAIGPTDoubleHeadsModel
import torch
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
model = OpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')
tokenizer.add_special_tokens({'cls_token': '[CLS]'}) # Add a [CLS] to the vocabulary (we should train it also!)
model.resize_token_embeddings(len(tokenizer))
choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
mc_token_ids = torch.tensor([input_ids.size(-1)-1, input_ids.size(-1)-1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, mc_token_ids=mc_token_ids)
lm_prediction_scores, mc_prediction_scores = outputs[:2]
"""
if "lm_labels" in kwargs:
warnings.warn(
"The `lm_labels` argument is deprecated and will be removed in a future version, use `labels` instead.",
DeprecationWarning,
)
labels = kwargs.pop("lm_labels")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
transformer_outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
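        # The multiple-choice head pools the hidden state at each mc_token_id into one score per choice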
mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1)
outputs = (lm_logits, mc_logits) + transformer_outputs[1:]
if mc_labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1))
outputs = (loss,) + outputs
if labels is not None:
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
loss_fct = CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
outputs = (loss,) + outputs
return outputs # (lm loss), (mc loss), lm logits, mc logits, (all hidden_states), (attentions)
| [] |
2024-01-10 | Amit0617/lightningGPT | lightningGPT.py | #!/usr/bin/env python3
import openai
import sys
import os
from pyln.client import Plugin
# from lightning import Plugin
plugin = Plugin()
# plugin.add_option("advice", "Read logs and suggest other advices for working with lightning-cli")
# plugin.add_option("help", "Tell you appropriate commands for your query")
class LightningError(Exception):
    """Exception raised when there is an error in the lightningGPT plugin."""
    def __init__(self, message):
        super().__init__(message)
        plugin.log("LightningGPT error: {}".format(message))
@plugin.init()
def init(plugin, options, **kwargs):
    if not os.environ.get('OPENAI_API_KEY'):
        raise LightningError("Set your OPENAI_API_KEY in the Secrets in Tools Section of your replit")
else:
plugin.log("OPENAI_API_KEY detected as environment variable ๐ ")
plugin.log("LightningGPT plugin intialized...")
@plugin.method("helpGPT")
def helpGPT(plugin, command=None, *args):
"""Gives you appropriate commands or help for your query"""
text=""
file_paths=[f"{os.path.realpath(os.path.dirname(__file__))}"+"/cheatsheet.md"]
for file_path in file_paths:
with open(file_path, "r", encoding="utf-8") as file:
text += file.read()
query = f"""You are a ligtningNodeGPT, friendly and helpful AI assistant by Amit0617 that provides help with operating lightning nodes for btc. You give thorough answers with command examples if possible.
QUESTION: How to merge tables in pandas?
=========
Content: pandas provides various facilities for easily combining together Series or DataFrame with various kinds of set logic for the indexes and relational algebra functionality in the case of join / merge-type operations.
Source: 28-pl
Content: pandas provides a single function, merge(), as the entry point for all standard database join operations between DataFrame or named Series objects: \n\npandas.merge(left, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None)
Source: 30-pl
=========
FINAL ANSWER: To merge two tables in pandas, you can use the pd.merge() function. The basic syntax is: \n\npd.merge(left, right, on, how) \n\nwhere left and right are the two tables to merge, on is the column to merge on, and how is the type of merge to perform. \n\nFor example, to merge the two tables df1 and df2 on the column 'id', you can use: \n\npd.merge(df1, df2, on='id', how='inner')
SOURCES: 28-pl 30-pl
QUESTION: How are you?
=========
CONTENT:
SOURCE:
=========
FINAL ANSWER: I am fine, thank you. How are you?
SOURCES:
Question: {command}
=========
{text}
=========
FINAL ANSWER:
"""
response = openai.ChatCompletion.create(
messages=[
{'role': 'system', 'content': 'You answer questions about the lightning'},
{'role': 'user', 'content': query},
],
model='gpt-3.5-turbo',
temperature=0,
)
    return response['choices'][0]['message']['content']
# if __name__ == '__main__':
plugin.run() | [
"You are a ligtningNodeGPT, friendly and helpful AI assistant by Amit0617 that provides help with operating lightning nodes for btc. You give thorough answers with command examples if possible.\n\nQUESTION: How to merge tables in pandas?\n=========\nContent: pandas provides various facilities for easily combining together Series or DataFrame with various kinds of set logic for the indexes and relational algebra functionality in the case of join / merge-type operations.\nSource: 28-pl\nContent: pandas provides a single function, merge(), as the entry point for all standard database join operations between DataFrame or named Series objects: \n\npandas.merge(left, right, how='inner', on=None, left_on=None, right_on=None, left_index=False, right_index=False, sort=False, suffixes=('_x', '_y'), copy=True, indicator=False, validate=None)\nSource: 30-pl\n=========\nFINAL ANSWER: To merge two tables in pandas, you can use the pd.merge() function. The basic syntax is: \n\npd.merge(left, right, on, how) \n\nwhere left and right are the two tables to merge, on is the column to merge on, and how is the type of merge to perform. \n\nFor example, to merge the two tables df1 and df2 on the column 'id', you can use: \n\npd.merge(df1, df2, on='id', how='inner')\nSOURCES: 28-pl 30-pl\n\nQUESTION: How are you?\n=========\nCONTENT:\nSOURCE:\n=========\nFINAL ANSWER: I am fine, thank you. How are you?\nSOURCES:\n\nQuestion: PLACEHOLDER\n=========\ntexte053f106-2cb7-4d19-ba7f-5856cbf5a2f6\n=========\nFINAL ANSWER:\n\n",
"You answer questions about the lightning"
] |
2024-01-10 | anitha8242/OpenChat | dj_backend_server~api~utils~make_chain.py | from langchain.vectorstores.base import VectorStore
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain.memory import ConversationBufferMemory
from api.utils.get_openai_llm import get_llm
from langchain.chains import RetrievalQAWithSourcesChain, ConversationalRetrievalChain
from api.utils.get_prompts import get_qa_prompt_by_mode
load_dotenv()
def get_qa_chain(vector_store: VectorStore, mode, initial_prompt: str) -> RetrievalQA:
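    # Build a RetrievalQA chain that stuffs retrieved documents into the prompt template selected by mode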
llm = get_llm()
template = get_qa_prompt_by_mode(mode, initial_prompt=initial_prompt)
prompt = PromptTemplate.from_template(template)
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=vector_store.as_retriever(),
chain_type_kwargs={"prompt": prompt},
return_source_documents=True
)
return qa_chain
def getRetrievalQAWithSourcesChain(vector_store: VectorStore, mode, initial_prompt: str):
llm = get_llm()
chain = RetrievalQAWithSourcesChain.from_chain_type(llm, chain_type="stuff", retriever=vector_store.as_retriever())
return chain
def getConversationRetrievalChain(vector_store: VectorStore, mode, initial_prompt: str):
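    # Conversational variant: condenses the chat history into a standalone question before retrieval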
llm = get_llm()
template = get_qa_prompt_by_mode(mode, initial_prompt=initial_prompt)
prompt = PromptTemplate.from_template(template)
chain = ConversationalRetrievalChain.from_llm(
llm,
chain_type="stuff",
retriever=vector_store.as_retriever(),
verbose=True,
combine_docs_chain_kwargs={"prompt": prompt}
)
return chain | [] |
2024-01-10 | anitha8242/OpenChat | dj_backend_server~api~views~views_chat.py | from django.http import JsonResponse
from django.views.decorators.http import require_POST
from langchain import QAWithSourcesChain
from api.utils import get_vector_store
from api.utils.make_chain import getConversationRetrievalChain, getRetrievalQAWithSourcesChain
import json
from django.views.decorators.csrf import csrf_exempt
from api.interfaces import StoreOptions
from web.models.chat_histories import ChatHistory
from django.shortcuts import get_object_or_404
from web.models.chatbot import Chatbot
from uuid import uuid4
import logging
import traceback
from web.services.chat_history_service import get_chat_history_for_retrieval_chain
import os
from dotenv import load_dotenv
load_dotenv()
logger = logging.getLogger(__name__)
@csrf_exempt
@require_POST
def chat(request):
try:
body = json.loads(request.body.decode('utf-8'))
question = body.get('question')
namespace = body.get('namespace')
mode = body.get('mode')
initial_prompt = body.get('initial_prompt')
token = body.get('token')
session_id = body.get('session_id')
bot = get_object_or_404(Chatbot, token=token)
if not question:
return JsonResponse({'error': 'No question in the request'}, status=400)
sanitized_question = question.strip().replace('\n', ' ')
vector_store = get_vector_store(StoreOptions(namespace=namespace))
response_text = get_completion_response(vector_store=vector_store, initial_prompt=initial_prompt,mode=mode, sanitized_question=sanitized_question, session_id=session_id)
ChatHistory.objects.bulk_create([
ChatHistory(
id=uuid4(),
chatbot_id=bot.id,
from_user=True,
message=sanitized_question,
session_id=session_id
),
ChatHistory(
id=uuid4(),
chatbot_id=bot.id,
from_user=False,
message=response_text,
session_id=session_id
)
])
return JsonResponse({'text': response_text})
except json.JSONDecodeError:
return JsonResponse({'error': 'Invalid JSON in request body'}, status=400)
except Chatbot.DoesNotExist:
return JsonResponse({'error': 'Chatbot not found'}, status=404)
except Exception as e:
logger.error(str(e))
logger.error(traceback.format_exc())
return JsonResponse({'error': 'An error occurred'}, status=500)
def get_completion_response(vector_store, mode, initial_prompt, sanitized_question, session_id):
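    # CHAIN_TYPE selects between a stateless RetrievalQA chain and a conversational chain that replays stored history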
chain_type = os.getenv("CHAIN_TYPE", "conversation_retrieval")
chain: QAWithSourcesChain
if chain_type == 'retrieval_qa':
chain = getRetrievalQAWithSourcesChain(vector_store, mode, initial_prompt)
response = chain({"question": sanitized_question}, return_only_outputs=True)
response_text = response['answer']
elif chain_type == 'conversation_retrieval':
chain = getConversationRetrievalChain(vector_store, mode, initial_prompt)
chat_history = get_chat_history_for_retrieval_chain(session_id, limit=40)
response = chain({"question": sanitized_question, "chat_history": chat_history}, return_only_outputs=True)
response_text = response['answer']
return response_text
| [
"initial_prompt"
] |
2024-01-10 | anitha8242/OpenChat | dj_backend_server~api~utils~get_openai_llm.py | from langchain.llms import AzureOpenAI, OpenAI
import os
from dotenv import load_dotenv
from langchain.llms import LlamaCpp
load_dotenv()
from langchain import PromptTemplate, LLMChain
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
def get_llama_llm():
n_gpu_layers = 1 # Metal set to 1 is enough.
n_batch = 512 # Should be between 1 and n_ctx, consider the amount of RAM of your Apple Silicon Chip.
# Callbacks support token-wise streaming
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
llm = LlamaCpp(
model_path="llama-2-7b-chat.ggmlv3.q4_K_M.bin",
n_gpu_layers=n_gpu_layers,
n_batch=n_batch,
n_ctx=4096,
f16_kv=True, # MUST set to True, otherwise you will run into problem after a couple of calls
callback_manager=callback_manager,
verbose=True,
temperature=0.2,
)
return llm
# Azure OpenAI Language Model client
def get_azure_openai_llm():
"""Returns AzureOpenAI instance configured from environment variables"""
openai_api_type = os.environ['OPENAI_API_TYPE']
openai_api_key = os.environ['AZURE_OPENAI_API_KEY']
openai_deployment_name = os.environ['AZURE_OPENAI_DEPLOYMENT_NAME']
openai_model_name = os.environ['AZURE_OPENAI_COMPLETION_MODEL']
openai_api_version = os.environ['AZURE_OPENAI_API_VERSION']
openai_api_base=os.environ['AZURE_OPENAI_API_BASE']
return AzureOpenAI(
openai_api_base=openai_api_base,
openai_api_key=openai_api_key,
deployment_name=openai_deployment_name,
model_name=openai_model_name,
openai_api_type=openai_api_type,
openai_api_version=openai_api_version,
temperature=0,
batch_size=8
)
# OpenAI Language Model client
def get_openai_llm():
"""Returns OpenAI instance configured from environment variables"""
openai_api_key = os.environ['OPENAI_API_KEY']
return OpenAI(
temperature=0,
openai_api_key=openai_api_key
)
# recommend not caching initially, and optimizing only if you observe a clear performance benefit from caching the clients.
# The simplest thing that works is often best to start.
def get_llm():
"""Returns LLM client instance based on OPENAI_API_TYPE"""
clients = {
'azure': get_azure_openai_llm,
'openai': get_openai_llm,
'llama2': get_llama_llm
}
api_type = os.environ.get('OPENAI_API_TYPE')
if api_type not in clients:
raise ValueError(f"Invalid OPENAI_API_TYPE: {api_type}")
return clients[api_type]() | [] |
2024-01-10 | Sphincz/FenixGPT | scripts~parser~py2doc.py | import ast
import os
from pathlib import Path
import tiktoken
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
def find_files(directory):
files_list = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith('.py'):
files_list.append(os.path.join(root, file))
return files_list
def extract_functions(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
functions = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
func_name = node.name
func_def = ast.get_source_segment(source_code, node)
functions[func_name] = func_def
return functions
def extract_classes(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
classes = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.ClassDef):
class_name = node.name
function_names = []
for subnode in ast.walk(node):
if isinstance(subnode, ast.FunctionDef):
function_names.append(subnode.name)
classes[class_name] = ", ".join(function_names)
return classes
def extract_functions_and_classes(directory):
files = find_files(directory)
functions_dict = {}
classes_dict = {}
for file in files:
functions = extract_functions(file)
if functions:
functions_dict[file] = functions
classes = extract_classes(file)
if classes:
classes_dict[file] = classes
return functions_dict, classes_dict
def parse_functions(functions_dict, formats, dir):
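    # For every extracted function, ask the LLM for documentation and append it to a mirrored .md file under outputs/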
c1 = len(functions_dict)
for i, (source, functions) in enumerate(functions_dict.items(), start=1):
print(f"Processing file {i}/{c1}")
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for j, (name, function) in enumerate(functions.items(), start=1):
print(f"Processing function {j}/{len(functions)}")
prompt = PromptTemplate(
input_variables=["code"],
template="Code: \n{code}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(code=function))
mode = "a" if Path(f"outputs/{source_w}").exists() else "w"
with open(f"outputs/{source_w}", mode) as f:
f.write(
f"\n\n# Function name: {name} \n\nFunction: \n```\n{function}\n```, \nDocumentation: \n{response}")
def parse_classes(classes_dict, formats, dir):
c1 = len(classes_dict)
for i, (source, classes) in enumerate(classes_dict.items()):
print(f"Processing file {i + 1}/{c1}")
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for name, function_names in classes.items():
print(f"Processing Class {i + 1}/{c1}")
prompt = PromptTemplate(
input_variables=["class_name", "functions_names"],
template="Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(class_name=name, functions_names=function_names))
with open(f"outputs/{source_w}", "a" if Path(f"outputs/{source_w}").exists() else "w") as f:
f.write(f"\n\n# Class name: {name} \n\nFunctions: \n{function_names}, \nDocumentation: \n{response}")
def transform_to_docs(functions_dict, classes_dict, formats, dir):
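    # Estimate the token count with tiktoken and an approximate cost before asking the user to confirm the API calls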
docs_content = ''.join([str(key) + str(value) for key, value in functions_dict.items()])
docs_content += ''.join([str(key) + str(value) for key, value in classes_dict.items()])
num_tokens = len(tiktoken.get_encoding("cl100k_base").encode(docs_content))
total_price = ((num_tokens / 1000) * 0.02)
print(f"Number of Tokens = {num_tokens:,d}")
print(f"Approx Cost = ${total_price:,.2f}")
user_input = input("Price Okay? (Y/N)\n").lower()
if user_input == "y" or user_input == "":
if not Path("outputs").exists():
Path("outputs").mkdir()
parse_functions(functions_dict, formats, dir)
parse_classes(classes_dict, formats, dir)
print("All done!")
else:
print("The API was not called. No money was spent.")
| [
"Code: \n{code}, \nDocumentation: ",
"functions_names",
"class_name",
"Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: "
] |
2024-01-10 | Sphincz/FenixGPT | scripts~code_docs_gen.py | import ast
import json
from pathlib import Path
import dotenv
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
dotenv.load_dotenv()
ps = list(Path("inputs").glob("**/*.py"))
data = []
sources = []
for p in ps:
with open(p) as f:
data.append(f.read())
sources.append(p)
def get_functions_in_class(node):
functions = []
functions_code = []
for child in node.body:
if isinstance(child, ast.FunctionDef):
functions.append(child.name)
functions_code.append(ast.unparse(child))
return functions, functions_code
def get_classes_and_functions(source_code):
tree = ast.parse(source_code)
classes = {}
for node in tree.body:
if isinstance(node, ast.ClassDef):
class_name = node.name
function_name, function = get_functions_in_class(node)
# join function name and function code
functions = dict(zip(function_name, function))
classes[class_name] = functions
return classes
structure_dict = {}
c1 = 0
for code in data:
classes = get_classes_and_functions(ast.parse(code))
source = str(sources[c1])
structure_dict[source] = classes
c1 += 1
# save the structure dict as json
with open('structure_dict.json', 'w') as f:
json.dump(structure_dict, f)
if not Path("outputs").exists():
Path("outputs").mkdir()
c1 = len(structure_dict)
c2 = 0
for source, classes in structure_dict.items():
c2 += 1
print(f"Processing file {c2}/{c1}")
f1 = len(classes)
f2 = 0
for class_name, functions in classes.items():
f2 += 1
print(f"Processing class {f2}/{f1}")
source_w = source.replace("inputs/", "")
source_w = source_w.replace(".py", ".txt")
if not Path(f"outputs/{source_w}").exists():
with open(f"outputs/{source_w}", "w") as f:
f.write(f"Class: {class_name}")
else:
with open(f"outputs/{source_w}", "a") as f:
f.write(f"\n\nClass: {class_name}")
# append class name to the front
for function in functions:
b1 = len(functions)
b2 = 0
print(f"Processing function {b2}/{b1}")
b2 += 1
prompt = PromptTemplate(
input_variables=["code"],
template="Code: \n{code}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(code=functions[function]))
if not Path(f"outputs/{source_w}").exists():
with open(f"outputs/{source_w}", "w") as f:
f.write(f"Function: {functions[function]}, \nDocumentation: {response}")
else:
with open(f"outputs/{source_w}", "a") as f:
f.write(f"\n\nFunction: {functions[function]}, \nDocumentation: {response}")
| [
"Code: \n{code}, \nDocumentation: "
] |
2024-01-10 | Sphincz/FenixGPT | application~parser~py2doc.py | import ast
import os
from pathlib import Path
import tiktoken
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
def find_files(directory):
files_list = []
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith('.py'):
files_list.append(os.path.join(root, file))
return files_list
def extract_functions(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
functions = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
func_name = node.name
func_def = ast.get_source_segment(source_code, node)
functions[func_name] = func_def
return functions
def extract_classes(file_path):
with open(file_path, 'r') as file:
source_code = file.read()
classes = {}
tree = ast.parse(source_code)
for node in ast.walk(tree):
if isinstance(node, ast.ClassDef):
class_name = node.name
function_names = []
for subnode in ast.walk(node):
if isinstance(subnode, ast.FunctionDef):
function_names.append(subnode.name)
classes[class_name] = ", ".join(function_names)
return classes
def extract_functions_and_classes(directory):
files = find_files(directory)
functions_dict = {}
classes_dict = {}
for file in files:
functions = extract_functions(file)
if functions:
functions_dict[file] = functions
classes = extract_classes(file)
if classes:
classes_dict[file] = classes
return functions_dict, classes_dict
def parse_functions(functions_dict, formats, dir):
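    # For every extracted function, ask the LLM for documentation and append it to a mirrored .md file under outputs/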
c1 = len(functions_dict)
for i, (source, functions) in enumerate(functions_dict.items(), start=1):
print(f"Processing file {i}/{c1}")
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for j, (name, function) in enumerate(functions.items(), start=1):
print(f"Processing function {j}/{len(functions)}")
prompt = PromptTemplate(
input_variables=["code"],
template="Code: \n{code}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(code=function))
mode = "a" if Path(f"outputs/{source_w}").exists() else "w"
with open(f"outputs/{source_w}", mode) as f:
f.write(
f"\n\n# Function name: {name} \n\nFunction: \n```\n{function}\n```, \nDocumentation: \n{response}")
def parse_classes(classes_dict, formats, dir):
c1 = len(classes_dict)
for i, (source, classes) in enumerate(classes_dict.items()):
print(f"Processing file {i + 1}/{c1}")
source_w = source.replace(dir + "/", "").replace("." + formats, ".md")
subfolders = "/".join(source_w.split("/")[:-1])
Path(f"outputs/{subfolders}").mkdir(parents=True, exist_ok=True)
for name, function_names in classes.items():
print(f"Processing Class {i + 1}/{c1}")
prompt = PromptTemplate(
input_variables=["class_name", "functions_names"],
template="Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: ",
)
llm = OpenAI(temperature=0)
response = llm(prompt.format(class_name=name, functions_names=function_names))
with open(f"outputs/{source_w}", "a" if Path(f"outputs/{source_w}").exists() else "w") as f:
f.write(f"\n\n# Class name: {name} \n\nFunctions: \n{function_names}, \nDocumentation: \n{response}")
def transform_to_docs(functions_dict, classes_dict, formats, dir):
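    # Estimate the token count with tiktoken and an approximate cost before asking the user to confirm the API calls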
docs_content = ''.join([str(key) + str(value) for key, value in functions_dict.items()])
docs_content += ''.join([str(key) + str(value) for key, value in classes_dict.items()])
num_tokens = len(tiktoken.get_encoding("cl100k_base").encode(docs_content))
total_price = ((num_tokens / 1000) * 0.02)
print(f"Number of Tokens = {num_tokens:,d}")
print(f"Approx Cost = ${total_price:,.2f}")
user_input = input("Price Okay? (Y/N)\n").lower()
if user_input == "y" or user_input == "":
if not Path("outputs").exists():
Path("outputs").mkdir()
parse_functions(functions_dict, formats, dir)
parse_classes(classes_dict, formats, dir)
print("All done!")
else:
print("The API was not called. No money was spent.")
| [
"Class name: {class_name} \nFunctions: {functions_names}, \nDocumentation: ",
"functions_names",
"class_name",
"Code: \n{code}, \nDocumentation: "
] |
2024-01-10 | KinXY/CommandlineGPT | historyManager.py | import openai
import json
import os
import prompts
import chat
import openai_parameters
openai.api_key = json.loads(open("./config.json", "r").read())["api_key"]
def chooseHistory(myChat):
# open the json file of all history
with open("./history.json", "r") as f:
historyList = json.loads(f.read())
# print all the history with summary, with index
historyIndex = 1
for i in historyList:
        print(str(historyIndex) + ". " + i["file"] + " : " + i["summary"])
historyIndex += 1
print()
# ask the user to choose a history
while True:
print("Please enter the index of the history you want to load: ")
historyIndex = int(input("Input 0 if you want to start a new chat:"))
if historyIndex > len(historyList) or historyIndex < 0:
print("\033[91m" + "Invalid index!" + "\033[0m")
else:
break
# if the user want to start a new chat
if historyIndex == 0:
return
else:
loadDialogue(myChat, historyList[historyIndex - 1]["file"])
def loadDialogue(myChat, title):
myChat.title = title
with open(title, "r") as f:
listDialogue = json.loads(f.read())
myChat.chatHead = chat.dictToTree(listDialogue)
myChat.refreshEnd()
myChat.refreshHistory()
def summarizeHistory(myChat):
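    # Append the summary prompt to a copy of the conversation and ask the chat model for a short summary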
myHistory = myChat.history.copy()
myHistory.append({"role": "user", "content": prompts.summary_prompt})
try:
summary = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = myHistory,
max_tokens = openai_parameters.max_tokens,
temperature = openai_parameters.temperature,
)
except:
print("\033[91m" + "OpenAI API Error!\nUse previous summary." + "\033[0m")
return ""
return summary.choices[0].message.content
def dumpHistory(myChat):
if os.path.getsize("./history.json") == 0:
nowHistory = []
else:
with open("./history.json", "r") as f:
nowHistory = json.loads(f.read())
# delete the original json file
summaryBackup = ""
if myChat.title != "":
os.system("rm " + myChat.title)
for i in range(len(nowHistory)):
if nowHistory[i]["file"] == myChat.title:
summaryBackup = nowHistory[i]["summary"]
nowHistory.pop(i)
break
# dumps the history to a json file
myChat.dumpHistory()
# summarize the history
summary = summarizeHistory(myChat)
if summary == "":
if summaryBackup != "":
summary = summaryBackup
else:
summary = "No summary"
# update the history.json
nowHistory.append({"file" : myChat.title, "summary" : summary})
with open("./history.json", "w") as f:
f.write(json.dumps(nowHistory))
| [] |
2024-01-10 | KinXY/CommandlineGPT | chatManager.py | import openai
import json
import sys
import historyManager
import chatFunction
import values
import openai_parameters
openai.api_key = json.loads(open("./config.json", "r").read())["api_key"]
def userInput(myChat):
user_input = input("You: ")
match user_input.lower():
case "exit"|"quit"|"bye":
historyManager.dumpHistory(myChat)
print("ChatGPT: Goodbye!")
return values.BYE
case "\t":
return chatFunction.redirect(myChat)
case "\t\t":
return chatFunction.generateAgain(myChat)
case "help":
printHelpMessage()
case _:
myChat.addUserMessage(user_input)
myChat.history.append({"role": "user", "content": user_input})
return values.INPUT
def GPTResponse(myChat):
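    # Send the accumulated history to the chat model and record the assistant reply in the chat state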
try:
response = openai.ChatCompletion.create(
model = "gpt-3.5-turbo",
messages = myChat.history,
max_tokens = openai_parameters.max_tokens,
temperature = openai_parameters.temperature,
)
except:
print("\033[91m" + "OpenAI API Error!" + "\033[0m")
historyManager.dumpHistory(myChat)
print("ChatGPT: Goodbye!")
sys.exit()
myChat.addGPTMessage(response.choices[0].message.content)
myChat.history.append({"role": "assistant", "content": response.choices[0].message.content})
def printHelpMessage():
# print help message in purple
print("\033[95m" + "Help:" + "\033[0m")
print("\033[95m" + "1. Enter \"exit\" or \"quit\" or \"bye\" to exit the program." + "\033[0m")
print("\033[95m" + "2. Enter \"\t\" to redirect one response or user input in the conversation." + "\033[0m")
print("\033[95m" + "3. Enter \"\t\t\" to regenerate response for the last user input." + "\033[0m")
input() | [] |
2024-01-10 | lcharleslaing/english-to-any-language-app | translate_app.py | import os
import json
from dotenv import load_dotenv
import openai
import streamlit as st
# Load the .env file
load_dotenv()
openai.api_key = os.getenv("OPENAI_API_KEY")
def translate(text, target_language):
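    # One ChatCompletion call: the system message fixes the task and the user message carries the text to translate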
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[
{
"role": "system",
"content": "You are a helpful assistant that translates English to various languages.",
},
{
"role": "user",
"content": f"Translate the following English text to {target_language}: {text}",
},
],
)
return response["choices"][0]["message"]["content"] # type: ignore
def load_settings():
try:
with open("settings.json", "r") as f:
return json.load(f)
except FileNotFoundError:
return {
"default_language": "Tagalog-Filipino",
"history": [],
"openai_api_key": "",
}
def save_settings(settings):
with open("settings.json", "w") as f:
json.dump(settings, f)
def main():
st.title("English to Multiple Language Translator")
# Load settings
settings = load_settings()
openai.api_key = settings.get("openai_api_key", os.getenv("OPENAI_API_KEY"))
if not openai.api_key:
openai.api_key = st.text_input("Please enter your OpenAI API Key")
settings["openai_api_key"] = openai.api_key
save_settings(settings)
languages = [
"Afrikaans",
"Albanian",
"Amharic",
"Arabic",
"Armenian",
"Basque",
"Bengali",
"Byelorussian",
"Burmese",
"Bulgarian",
"Catalan",
"Czech",
"Chinese",
"Croatian",
"Danish",
"Dari",
"Dzongkha",
"Dutch",
"English",
"Esperanto",
"Estonian",
"Faroese",
"Farsi",
"Finnish",
"French",
"Gaelic",
"Galician",
"German",
"Greek",
"Hebrew",
"Hindi",
"Hungarian",
"Icelandic",
"Indonesian",
"Inuktitut (Eskimo)",
"Italian",
"Japanese",
"Khmer",
"Korean",
"Kurdish",
"Laotian",
"Latvian",
"Lappish",
"Lithuanian",
"Macedonian",
"Malay",
"Maltese",
"Nepali",
"Norwegian",
"Pashto",
"Polish",
"Portuguese",
"Romanian",
"Russian",
"Scots",
"Serbian",
"Slovak",
"Slovenian",
"Somali",
"Spanish",
"Swedish",
"Swahili",
"Tagalog-Filipino",
"Tajik",
"Tamil",
"Thai",
"Tibetan",
"Tigrinya",
"Tongan",
"Turkish",
"Turkmen",
"Ucrainian",
"Urdu",
"Uzbek",
]
default_language = settings["default_language"]
language = st.selectbox(
"Choose Language",
languages,
index=languages.index(default_language),
key="language",
)
if st.button("Set Default Language"):
settings["default_language"] = language
save_settings(settings)
st.success(f"Default language set to {language}")
with st.form(key="translation_form"):
text = st.text_area("Enter your text:", key="input_text")
submit_button = st.form_submit_button(label="Translate")
if submit_button:
translation = translate(text, language)
st.text_area("Translation:", value=translation, key="translation")
# Append to history
settings["history"].append(
{
"text": text,
"translation": translation,
"language": language,
}
)
# Save history
save_settings(settings)
clear_button = st.button("Clear Input")
if clear_button:
st.experimental_rerun()
# Display history
if settings["history"]:
st.subheader("Translation History")
for item in reversed(
settings["history"]
): # Display the latest translation at the top
st.markdown(f"**Text**: {item['text']}")
st.markdown(f"**Translation**: {item['translation']}")
st.markdown(f"**Language**: {item['language']}")
st.write("---")
if __name__ == "__main__":
main()
| [
"You are a helpful assistant that translates English to various languages.",
"Translate the following English text to PLACEHOLDER: PLACEHOLDER"
] |
2024-01-10 | bevanhunt/cyberchipped | cyberchipped~settings.py | import os
from typing import TYPE_CHECKING, Any, Optional, Union
from dotenv import load_dotenv
from pydantic import Field, SecretStr
from pydantic_settings import BaseSettings, SettingsConfigDict
if TYPE_CHECKING:
from openai import AsyncClient, Client
from openai.types.chat import ChatCompletion
load_dotenv(dotenv_path=os.path.join(os.getcwd(), ".env"))
class Settings(BaseSettings):
model_config = SettingsConfigDict(
extra="allow",
arbitrary_types_allowed=True,
)
def __setattr__(self, name: str, value: Any) -> None:
"""Preserve SecretStr type when setting values."""
field = self.model_fields.get(name)
if field:
annotation = field.annotation
base_types = (
annotation.__args__
if getattr(annotation, "__origin__", None) is Union
else (annotation,)
)
if SecretStr in base_types and not isinstance(value, SecretStr):
value = SecretStr(value)
super().__setattr__(name, value)
class ModelSettings(Settings):
model: str
@property
def encoder(self):
import tiktoken
return tiktoken.encoding_for_model(self.model).encode
class ChatCompletionSettings(ModelSettings):
model: str = Field(
default="gpt-3.5-turbo-1106",
description="The default chat model to use.",
)
async def acreate(self, **kwargs: Any) -> "ChatCompletion":
from cyberchipped.settings import settings
return await settings.openai.async_client.chat.completions.create(
model=self.model, **kwargs
)
def create(self, **kwargs: Any) -> "ChatCompletion":
from cyberchipped.settings import settings
return settings.openai.client.chat.completions.create(
model=self.model, **kwargs
)
class AssistantSettings(ModelSettings):
model: str = Field(
default="gpt-3.5-turbo-1106",
description="The default assistant model to use.",
)
class ChatSettings(Settings):
completions: ChatCompletionSettings = Field(default_factory=ChatCompletionSettings)
class OpenAISettings(Settings):
model_config = SettingsConfigDict(env_prefix="_openai_")
api_key: Optional[SecretStr] = Field(
default=os.getenv("OPENAI_API_KEY"),
description="Your OpenAI API key.",
)
organization: Optional[str] = Field(
default=None,
description="Your OpenAI organization ID.",
)
chat: ChatSettings = Field(default_factory=ChatSettings)
assistants: AssistantSettings = Field(default_factory=AssistantSettings)
@property
def async_client(
self, api_key: Optional[str] = None, **kwargs: Any
) -> "AsyncClient":
from openai import AsyncClient
if not (api_key or self.api_key):
raise ValueError("No API key provided.")
elif not api_key and self.api_key:
api_key = self.api_key.get_secret_value()
return AsyncClient(
api_key=api_key,
organization=self.organization,
**kwargs,
)
@property
def client(self, api_key: Optional[str] = None, **kwargs: Any) -> "Client":
from openai import Client
if not (api_key or self.api_key):
raise ValueError("No API key provided.")
elif not api_key and self.api_key:
api_key = self.api_key.get_secret_value()
return Client(
api_key=api_key,
organization=self.organization,
**kwargs,
)
class Settings(Settings):
model_config = SettingsConfigDict(env_prefix="_")
openai: OpenAISettings = Field(default_factory=OpenAISettings)
log_level: str = Field(
default="DEBUG",
description="The log level to use.",
)
settings = Settings()
| [] |
2024-01-10 | bevanhunt/cyberchipped | cyberchipped~assistants~formatting.py | from datetime import datetime
from openai.types.beta.threads import ThreadMessage
from rich import box
from rich.console import Console
from rich.panel import Panel
def pprint_message(message: ThreadMessage):
"""
Pretty-prints a single message using the rich library, highlighting the
speaker's role, the message text, any available images, and the message
timestamp in a panel format.
Args:
message (dict): A message object as described in the API documentation.
"""
console = Console()
role_colors = {
"user": "green",
"assistant": "blue",
}
color = role_colors.get(message.role, "red")
timestamp = datetime.fromtimestamp(message.created_at).strftime("%l:%M:%S %p")
content = ""
for item in message.content:
if item.type == "text":
content += item.text.value + "\n\n"
# Create the panel for the message
panel = Panel(
content.strip(),
title=f"[bold]{message.role.capitalize()}[/]",
subtitle=f"[italic]{timestamp}[/]",
title_align="left",
subtitle_align="right",
border_style=color,
box=box.ROUNDED,
# highlight=True,
width=100, # Fixed width for all panels
expand=True, # Panels always expand to the width of the console
padding=(1, 2),
)
# Printing the panel
console.print(panel)
def pprint_messages(messages: list[ThreadMessage]):
for message in messages:
pprint_message(message)
| [] |
2024-01-10 | bevanhunt/cyberchipped | cyberchipped~components~ai_listen.py | from typing import Literal
from cyberchipped.utilities.openai import get_client
from openai._types import FileTypes
async def ai_listen(
file: FileTypes,
response_format: Literal["json", "text", "srt", "verbose_json", "vtt"] = "text",
):
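    # Transcribe the audio file with the hosted whisper-1 model and return it in the requested format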
client = get_client()
response = await client.audio.transcriptions.create(
file=file,
model="whisper-1",
response_format=response_format,
)
return response
| [] |
2024-01-10 | bevanhunt/cyberchipped | cyberchipped~assistants~threads.py | from typing import TYPE_CHECKING, Optional
from openai.types.beta.threads import ThreadMessage
from pydantic import BaseModel, Field
from cyberchipped.utilities.asyncio import (
ExposeSyncMethodsMixin,
expose_sync_method,
)
from cyberchipped.utilities.logging import get_logger
from cyberchipped.utilities.openai import get_client
from cyberchipped.utilities.pydantic import parse_as
logger = get_logger("Threads")
if TYPE_CHECKING:
from .assistants import Assistant
from .runs import Run
class Thread(BaseModel, ExposeSyncMethodsMixin):
id: Optional[str] = None
metadata: dict = {}
messages: list[ThreadMessage] = Field([], repr=False)
@expose_sync_method("get")
async def get_async(self):
"""
Gets a thread.
"""
client = get_client()
response = await client.beta.threads.retrieve(thread_id=self.id)
self.id = response.id
return self
@expose_sync_method("create")
async def create_async(self=None):
"""
Creates a thread.
"""
client = get_client()
response = await client.beta.threads.create()
self.id = response.id
return self
@expose_sync_method("add")
async def add_async(
self, message: str, file_paths: Optional[list[str]] = None
) -> ThreadMessage:
"""
Add a user message to the thread.
"""
client = get_client()
# Create the message with the attached files
response = await client.beta.threads.messages.create(
thread_id=self.id,
role="user",
content=message,
)
return ThreadMessage.model_validate(response.model_dump())
@expose_sync_method("get_messages")
async def get_messages_async(
self,
limit: int = None,
before_message: Optional[str] = None,
after_message: Optional[str] = None,
):
client = get_client()
response = await client.beta.threads.messages.list(
thread_id=self.id,
# note that because messages are returned in descending order,
# we reverse "before" and "after" to the API
before=after_message,
after=before_message,
limit=limit,
order="desc",
)
return parse_as(list[ThreadMessage], reversed(response.model_dump()["data"]))
@expose_sync_method("delete")
async def delete_async(self):
client = get_client()
await client.beta.threads.delete(thread_id=self.id)
self.id = None
@expose_sync_method("cancel_run")
async def cancel_run_async(
self,
assistant: "Assistant",
**run_kwargs,
) -> "Run":
"""
Cancels the run of this thread with the provided assistant.
"""
from cyberchipped.assistants.runs import Run
run = Run(assistant=assistant, thread=self, **run_kwargs)
return await run.cancel_async()
@expose_sync_method("say")
async def say_async(self, text: str, assistant: "Assistant") -> str:
"""
Wraps the full process of adding a message to the thread and running it
"""
from cyberchipped.assistants.runs import Run
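        # Cancel any run that is still in flight first; the API rejects new messages while a run is active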
try:
await self.cancel_run_async(assistant=assistant)
except Exception:
pass
await self.add_async(text)
run = Run(assistant=assistant, thread=self)
await run.run_async()
messages = await self.get_messages_async()
last_message = messages[-1]
ai_message = last_message.content[0].text.value
return ai_message
| [] |
2024-01-10 | bevanhunt/cyberchipped | cyberchipped~components~ai_function.py | import asyncio
import inspect
import json
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Generic,
Optional,
TypeVar,
Union,
overload,
)
from pydantic import BaseModel, Field, ValidationError
from typing_extensions import ParamSpec, Self
from cyberchipped.components.prompt import PromptFunction
from cyberchipped.serializers import create_tool_from_type
from cyberchipped.utilities.asyncio import (
ExposeSyncMethodsMixin,
expose_sync_method,
run_async,
)
from cyberchipped.utilities.jinja import (
BaseEnvironment,
)
from cyberchipped.utilities.logging import get_logger
if TYPE_CHECKING:
from openai.types.chat import ChatCompletion
T = TypeVar("T")
P = ParamSpec("P")
class AIFunction(BaseModel, Generic[P, T], ExposeSyncMethodsMixin):
fn: Optional[Callable[P, T]] = None
environment: Optional[BaseEnvironment] = None
prompt: Optional[str] = Field(default=inspect.cleandoc("""
Your job is to generate likely outputs for a Python function with the
following signature and docstring:
{{_source_code}}
The user will provide function inputs (if any) and you must respond with
the most likely result.
user: The function was called with the following inputs:
{%for (arg, value) in _arguments.items()%}
- {{ arg }}: {{ value }}
{% endfor %}
What is its output?
"""))
name: str = "FormatResponse"
description: str = "Formats the response."
field_name: str = "data"
field_description: str = "The data to format."
render_kwargs: dict[str, Any] = Field(default_factory=dict)
create: Optional[Callable[..., "ChatCompletion"]] = Field(default=None)
def __call__(self, *args: P.args, **kwargs: P.kwargs) -> Union[T, Awaitable[T]]:
if self.fn is None:
raise NotImplementedError
from cyberchipped import settings
logger = get_logger("cyberchipped.ai_fn")
logger.debug_kv(
"AI Function Call",
f"Calling {self.fn.__name__} with {args} and {kwargs}",
"blue",
)
is_async_fn = asyncio.iscoroutinefunction(self.fn)
call = "async_call" if is_async_fn else "sync_call"
create = (
self.create or settings.openai.chat.completions.acreate
if is_async_fn
else settings.openai.chat.completions.create
)
result = getattr(self, call)(create, *args, **kwargs)
logger.debug_kv("AI Function Call", f"Returned {result}", "blue")
return result
async def async_call(
self, acreate: Callable[..., Awaitable[Any]], *args: P.args, **kwargs: P.kwargs
) -> T:
_response = await acreate(**self.as_prompt(*args, **kwargs).serialize())
return self.parse(_response)
def sync_call(
self, create: Callable[..., Any], *args: P.args, **kwargs: P.kwargs
) -> T:
_response = create(**self.as_prompt(*args, **kwargs).serialize())
return self.parse(_response)
def parse(self, response: "ChatCompletion") -> T:
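        # Rebuild the response-format tool schema from the function's return annotation and validate the tool-call arguments against it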
tool_calls = response.choices[0].message.tool_calls
if tool_calls is None:
raise NotImplementedError
if self.fn is None:
raise NotImplementedError
arguments = tool_calls[0].function.arguments
tool = create_tool_from_type(
_type=self.fn.__annotations__["return"],
model_name=self.name,
model_description=self.description,
field_name=self.field_name,
field_description=self.field_description,
).function
if not tool or not tool.model:
raise NotImplementedError
try:
return getattr(tool.model.model_validate_json(arguments), self.field_name)
except ValidationError:
# When the user provides a dict obj as a type hint, the arguments
# are returned usually as an object and not a nested dict.
_arguments: str = json.dumps({self.field_name: json.loads(arguments)})
return getattr(tool.model.model_validate_json(_arguments), self.field_name)
@expose_sync_method("map")
async def amap(self, *map_args: list[Any], **map_kwargs: list[Any]) -> list[T]:
"""
Map the AI function over a sequence of arguments. Runs concurrently.
A `map` twin method is provided by the `expose_sync_method` decorator.
You can use `map` or `amap` synchronously or asynchronously, respectively,
regardless of whether the user function is synchronous or asynchronous.
Arguments should be provided as if calling the function normally, but
each argument must be a list. The function is called once for each item
in the list, and the results are returned in a list.
For example, fn.map([1, 2]) is equivalent to [fn(1), fn(2)].
fn.map([1, 2], x=['a', 'b']) is equivalent to [fn(1, x='a'), fn(2, x='b')].
"""
tasks: list[Any] = []
if map_args and map_kwargs:
max_length = max(
len(arg) for arg in (map_args + tuple(map_kwargs.values()))
)
elif map_args:
max_length = max(len(arg) for arg in map_args)
else:
max_length = max(len(v) for v in map_kwargs.values())
for i in range(max_length):
call_args = [arg[i] if i < len(arg) else None for arg in map_args]
call_kwargs = (
{k: v[i] if i < len(v) else None for k, v in map_kwargs.items()}
if map_kwargs
else {}
)
tasks.append(run_async(self, *call_args, **call_kwargs))
return await asyncio.gather(*tasks)
def as_prompt(
self,
*args: P.args,
**kwargs: P.kwargs,
) -> PromptFunction[BaseModel]:
return PromptFunction[BaseModel].as_function_call(
fn=self.fn,
environment=self.environment,
prompt=self.prompt,
model_name=self.name,
model_description=self.description,
field_name=self.field_name,
field_description=self.field_description,
**self.render_kwargs,
)(*args, **kwargs)
@overload
@classmethod
def as_decorator(
cls: type[Self],
*,
environment: Optional[BaseEnvironment] = None,
prompt: Optional[str] = None,
model_name: str = "FormatResponse",
model_description: str = "Formats the response.",
field_name: str = "data",
field_description: str = "The data to format.",
acreate: Optional[Callable[..., Awaitable[Any]]] = None,
**render_kwargs: Any,
) -> Callable[P, Self]:
pass
@overload
@classmethod
def as_decorator(
cls: type[Self],
fn: Callable[P, T],
*,
environment: Optional[BaseEnvironment] = None,
prompt: Optional[str] = None,
model_name: str = "FormatResponse",
model_description: str = "Formats the response.",
field_name: str = "data",
field_description: str = "The data to format.",
acreate: Optional[Callable[..., Awaitable[Any]]] = None,
**render_kwargs: Any,
) -> Self:
pass
@classmethod
def as_decorator(
cls: type[Self],
fn: Optional[Callable[P, T]] = None,
*,
environment: Optional[BaseEnvironment] = None,
prompt: Optional[str] = None,
model_name: str = "FormatResponse",
model_description: str = "Formats the response.",
field_name: str = "data",
field_description: str = "The data to format.",
**render_kwargs: Any,
) -> Union[Callable[[Callable[P, T]], Self], Self]:
def decorator(func: Callable[P, T]) -> Self:
return cls(
fn=func,
environment=environment,
name=model_name,
description=model_description,
field_name=field_name,
field_description=field_description,
**({"prompt": prompt} if prompt else {}),
**render_kwargs,
)
if fn is not None:
return decorator(fn)
return decorator
@overload
def ai_fn(
*,
environment: Optional[BaseEnvironment] = None,
prompt: Optional[str] = None,
model_name: str = "FormatResponse",
model_description: str = "Formats the response.",
field_name: str = "data",
field_description: str = "The data to format.",
**render_kwargs: Any,
) -> Callable[[Callable[P, T]], Callable[P, T]]:
pass
@overload
def ai_fn(
fn: Callable[P, T],
*,
environment: Optional[BaseEnvironment] = None,
prompt: Optional[str] = None,
model_name: str = "FormatResponse",
model_description: str = "Formats the response.",
field_name: str = "data",
field_description: str = "The data to format.",
**render_kwargs: Any,
) -> Callable[P, T]:
pass
def ai_fn(
fn: Optional[Callable[P, T]] = None,
*,
environment: Optional[BaseEnvironment] = None,
prompt: Optional[str] = None,
model_name: str = "FormatResponse",
model_description: str = "Formats the response.",
field_name: str = "data",
field_description: str = "The data to format.",
**render_kwargs: Any,
) -> Union[Callable[[Callable[P, T]], Callable[P, T]], Callable[P, T]]:
if fn is not None:
return AIFunction.as_decorator( # type: ignore
fn=fn,
environment=environment,
prompt=prompt,
model_name=model_name,
model_description=model_description,
field_name=field_name,
field_description=field_description,
**render_kwargs,
)
def decorator(func: Callable[P, T]) -> Callable[P, T]:
return AIFunction.as_decorator( # type: ignore
fn=func,
environment=environment,
prompt=prompt,
model_name=model_name,
model_description=model_description,
field_name=field_name,
field_description=field_description,
**render_kwargs,
)
return decorator
| [
"\n Your job is to generate likely outputs for a Python function with the\n following signature and docstring:\n\n {{_source_code}}\n\n The user will provide function inputs (if any) and you must respond with\n the most likely result.\n\n user: The function was called with the following inputs:\n {%for (arg, value) in _arguments.items()%}\n - {{ arg }}: {{ value }}\n {% endfor %}\n\n What is its output?\n "
] |
2024-01-10 | bevanhunt/cyberchipped | cyberchipped~assistants~runs.py | import asyncio
from typing import Any, Callable, Optional, Union
from openai.types.beta.threads.run import Run as OpenAIRun
from pydantic import BaseModel, Field, field_validator
import cyberchipped.utilities.tools
from cyberchipped.requests import Tool
from cyberchipped.tools.assistants import AssistantTools, CancelRun
from cyberchipped.utilities.asyncio import ExposeSyncMethodsMixin, expose_sync_method
from cyberchipped.utilities.logging import get_logger
from cyberchipped.utilities.openai import get_client
from .assistants import Assistant
from .threads import Thread
logger = get_logger("Runs")
class Run(BaseModel, ExposeSyncMethodsMixin):
thread: Thread
assistant: Assistant
instructions: Optional[str] = Field(
None, description="Replacement instructions to use for the run."
)
additional_instructions: Optional[str] = Field(
None,
description=(
"Additional instructions to append to the assistant's instructions."
),
)
tools: Optional[list[Union[AssistantTools, Callable]]] = Field(
None, description="Replacement tools to use for the run."
)
additional_tools: Optional[list[AssistantTools]] = Field(
None,
description="Additional tools to append to the assistant's tools. ",
)
run: OpenAIRun = None
data: Any = None
@field_validator("tools", "additional_tools", mode="before")
def format_tools(cls, tools: Union[None, list[Union[Tool, Callable]]]):
if tools is not None:
return [
(
tool
if isinstance(tool, Tool)
else cyberchipped.utilities.tools.tool_from_function(tool)
)
for tool in tools
]
@expose_sync_method("refresh")
async def refresh_async(self):
client = get_client()
self.run = await client.beta.threads.runs.retrieve(
run_id=self.run.id, thread_id=self.thread.id
)
@expose_sync_method("cancel")
async def cancel_async(self):
client = get_client()
await client.beta.threads.runs.cancel(
run_id=self.run.id, thread_id=self.thread.id
)
async def _handle_step_requires_action(self):
client = get_client()
if self.run.status != "requires_action":
return
if self.run.required_action.type == "submit_tool_outputs":
tool_outputs = []
tools = self.get_tools()
for tool_call in self.run.required_action.submit_tool_outputs.tool_calls:
try:
output = cyberchipped.utilities.tools.call_function_tool(
tools=tools,
function_name=tool_call.function.name,
function_arguments_json=tool_call.function.arguments,
)
except CancelRun as exc:
logger.debug(f"Ending run with data: {exc.data}")
raise
except Exception as exc:
output = f"Error calling function {tool_call.function.name}: {exc}"
logger.error(output)
tool_outputs.append(
dict(tool_call_id=tool_call.id, output=output or "")
)
await client.beta.threads.runs.submit_tool_outputs(
thread_id=self.thread.id, run_id=self.run.id, tool_outputs=tool_outputs
)
def get_instructions(self) -> str:
if self.instructions is None:
instructions = self.assistant.get_instructions() or ""
else:
instructions = self.instructions
if self.additional_instructions is not None:
instructions = "\n\n".join([instructions, self.additional_instructions])
return instructions
def get_tools(self) -> list[AssistantTools]:
tools = []
if self.tools is None:
tools.extend(self.assistant.get_tools())
else:
tools.extend(self.tools)
if self.additional_tools is not None:
tools.extend(self.additional_tools)
return tools
async def run_async(self) -> "Run":
client = get_client()
create_kwargs = {}
if self.instructions is not None or self.additional_instructions is not None:
create_kwargs["instructions"] = self.get_instructions()
if self.tools is not None or self.additional_tools is not None:
create_kwargs["tools"] = self.get_tools()
self.run = await client.beta.threads.runs.create(
thread_id=self.thread.id, assistant_id=self.assistant.id, **create_kwargs
)
try:
await asyncio.wait_for(self._run_loop(), timeout=60)
except asyncio.TimeoutError:
if self.run.status != "completed":
# Cancel the run if it's not completed
await client.beta.threads.runs.cancel(
run_id=self.run.id, thread_id=self.thread.id
)
self.data = "Run cancelled due to timeout."
else:
self.data = "Run already completed; no need to cancel."
await self.refresh_async()
async def _run_loop(self):
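        # Poll the run until it leaves the queued/in_progress states, servicing required tool calls along the way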
while self.run.status in ("queued", "in_progress", "requires_action"):
if self.run.status == "requires_action":
await self._handle_step_requires_action()
await asyncio.sleep(0.1)
await self.refresh_async()
| [] |
2024-01-10 | Arthuros120/PostAumatique | src~model~ia.py | # -*- coding: utf-8 -*-
"""Ia class"""
import logging
import openai
class Ia:
def __init__(self, config):
self.logger = logging.getLogger('PostAumatique-Log')
self.logger.info("Initialisation de l'IA...")
self.logger.info("Chargement de la configuration...")
self.config = config
self.logger.info("Chargement de l'API key...")
self.api_key = self.config.get("Ia", "apiKey")
def generate_text(self, body_path: str, society) -> str:
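        # Build the prompt from the speech template and the letter body, then request one or more completions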
self.logger.info("Dรฉmarage de la gรฉnรฉration de texte...")
body_text = ""
speech_text = ""
self.logger.info("Chargement du fichier de base...")
with open(body_path, "r", encoding="utf-8") as file:
lines = file.readlines()
for line in lines:
body_text += line
        self.logger.info(
            "Loading the speech for the cover letter...")
with open("res/motivationLetter/speech.txt", "r", encoding="utf-8") as file:
lines = file.readlines()
for line in lines:
speech_text += line
        speech_text = speech_text.replace("¤Society¤", society.get_name())
        speech_text = speech_text.replace("¤body¤", body_text)
        self.logger.info("Setting the API key...")
        openai.api_key = self.api_key
        self.logger.info("Generating the text for you...")
if self.config.get("Ia", "safeMode"):
            self.logger.info(
                "Safe mode enabled, I will generate 3 different texts so you can pick the one you prefer.")
            choices = []
            for i in range(0, 3):
                self.logger.info("Generating text {} ...".format(i + 1))
while True:
try:
response = openai.Completion.create(
model="text-davinci-003",
prompt=speech_text,
temperature=0.5,
max_tokens=200,
top_p=1,
frequency_penalty=0,
presence_penalty=0.6,
stop=["ยค"]
)
break
except Exception as e:
self.logger.error("Une erreur est survenue lors de la gรฉnรฉration du texte, on recommence...")
self.logger.error(e)
continue
choices.append(response.choices[0].text.replace("\n", "")) # type: ignore
self.logger.info(
"Here are the 3 text proposals I was able to generate for you:")
for i in range(0, len(choices)):
print()
self.logger.info("{}: {}".format((i + 1), choices[i]))
print()
self.logger.debug("Demande de sรฉlection du texte...")
select = input("Quel texte veux-tu utiliser ? (1, 2 ou 3) : ")
self.logger.debug("Retour de l'utilisateur : " + select)
while select != "1" and select != "2" and select != "3":
self.logger.debug(
"L'utilisateur a entrรฉ une valeur incorrecte, on lui redemande...")
select = input("Quel texte veux-tu utiliser ? (1, 2 ou 3) : ")
self.logger.debug("Retour de l'utilisateur : " + select)
self.logger.info("Je choisis le texte {} ...".format(select))
choice = choices[int(select) - 1]
else:
self.logger.warning(
"Safe mode disabled, generating a single text for you...")
while True:
try:
response = openai.Completion.create(
model="text-davinci-003",
prompt=speech_text,
temperature=0.5,
max_tokens=200,
top_p=1,
frequency_penalty=0,
presence_penalty=0.6,
stop=["ยค"]
)
break
except Exception as e:
self.logger.error("Une erreur est survenue lors de la gรฉnรฉration du texte, on recommence...")
self.logger.error(e)
continue
choice = response.choices[0].text.replace("\n", "") # type: ignore
self.logger.info("J'ai fini de gรฉnรฉrer le texte pour toi !")
return choice
| [] |
2024-01-10 | denson/langflow | src~backend~langflow~template~frontend_node~agents.py | from typing import Optional
from langchain.agents import types
from langflow.template.field.base import TemplateField
from langflow.template.frontend_node.base import FrontendNode
from langflow.template.template.base import Template
NON_CHAT_AGENTS = {
agent_type: agent_class
for agent_type, agent_class in types.AGENT_TO_CLASS.items()
if "chat" not in agent_type.value
}
class SQLAgentNode(FrontendNode):
name: str = "SQLAgent"
template: Template = Template(
type_name="sql_agent",
fields=[
TemplateField(
field_type="str",
required=True,
placeholder="",
is_list=False,
show=True,
multiline=False,
value="",
name="database_uri",
),
TemplateField(
field_type="BaseLanguageModel",
required=True,
show=True,
name="llm",
display_name="LLM",
),
],
)
description: str = """Construct an SQL agent from an LLM and tools."""
base_classes: list[str] = ["AgentExecutor"]
def to_dict(self):
return super().to_dict()
class VectorStoreRouterAgentNode(FrontendNode):
name: str = "VectorStoreRouterAgent"
template: Template = Template(
type_name="vectorstorerouter_agent",
fields=[
TemplateField(
field_type="VectorStoreRouterToolkit",
required=True,
show=True,
name="vectorstoreroutertoolkit",
display_name="Vector Store Router Toolkit",
),
TemplateField(
field_type="BaseLanguageModel",
required=True,
show=True,
name="llm",
display_name="LLM",
),
],
)
description: str = """Construct an agent from a Vector Store Router."""
base_classes: list[str] = ["AgentExecutor"]
def to_dict(self):
return super().to_dict()
class VectorStoreAgentNode(FrontendNode):
name: str = "VectorStoreAgent"
template: Template = Template(
type_name="vectorstore_agent",
fields=[
TemplateField(
field_type="VectorStoreInfo",
required=True,
show=True,
name="vectorstoreinfo",
display_name="Vector Store Info",
),
TemplateField(
field_type="BaseLanguageModel",
required=True,
show=True,
name="llm",
display_name="LLM",
),
],
)
description: str = """Construct an agent from a Vector Store."""
base_classes: list[str] = ["AgentExecutor"]
def to_dict(self):
return super().to_dict()
class SQLDatabaseNode(FrontendNode):
name: str = "SQLDatabase"
template: Template = Template(
type_name="sql_database",
fields=[
TemplateField(
field_type="str",
required=True,
is_list=False,
show=True,
multiline=False,
value="",
name="uri",
),
],
)
description: str = """SQLAlchemy wrapper around a database."""
base_classes: list[str] = ["SQLDatabase"]
def to_dict(self):
return super().to_dict()
class CSVAgentNode(FrontendNode):
name: str = "CSVAgent"
template: Template = Template(
type_name="csv_agent",
fields=[
TemplateField(
field_type="file",
required=True,
show=True,
name="path",
value="",
suffixes=[".csv"],
fileTypes=["csv"],
),
TemplateField(
field_type="BaseLanguageModel",
required=True,
show=True,
name="llm",
display_name="LLM",
),
],
)
description: str = """Construct a CSV agent from a CSV and tools."""
base_classes: list[str] = ["AgentExecutor"]
def to_dict(self):
return super().to_dict()
class InitializeAgentNode(FrontendNode):
name: str = "AgentInitializer"
display_name: str = "AgentInitializer"
template: Template = Template(
type_name="initialize_agent",
fields=[
TemplateField(
field_type="str",
required=True,
is_list=True,
show=True,
multiline=False,
options=list(NON_CHAT_AGENTS.keys()),
value=list(NON_CHAT_AGENTS.keys())[0],
name="agent",
advanced=False,
),
TemplateField(
field_type="BaseChatMemory",
required=False,
show=True,
name="memory",
advanced=False,
),
TemplateField(
field_type="Tool",
required=False,
show=True,
name="tools",
is_list=True,
advanced=False,
),
TemplateField(
field_type="BaseLanguageModel",
required=True,
show=True,
name="llm",
display_name="LLM",
advanced=False,
),
],
)
description: str = """Construct a zero shot agent from an LLM and tools."""
base_classes: list[str] = ["AgentExecutor", "function"]
def to_dict(self):
return super().to_dict()
@staticmethod
def format_field(field: TemplateField, name: Optional[str] = None) -> None:
# do nothing and don't return anything
pass
class JsonAgentNode(FrontendNode):
name: str = "JsonAgent"
template: Template = Template(
type_name="json_agent",
fields=[
TemplateField(
field_type="BaseToolkit",
required=True,
show=True,
name="toolkit",
),
TemplateField(
field_type="BaseLanguageModel",
required=True,
show=True,
name="llm",
display_name="LLM",
),
],
)
description: str = """Construct a json agent from an LLM and tools."""
base_classes: list[str] = ["AgentExecutor"]
def to_dict(self):
return super().to_dict()
| [
"Vector Store Info",
"VectorStoreInfo",
"agent",
"sql_agent",
"vectorstore_agent",
"BaseLanguageModel",
"initialize_agent",
"vectorstoreroutertoolkit",
"csv_agent",
"Vector Store Router Toolkit",
"database_uri",
"sql_database",
"json_agent",
"VectorStoreRouterToolkit",
"vectorstoreinfo",
"BaseToolkit",
"BaseChatMemory",
"vectorstorerouter_agent"
] |
2024-01-10 | denson/langflow | src~backend~langflow~processing~process.py | import contextlib
import io
from langchain.schema import AgentAction
import json
from langflow.interface.run import (
build_langchain_object_with_caching,
get_memory_key,
update_memory_keys,
)
from langflow.utils.logger import logger
from langflow.graph import Graph
from typing import Any, Dict, List, Tuple
def fix_memory_inputs(langchain_object):
"""
Given a LangChain object, this function checks if it has a memory attribute and if that memory key exists in the
object's input variables. If so, it does nothing. Otherwise, it gets a possible new memory key using the
get_memory_key function and updates the memory keys using the update_memory_keys function.
"""
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
try:
if langchain_object.memory.memory_key in langchain_object.input_variables:
return
except AttributeError:
input_variables = (
langchain_object.prompt.input_variables
if hasattr(langchain_object, "prompt")
else langchain_object.input_keys
)
if langchain_object.memory.memory_key in input_variables:
return
possible_new_mem_key = get_memory_key(langchain_object)
if possible_new_mem_key is not None:
update_memory_keys(langchain_object, possible_new_mem_key)
def format_actions(actions: List[Tuple[AgentAction, str]]) -> str:
"""Format a list of (AgentAction, answer) tuples into a string."""
output = []
for action, answer in actions:
log = action.log
tool = action.tool
tool_input = action.tool_input
output.append(f"Log: {log}")
if "Action" not in log and "Action Input" not in log:
output.append(f"Tool: {tool}")
output.append(f"Tool Input: {tool_input}")
output.append(f"Answer: {answer}")
output.append("") # Add a blank line
return "\n".join(output)
def get_result_and_thought(langchain_object, message: str):
"""Get result and thought from extracted json"""
try:
if hasattr(langchain_object, "verbose"):
langchain_object.verbose = True
chat_input = None
memory_key = ""
if hasattr(langchain_object, "memory") and langchain_object.memory is not None:
memory_key = langchain_object.memory.memory_key
if hasattr(langchain_object, "input_keys"):
for key in langchain_object.input_keys:
if key not in [memory_key, "chat_history"]:
chat_input = {key: message}
else:
chat_input = message # type: ignore
if hasattr(langchain_object, "return_intermediate_steps"):
# https://github.com/hwchase17/langchain/issues/2068
# Deactivating until we have a frontend solution
# to display intermediate steps
langchain_object.return_intermediate_steps = False
fix_memory_inputs(langchain_object)
with io.StringIO() as output_buffer, contextlib.redirect_stdout(output_buffer):
try:
# if hasattr(langchain_object, "acall"):
# output = await langchain_object.acall(chat_input)
# else:
output = langchain_object(chat_input)
except ValueError as exc:
# make the error message more informative
logger.debug(f"Error: {str(exc)}")
output = langchain_object.run(chat_input)
intermediate_steps = (
output.get("intermediate_steps", []) if isinstance(output, dict) else []
)
result = (
output.get(langchain_object.output_keys[0])
if isinstance(output, dict)
else output
)
if intermediate_steps:
thought = format_actions(intermediate_steps)
else:
thought = output_buffer.getvalue()
except Exception as exc:
raise ValueError(f"Error: {str(exc)}") from exc
return result, thought
def load_or_build_langchain_object(data_graph, is_first_message=False):
"""
Load langchain object from cache if it exists, otherwise build it.
"""
if is_first_message:
build_langchain_object_with_caching.clear_cache()
return build_langchain_object_with_caching(data_graph)
def process_graph_cached(data_graph: Dict[str, Any], message: str):
"""
Process graph by extracting input variables and replacing ZeroShotPrompt
with PromptTemplate, then run the graph and return the result and thought.
"""
# Load langchain object
is_first_message = len(data_graph.get("chatHistory", [])) == 0
langchain_object = load_or_build_langchain_object(data_graph, is_first_message)
logger.debug("Loaded langchain object")
if langchain_object is None:
# Raise user facing error
raise ValueError(
"There was an error loading the langchain_object. Please, check all the nodes and try again."
)
# Generate result and thought
logger.debug("Generating result and thought")
result, thought = get_result_and_thought(langchain_object, message)
logger.debug("Generated result and thought")
return {"result": str(result), "thought": thought.strip()}
def load_flow_from_json(path: str, build=True):
"""Load flow from json file"""
# This is done to avoid circular imports
with open(path, "r", encoding="utf-8") as f:
flow_graph = json.load(f)
data_graph = flow_graph["data"]
nodes = data_graph["nodes"]
# Substitute ZeroShotPrompt with PromptTemplate
# nodes = replace_zero_shot_prompt_with_prompt_template(nodes)
# Add input variables
# nodes = payload.extract_input_variables(nodes)
# Nodes, edges and root node
edges = data_graph["edges"]
graph = Graph(nodes, edges)
if build:
langchain_object = graph.build()
if hasattr(langchain_object, "verbose"):
langchain_object.verbose = True
if hasattr(langchain_object, "return_intermediate_steps"):
# https://github.com/hwchase17/langchain/issues/2068
# Deactivating until we have a frontend solution
# to display intermediate steps
langchain_object.return_intermediate_steps = False
fix_memory_inputs(langchain_object)
return langchain_object
return graph
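# A minimal usage sketch (the flow file path is hypothetical): with build=True the
# returned object is a built LangChain chain/agent that can be passed straight to
# get_result_and_thought above; with build=False the raw Graph is returned instead.
#
#   langchain_object = load_flow_from_json("my_flow.json")
#   result, thought = get_result_and_thought(langchain_object, "Hello!")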
def process_tweaks(graph_data: Dict, tweaks: Dict):
"""This function is used to tweak the graph data using the node id and the tweaks dict"""
# the tweaks dict is a dict of dicts
# the key is the node id and the value is a dict of the tweaks
# the dict of tweaks contains the name of a certain parameter and the value to be tweaked
# We need to process the graph data to add the tweaks
if "data" not in graph_data and "nodes" in graph_data:
nodes = graph_data["nodes"]
else:
nodes = graph_data["data"]["nodes"]
for node in nodes:
node_id = node["id"]
if node_id in tweaks:
node_tweaks = tweaks[node_id]
template_data = node["data"]["node"]["template"]
for tweak_name, tweak_value in node_tweaks.items():
if tweak_name in template_data:
template_data[tweak_name]["value"] = tweak_value
print(
f"Something changed in node {node_id} with tweak {tweak_name} and value {tweak_value}"
)
return graph_data
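# Illustrative example (node id and field name are hypothetical): tweaks maps a node id
# to {template field name: new value}, so
#
#   tweaks = {"llm-1": {"temperature": 0.2}}
#   graph_data = process_tweaks(graph_data, tweaks)
#
# would set the matching node's template["temperature"]["value"] to 0.2.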
| [
"node"
] |
2024-01-10 | ewave33/generative-ai-amazon-bedrock-langchain-agent-example | agent~lambda~agent-handler~lambda_function.py | import json
import datetime
import time
import os
import dateutil.parser
import logging
import boto3
from boto3.dynamodb.conditions import Key
from langchain.llms.bedrock import Bedrock
from langchain.chat_models import BedrockChat
from langchain.schema import HumanMessage
from chat import Chat
from fsi_agent import FSIAgent
from pypdf import PdfReader, PdfWriter
# Create reference to DynamoDB tables
loan_application_table_name = os.environ['USER_PENDING_ACCOUNTS_TABLE']
user_accounts_table_name = os.environ['USER_EXISTING_ACCOUNTS_TABLE']
s3_artifact_bucket = os.environ['S3_ARTIFACT_BUCKET_NAME']
# Instantiate boto3 clients and resources
boto3_session = boto3.Session(region_name=os.environ['AWS_REGION'])
dynamodb = boto3.resource('dynamodb',region_name=os.environ['AWS_REGION'])
s3_client = boto3.client('s3',region_name=os.environ['AWS_REGION'],config=boto3.session.Config(signature_version='s3v4',))
s3_object = boto3.resource('s3')
bedrock_client = boto3_session.client(service_name="bedrock-runtime")
# --- Lex v2 request/response helpers (https://docs.aws.amazon.com/lexv2/latest/dg/lambda-response-format.html) ---
def elicit_slot(session_attributes, active_contexts, intent, slot_to_elicit, message):
response = {
'sessionState': {
'activeContexts':[{
'name': 'intentContext',
'contextAttributes': active_contexts,
'timeToLive': {
'timeToLiveInSeconds': 86400,
'turnsToLive': 20
}
}],
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'ElicitSlot',
'slotToElicit': slot_to_elicit
},
'intent': intent,
},
'messages': [{
"contentType": "PlainText",
"content": message,
}]
}
return response
def confirm_intent(active_contexts, session_attributes, intent, message):
response = {
'sessionState': {
'activeContexts': [active_contexts],
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'ConfirmIntent'
},
'intent': intent
}
}
return response
def close(session_attributes, active_contexts, fulfillment_state, intent, message):
response = {
'sessionState': {
'activeContexts':[{
'name': 'intentContext',
'contextAttributes': active_contexts,
'timeToLive': {
'timeToLiveInSeconds': 86400,
'turnsToLive': 20
}
}],
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'Close',
},
'intent': intent,
},
'messages': [{'contentType': 'PlainText', 'content': message}]
}
return response
def elicit_intent(intent_request, session_attributes, message):
response = {
'sessionState': {
'dialogAction': {
'type': 'ElicitIntent'
},
'sessionAttributes': session_attributes
},
'messages': [
{
'contentType': 'PlainText',
'content': message
},
{
'contentType': 'ImageResponseCard',
'imageResponseCard': {
"buttons": [
{
"text": "Loan Application",
"value": "Loan Application"
},
{
"text": "Loan Calculator",
"value": "Loan Calculator"
},
{
"text": "Ask GenAI",
"value": "What kind of questions can the Assistant answer?"
}
],
"title": "How can I help you?"
}
}
]
}
return response
def delegate(session_attributes, active_contexts, intent, message):
response = {
'sessionState': {
'activeContexts':[{
'name': 'intentContext',
'contextAttributes': active_contexts,
'timeToLive': {
'timeToLiveInSeconds': 86400,
'turnsToLive': 20
}
}],
'sessionAttributes': session_attributes,
'dialogAction': {
'type': 'Delegate',
},
'intent': intent,
},
'messages': [{'contentType': 'PlainText', 'content': message}]
}
return response
def initial_message(intent_name):
response = {
'sessionState': {
'dialogAction': {
'type': 'ElicitSlot',
'slotToElicit': 'UserName' if intent_name=='MakePayment' else 'PickUpCity'
},
'intent': {
'confirmationState': 'None',
'name': intent_name,
'state': 'InProgress'
}
}
}
return response
def build_response_card(title, subtitle, options):
"""
Build a responseCard with a title, subtitle, and an optional set of options which should be displayed as buttons.
"""
buttons = None
if options is not None:
buttons = []
for i in range(min(5, len(options))):
buttons.append(options[i])
return {
'contentType': 'ImageResponseCard',
'imageResponseCard': {
'title': title,
'subTitle': subtitle,
'buttons': buttons
}
}
def build_slot(intent_request, slot_to_build, slot_value):
intent_request['sessionState']['intent']['slots'][slot_to_build] = {
'shape': 'Scalar', 'value':
{
'originalValue': slot_value, 'resolvedValues': [slot_value],
'interpretedValue': slot_value
}
}
def build_validation_result(isvalid, violated_slot, message_content):
print("Build Validation")
return {
'isValid': isvalid,
'violatedSlot': violated_slot,
'message': message_content
}
# --- Utility helper functions ---
def isvalid_date(date):
try:
dateutil.parser.parse(date, fuzzy=True)
print("TRUE DATE")
return True
except ValueError as e:
print("DATE PARSER ERROR = " + str(e))
return False
def isvalid_yes_or_no(value):
if value == 'Yes' or value == 'yes' or value == 'No' or value == 'no':
return True
return False
def isvalid_credit_score(credit_score):
if int(credit_score) < 851 and int(credit_score) > 300:
return True
return False
def isvalid_zero_or_greater(value):
if int(value) >= 0:
return True
return False
def safe_int(n):
if n is not None:
return int(n)
return n
def create_presigned_url(bucket_name, object_name, expiration=600):
# Generate a presigned URL for the S3 object
try:
response = s3_client.generate_presigned_url('get_object',
Params={'Bucket': bucket_name,
'Key': object_name},
ExpiresIn=expiration)
except Exception as e:
print(e)
logging.error(e)
return "Error"
# The response contains the presigned URL
return response
def try_ex(value):
"""
Safely access Slots dictionary values.
"""
if value is not None:
if value['value']['resolvedValues']:
return value['value']['interpretedValue']
elif value['value']['originalValue']:
return value['value']['originalValue']
else:
return None
else:
return None
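# For reference, a sketch of the Lex V2 slot shape that try_ex expects, mirroring
# build_slot above (slot name and values here are hypothetical):
#
#   slots = {"UserName": {"shape": "Scalar", "value": {"originalValue": "john",
#            "resolvedValues": ["john"], "interpretedValue": "john"}}}
#   try_ex(slots["UserName"])  # -> "john"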
# --- Intent fulfillment functions ---
def isvalid_pin(userName, pin):
"""
Validates the user-provided PIN using a DynamoDB table lookup.
"""
plans_table = dynamodb.Table(user_accounts_table_name)
try:
# Set up the query parameters
params = {
'KeyConditionExpression': 'userName = :c',
'ExpressionAttributeValues': {
':c': userName
}
}
# Execute the query and get the result
response = plans_table.query(**params)
# iterate over the items returned in the response
if len(response['Items']) > 0:
pin_to_compare = int(response['Items'][0]['pin'])
# check if the password in the item matches the specified password
if pin_to_compare == int(pin):
return True
return False
except Exception as e:
print(e)
return e
def isvalid_username(userName):
"""
Validates the user-provided username exists in the 'user_accounts_table_name' DynamoDB table.
"""
plans_table = dynamodb.Table(user_accounts_table_name)
try:
# Set up the query parameters
params = {
'KeyConditionExpression': 'userName = :c',
'ExpressionAttributeValues': {
':c': userName
}
}
# Execute the query and get the result
response = plans_table.query(**params)
# Check if any items were returned
if response['Count'] != 0:
return True
else:
return False
except Exception as e:
print(e)
return e
def validate_pin(intent_request, slots):
"""
Performs slot validation for username and PIN. Invoked as part of 'verify_identity' intent fulfillment.
"""
username = try_ex(slots['UserName'])
pin = try_ex(slots['Pin'])
if username is not None:
if not isvalid_username(username):
return build_validation_result(
False,
'UserName',
'Our records indicate there is no profile belonging to the username, {}. Please enter a valid username'.format(username)
)
session_attributes = intent_request['sessionState'].get("sessionAttributes") or {}
session_attributes['UserName'] = username
intent_request['sessionState']['sessionAttributes']['UserName'] = username
else:
return build_validation_result(
False,
'UserName',
'Our records indicate there are no accounts belonging to that username. Please try again.'
)
if pin is not None:
if not isvalid_pin(username, pin):
return build_validation_result(
False,
'Pin',
'You have entered an incorrect PIN. Please try again.'.format(pin)
)
else:
message = "Thank you for choosing Octank Financial, {}. Please confirm your 4-digit PIN before we proceed.".format(username)
return build_validation_result(
False,
'Pin',
message
)
return {'isValid': True}
def verify_identity(intent_request):
"""
Performs dialog management and fulfillment for username verification.
Beyond fulfillment, the implementation for this intent demonstrates the following:
1) Use of elicitSlot in slot validation and re-prompting.
2) Use of sessionAttributes {UserName} to pass information that can be used to guide conversation.
"""
slots = intent_request['sessionState']['intent']['slots']
pin = try_ex(slots['Pin'])
username=try_ex(slots['UserName'])
confirmation_status = intent_request['sessionState']['intent']['confirmationState']
session_attributes = intent_request['sessionState'].get("sessionAttributes") or {}
intent = intent_request['sessionState']['intent']
active_contexts = {}
# Validate any slots which have been specified. If any are invalid, re-elicit for their value
validation_result = validate_pin(intent_request, intent_request['sessionState']['intent']['slots'])
session_attributes['UserName'] = username
if not validation_result['isValid']:
slots = intent_request['sessionState']['intent']['slots']
slots[validation_result['violatedSlot']] = None
return elicit_slot(
session_attributes,
active_contexts,
intent_request['sessionState']['intent'],
validation_result['violatedSlot'],
validation_result['message']
)
else:
if confirmation_status == 'None':
# Query DDB for user information before offering intents
plans_table = dynamodb.Table(user_accounts_table_name)
try:
# Query the table using the partition key
response = plans_table.query(
KeyConditionExpression=Key('userName').eq(username)
)
# TODO: Customize account readout based on account type
message = ""
items = response['Items']
for item in items:
if item['planName'] == 'mortgage' or item['planName'] == 'Mortgage':
message = "Your mortgage account summary includes a ${:,} loan at {}% interest with ${:,} of unpaid principal. Your next payment of ${:,} is scheduled for {}.".format(item['loanAmount'], item['loanInterest'], item['unpaidPrincipal'], item['amountDue'], item['dueDate'])
elif item['planName'] == 'Checking' or item['planName'] == 'checking':
message = "I see you have a Savings account with Octank Financial. Your account balance is ${:,} and your next payment \
amount of ${:,} is scheduled for {}.".format(item['unpaidPrincipal'], item['paymentAmount'], item['dueDate'])
elif item['planName'] == 'Loan' or item['planName'] == 'loan':
message = "I see you have a Loan account with Octank Financial. Your account balance is ${:,} and your next payment \
amount of ${:,} is scheduled for {}.".format(item['unpaidPrincipal'], item['paymentAmount'], item['dueDate'])
return elicit_intent(intent_request, session_attributes,
'Thank you for confirming your username and PIN, {}. {}'.format(username, message)
)
except Exception as e:
print(e)
return e
def validate_loan_application(intent_request, slots):
"""
Performs dialog management and fulfillment for completing a loan application.
Beyond fulfillment, the implementation for this intent demonstrates the following:
1) Use of elicitSlot in slot validation and re-prompting.
2) Use of sessionAttributes to pass information that can be used to guide conversation.
"""
username = try_ex(slots['UserName'])
loan_value = try_ex(slots['LoanValue'])
monthly_income = try_ex(slots['MonthlyIncome'])
work_history = try_ex(slots['WorkHistory'])
credit_score = try_ex(slots['CreditScore'])
housing_expense = try_ex(slots['HousingExpense'])
debt_amount = try_ex(slots['DebtAmount'])
down_payment = try_ex(slots['DownPayment'])
coborrow = try_ex(slots['Coborrow'])
closing_date = try_ex(slots['ClosingDate'])
confirmation_status = intent_request['sessionState']['intent']['confirmationState']
session_attributes = intent_request['sessionState'].get("sessionAttributes") or {}
active_contexts = {}
if username is not None:
if not isvalid_username(username):
return build_validation_result(
False,
'UserName',
'Our records indicate there is no profile belonging to the username, {}. Please enter a valid username'.format(username)
)
else:
try:
session_username = intent_request['sessionState']['sessionAttributes']['UserName']
build_slot(intent_request, 'UserName', session_username)
except KeyError:
return build_validation_result(
False,
'UserName',
'We cannot find an account under that username. Please try again with a valid username.'
)
if loan_value is not None:
if loan_value.isnumeric():
if not isvalid_zero_or_greater(loan_value):
return build_validation_result(False, 'LoanValue', 'Please enter a value greater than $0.')
else:
prompt = "The user was just asked to provide their loan value on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nWhat is your desired loan amount?"
return build_validation_result(False, 'LoanValue', reply)
else:
return build_validation_result(
False,
'LoanValue',
"What is your desired loan amount? In other words, how much are looking to borrow?"
)
if monthly_income is not None:
if monthly_income.isnumeric():
if not isvalid_zero_or_greater(monthly_income):
return build_validation_result(False, 'MonthlyIncome', 'Monthly income amount must be greater than $0. Please try again.')
else:
prompt = "The user was just asked to provide their monthly income on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nWhat is your monthly income?"
return build_validation_result(False, 'MonthlyIncome', reply)
else:
return build_validation_result(
False,
'MonthlyIncome',
"What is your monthly income?"
)
if work_history is not None:
if not isvalid_yes_or_no(work_history):
prompt = "The user was just asked to confirm their continuous two year work history on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nDo you have a two-year continuous work history (Yes/No)?"
return build_validation_result(False, 'WorkHistory', reply)
else:
return build_validation_result(
False,
'WorkHistory',
"Do you have a two-year continuous work history (Yes/No)?"
)
if credit_score is not None:
if credit_score.isnumeric():
if not isvalid_credit_score(credit_score):
return build_validation_result(False, 'CreditScore', 'Credit score entries must be between 300 and 850. Please enter a valid credit score.')
else:
prompt = "The user was just asked to provide their credit score on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nWhat do you think your current credit score is?"
return build_validation_result(False, 'CreditScore', reply)
else:
return build_validation_result(
False,
'CreditScore',
"What do you think your current credit score is?"
)
if housing_expense is not None:
if housing_expense.isnumeric():
if not isvalid_zero_or_greater(housing_expense):
return build_validation_result(False, 'HousingExpense', 'Your housing expense must be a value greater than or equal to $0. Please try again.')
else:
prompt = "The user was just asked to provide their monthly housing expense on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nHow much are you currently paying for housing each month?"
return build_validation_result(False, 'HousingExpense', reply)
else:
return build_validation_result(
False,
'HousingExpense',
"How much are you currently paying for housing each month?"
)
if debt_amount is not None:
if debt_amount.isnumeric():
if not isvalid_zero_or_greater(debt_amount):
return build_validation_result(False, 'DebtAmount', 'Your debt amount must be a value greater than or equal to $0. Please try again.')
else:
prompt = "The user was just asked to provide their monthly debt amount on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nWhat is your estimated credit card or student loan debt?"
return build_validation_result(False, 'DebtAmount', reply)
else:
return build_validation_result(
False,
'DebtAmount',
"What is your estimated credit card or student loan debt? Please enter '0' if none."
)
if down_payment is not None:
if down_payment.isnumeric():
if not isvalid_zero_or_greater(down_payment):
return build_validation_result(False, 'DownPayment', 'Your estimated down payment must be a value greater than or equal to $0. Please try again.')
else:
prompt = "The user was just asked to provide their estimated down payment on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nWhat do you have saved for a down payment?"
return build_validation_result(False, 'DownPayment', reply)
else:
return build_validation_result(
False,
'DownPayment',
"What do you have saved for a down payment?"
)
if coborrow is not None:
if not isvalid_yes_or_no(coborrow):
prompt = "The user was just asked to confirm if they will have a co-borrow on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nDo you have a co-borrower (Yes/No)?"
return build_validation_result(False, 'Coborrow', reply)
else:
return build_validation_result(
False,
'Coborrow',
"Do you have a co-borrower (Yes/No)?"
)
if closing_date is not None:
if not isvalid_date(closing_date):
prompt = "The user was just asked to provide their real estate closing date on a loan application and this was their response: " + intent_request['inputTranscript']
message = invoke_fm(prompt)
reply = message + " \n\nWhen are you looking to close?"
return build_validation_result(False, 'ClosingDate', reply)
#if datetime.datetime.strptime(closing_date, '%Y-%m-%d').date() <= datetime.date.today():
# return build_validation_result(False, 'ClosingDate', 'Closing dates must be scheduled at least one day in advance. Please try a different date.')
else:
print("## ClosingDate")
return build_validation_result(
False,
'ClosingDate',
'When are you looking to close?'
)
return {'isValid': True}
def loan_application(intent_request):
"""
Performs dialog management and fulfillment for completing a loan application.
Beyond fulfillment, the implementation for this intent demonstrates the following:
1) Use of elicitSlot in slot validation and re-prompting
2) Use of sessionAttributes to pass information that can be used to guide conversation
"""
slots = intent_request['sessionState']['intent']['slots']
username = try_ex(slots['UserName'])
loan_value = try_ex(slots['LoanValue'])
monthly_income = try_ex(slots['MonthlyIncome'])
work_history = try_ex(slots['WorkHistory'])
credit_score = try_ex(slots['CreditScore'])
housing_expense = try_ex(slots['HousingExpense'])
debt_amount = try_ex(slots['DebtAmount'])
down_payment = try_ex(slots['DownPayment'])
coborrow = try_ex(slots['Coborrow'])
closing_date = try_ex(slots['ClosingDate'])
confirmation_status = intent_request['sessionState']['intent']['confirmationState']
session_attributes = intent_request['sessionState'].get("sessionAttributes") or {}
intent = intent_request['sessionState']['intent']
active_contexts = {}
if intent_request['invocationSource'] == 'DialogCodeHook':
# Validate any slots which have been specified. If any are invalid, re-elicit for their value
validation_result = validate_loan_application(intent_request, intent_request['sessionState']['intent']['slots'])
print("LOAN APPLICATION - validation_result = " + str(validation_result))
if 'isValid' in validation_result:
if validation_result['isValid'] == False:
if validation_result['violatedSlot'] == 'CreditScore' and confirmation_status == 'Denied':
print("Invalid credit score")
validation_result['violatedSlot'] = 'UserName'
intent['slots'] = {}
slots[validation_result['violatedSlot']] = None
return elicit_slot(
session_attributes,
active_contexts,
intent,
validation_result['violatedSlot'],
validation_result['message']
)
if username and monthly_income:
application = {
'LoanValue': loan_value,
'MonthlyIncome': monthly_income,
'CreditScore': credit_score,
'DownPayment': down_payment
}
# Convert the JSON document to a string
application_string = json.dumps(application)
# Write the JSON document to DynamoDB
loan_application_table = dynamodb.Table(loan_application_table_name)
print("DYNAMODB username = " + str(username))
response = loan_application_table.put_item(
Item={
'userName': username,
'planName': 'Loan',
'document': application_string
}
)
# Determine if the intent (and current slot settings) has been denied. The messaging will be different
# if the user is denying an application they initiated or an auto-populated suggestion.
if confirmation_status == 'Denied':
return delegate(session_attributes, active_contexts, intent, 'Confirm loan application')
if confirmation_status == 'None':
return delegate(session_attributes, active_contexts, intent, 'Confirm loan application')
if confirmation_status == 'Confirmed':
intent['confirmationState']="Confirmed"
intent['state']="Fulfilled"
s3_client.download_file(s3_artifact_bucket, 'agent/assets/Mortgage-Loan-Application.pdf', '/tmp/Mortgage-Loan-Application.pdf')
reader = PdfReader('/tmp/Mortgage-Loan-Application.pdf')
writer = PdfWriter()
page = reader.pages[0]
fields = reader.get_fields()
writer.append(reader)
firstname, lastname = username.split(' ', 1)
writer.update_page_form_field_values(
writer.pages[0], {
'fullName34[first]': firstname,
'fullName34[last]': lastname,
'monthlyNet': monthly_income,
'creditScore': credit_score,
'requestedLoan': loan_value,
'downPayment': down_payment
}
)
with open('/tmp/Mortgage-Loan-Application.pdf', "wb") as output_stream:
writer.write(output_stream)
s3_client.upload_file('/tmp/Mortgage-Loan-Application.pdf', s3_artifact_bucket, 'agent/assets/Mortgage-Loan-Application-Completed.pdf')
# Create loan application doc in S3
URLs=[]
# create_presigned_url(bucket_name, object_name, expiration=600):
URLs.append(create_presigned_url(s3_artifact_bucket,'agent/assets/Mortgage-Loan-Application-Completed.pdf',3600))
mortgage_app = 'Your loan application is nearly complete! Please follow the link for the last few bits of information: ' + URLs[0]
return elicit_intent(
intent_request,
session_attributes,
mortgage_app
)
def loan_calculator(intent_request):
"""
Performs dialog management and fulfillment for calculating loan details.
This is an empty function framework intended for the user to develop their own intent fulfillment functions.
"""
session_attributes = intent_request['sessionState'].get("sessionAttributes") or {}
# def elicit_intent(intent_request, session_attributes, message)
return elicit_intent(
intent_request,
session_attributes,
'This is where you would implement LoanCalculator intent fulfillment.'
)
def invoke_fm(prompt):
"""
Invokes Foundational Model endpoint hosted on Amazon Bedrock and parses the response.
"""
chat = Chat(prompt)
llm = Bedrock(client=bedrock_client, model_id="anthropic.claude-instant-v1", region_name=os.environ['AWS_REGION']) # "anthropic.claude-v2 "
llm.model_kwargs = {'max_tokens_to_sample': 350}
lex_agent = FSIAgent(llm, chat.memory)
formatted_prompt = "\n\nHuman: " + prompt + " \n\nAssistant:"
try:
message = lex_agent.run(input=formatted_prompt)
except ValueError as e:
message = str(e)
if not message.startswith("Could not parse LLM output:"):
raise e
message = message.removeprefix("Could not parse LLM output: `").removesuffix("`")
return message
def genai_intent(intent_request):
"""
Performs dialog management and fulfillment for user utterances that do not match defined intents (i.e., FallbackIntent).
Sends user utterance to Foundational Model endpoint via 'invoke_fm' function.
"""
session_attributes = intent_request['sessionState'].get("sessionAttributes") or {}
if intent_request['invocationSource'] == 'DialogCodeHook':
prompt = intent_request['inputTranscript']
output = invoke_fm(prompt)
return elicit_intent(intent_request, session_attributes, output)
# --- Intents ---
def dispatch(intent_request):
"""
Routes the incoming request based on intent.
"""
slots = intent_request['sessionState']['intent']['slots']
username = slots['UserName'] if 'UserName' in slots else None
intent_name = intent_request['sessionState']['intent']['name']
if intent_name == 'VerifyIdentity':
return verify_identity(intent_request)
elif intent_name == 'LoanApplication':
return loan_application(intent_request)
elif intent_name == 'LoanCalculator':
return loan_calculator(intent_request)
else:
return genai_intent(intent_request)
raise Exception('Intent with name ' + intent_name + ' not supported')
# --- Main handler ---
def handler(event, context):
"""
Invoked when the user provides an utterance that maps to a Lex bot intent.
The JSON body of the user request is provided in the event slot.
"""
os.environ['TZ'] = 'America/New_York'
time.tzset()
return dispatch(event) | [
"inputTranscript",
"The user was just asked to provide their monthly income on a loan application and this was their response: PLACEHOLDER",
"The user was just asked to provide their estimated down payment on a loan application and this was their response: PLACEHOLDER",
"The user was just asked to confirm if they will have a co-borrow on a loan application and this was their response: PLACEHOLDER",
"The user was just asked to provide their monthly debt amount on a loan application and this was their response: PLACEHOLDER",
"PlainText",
"The user was just asked to provide their loan value on a loan application and this was their response: PLACEHOLDER",
"The user was just asked to provide their real estate closing date on a loan application and this was their response: PLACEHOLDER",
"ImageResponseCard",
"\n\nHuman: PLACEHOLDER \n\nAssistant:",
"The user was just asked to provide their monthly housing expense on a loan application and this was their response: PLACEHOLDER",
"The user was just asked to confirm their continuous two year work history on a loan application and this was their response: PLACEHOLDER",
"The user was just asked to provide their credit score on a loan application and this was their response: PLACEHOLDER"
] |
2024-01-10 | ewave33/generative-ai-amazon-bedrock-langchain-agent-example | agent~lambda~agent-handler~fsi_agent.py | from langchain.agents.tools import Tool
from langchain.agents.conversational.base import ConversationalAgent
from langchain.agents import AgentExecutor
from tools import tools
from datetime import datetime
PREFIX = "\n\nHuman: You are a Financial Services AI chatbot (Assistant) for a company called Octank Financial. Also, you can answer general questions about anything. You quickly respond to questions from a user with an answer and the sources you used to find your answer in the format: \
[Source 1: Source Title 1 - Source Link 1], \
[Source 2: Source Title 2 - Source Link 2], \
[Source n: Source Title n - Source Link n]. Provide two newline characters between your answer and the sources. By the way, the date is " + datetime.now().strftime("%m/%d/%Y, %H:%M:%S") + ".\n\nAssistant:"
FORMAT_INSTRUCTIONS = "\n\nHuman: \n\nAssistant:"
class FSIAgent():
def __init__(self, llm, memory) -> None:
self.prefix = PREFIX
self.ai_prefix = "Assistant"
self.human_prefix = "Human"
self.llm = llm
self.memory = memory
self.format_instructions = FORMAT_INSTRUCTIONS
self.agent = self.create_agent()
def create_agent(self):
fsi_agent = ConversationalAgent.from_llm_and_tools(
llm = self.llm,
tools = tools,
prefix = self.prefix,
ai_prefix = self.ai_prefix,
human_prefix = self.human_prefix,
format_instructions = self.format_instructions,
return_intermediate_steps = True,
return_source_documents = True
)
agent_executor = AgentExecutor.from_agent_and_tools(agent=fsi_agent, tools=tools, verbose=True, memory=self.memory, return_source_documents=True, return_intermediate_steps=True) # , handle_parsing_errors=True
return agent_executor
def run(self, input):
print("Running FSI Agent with input: " + str(input))
try:
response = self.agent(input)
except ValueError as e:
response = str(e)
if not response.startswith("An output parsing error occurred"):
raise e
response = response.removeprefix("An output parsing error occurred. In order to pass this error back to the agent and have it try again, pass `handle_parsing_errors=True` to the AgentExecutor. This is the error: Could not parse LLM output: `").removesuffix("`")
return response
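# A minimal usage sketch mirroring how invoke_fm in the Lex handler drives this class
# (llm and memory are assumed to be a configured Bedrock LLM and a conversation memory):
#
#   agent = FSIAgent(llm, memory)
#   reply = agent.run(input="\n\nHuman: What mortgage products do you offer? \n\nAssistant:")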
| [] |
2024-01-10 | skimok/recipe | recipe.py | from flask import Flask, render_template, request
import openai
import os
from dotenv import load_dotenv
app = Flask(__name__)
# Securely fetch the API key using the dotenv library
load_dotenv()
key = os.getenv("OPENAI_API_KEY")
openai.api_key = key
dietary_restrictions = [
"Gluten-Free",
"Dairy-Free",
"Vegan",
"Pescatarian",
"Nut-Free",
"Kosher",
"Halal",
"Low-Carb",
"Organic",
"Locally Sourced",
]
cuisines = [
"",
"Italian",
"Mexican",
"Chinese",
"Indian",
"Japanese",
"Thai",
"French",
"Mediterranean",
"American",
"Greek",
]
@app.route('/')
def index():
# Display the main ingredient input page
return render_template('index.html', cuisines=cuisines, dietary_restrictions=dietary_restrictions)
@app.route('/generate_recipe', methods=['POST'])
def generate_recipe():
# Extract the three ingredients from the user's input
ingredients = request.form.getlist('ingredient')
# Extract cuisine and restrictions
selected_cuisine = request.form.get('cuisine')
selected_restrictions = request.form.getlist('restrictions')
print('selected_cuisine: ' + selected_cuisine)
print('selected_restrictions: ' + str(selected_restrictions))
if len(ingredients) != 3:
return "Kindly provide exactly 3 ingredients."
# Craft a conversational prompt for ChatGPT, specifying our needs
# prompt = f"Craft a recipe in HTML using \
# {', '.join(ingredients)}. It's okay to use some other necessary ingredients. \
# Ensure the recipe ingredients appear at the top, \
# followed by the step-by-step instructions."
prompt = f"{', '.join(ingredients)}์(๋ฅผ) ์ฌ์ฉํ์ฌ HTML์์ ๋ ์ํผ๋ฅผ ํ๊ตญ์ด๋ก ๋ง๋์ธ์. \
๋ค๋ฅธ ํ์ํ ์ฌ๋ฃ๋ฅผ ์ฌ์ฉํ๋ ๊ฒ๋ ๊ด์ฐฎ์ต๋๋ค. ๋ ์ํผ์ ์ฌ๋ฃ๊ฐ ์์ ๋ํ๋๋๋ก ํ๊ณ , \
๊ทธ ๋ค์์ ๋จ๊ณ๋ณ ์ง์นจ์ ๋ฐ๋ฅด์ธ์."
if selected_cuisine:
prompt += f" The cuisine should be {selected_cuisine}."
if selected_restrictions and len(selected_restrictions) > 0:
prompt += f" The recipe should have the following restrictions: {', '.join(selected_restrictions)}."
print('prompt: ' + prompt)
messages = [{'role': 'user', 'content': prompt}]
# Engage ChatGPT to receive the desired recipe
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.8,
top_p=1.0,
frequency_penalty=0.0,
presence_penalty=0.6,
)
# Extract the recipe from ChatGPT's response
recipe = response["choices"][0]["message"]["content"]
# Showcase the recipe on a new page
return render_template('recipe.html', recipe=recipe)
if __name__ == '__main__':
app.run(debug=True)
| [
" The cuisine should be PLACEHOLDER.",
", "
] |
2024-01-10 | voynow/turbo-docs | turbo_docs~commands~readme.py | TEMPLATE = """
"You are an expert software developement assistant. Write a README.md for the following repo:
{repo}
- Imagine this repo were trending on GitHub. What sort of information is required in a readme of this caliber?
- Write from the perspective of the user and from a contributer. Curate a best in class user experience by providing ample detail, context, and examples.
- emoji are encouraged. One for each section at minimum.
"""
def readme(repo, template=TEMPLATE):
"""
Choose between GPT-3.5 Turbo and GPT-4, allow for template override, and
generate a README.md file for the current repo.
"""
from turbo_docs.utils import openai_api
readme = "README.md"
prompt = template.format(repo=repo)
response = openai_api.gpt_completion(prompt)
with open(readme, "w", encoding="utf-8") as readme_file:
readme_file.write(response)
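# A minimal usage sketch (the repo string is hypothetical; in turbo_docs it is the
# flattened repo text collected by the CLI before this command runs):
#
#   repo_text = "setup.py:\n...\nturbo_docs/commands/readme.py:\n..."
#   readme(repo_text)  # writes README.md to the current working directory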
| [
"\n\"You are an expert software developement assistant. Write a README.md for the following repo:\n{repo}\n\n- Imagine this repo were trending on GitHub. What sort of information is required in a readme of this caliber?\n- Write from the perspective of the user and from a contributer. Curate a best in class user experience by providing ample detail, context, and examples.\n- emoji are encouraged. One for each section at minimum.\n",
"\n\"You are an expert software developement assistant. Write a README.md for the following repo:\nPLACEHOLDER\n\n- Imagine this repo were trending on GitHub. What sort of information is required in a readme of this caliber?\n- Write from the perspective of the user and from a contributer. Curate a best in class user experience by providing ample detail, context, and examples.\n- emoji are encouraged. One for each section at minimum.\n"
] |